Name: tensorflow
Summary: TensorFlow
-Version: v1.5.0
+Version: v1.7.0
Release: 1
License: Apache-2.0
Source0: %{name}-%{version}.tar.gz
--- /dev/null
+---
+Language: Cpp
+BasedOnStyle: Google
+...
--- /dev/null
+#
+# Copyright 2017 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+include(CMakeParseArguments)
+
+
+#
+# create a library in the absl namespace
+#
+# parameters
+# SOURCES: source files for the library
+# PUBLIC_LIBRARIES: targets and flags for linking phase
+# PRIVATE_COMPILE_FLAGS: compile flags for the library. Will not be exported.
+# EXPORT_NAME: export name for the absl:: target export
+# TARGET: target name
+#
+# create a target associated with <NAME>
+# libraries are installed under CMAKE_INSTALL_FULL_LIBDIR by default
+#
+function(absl_library)
+ cmake_parse_arguments(ABSL_LIB
+ "DISABLE_INSTALL" # keep that in case we want to support installation one day
+ "TARGET;EXPORT_NAME"
+ "SOURCES;PUBLIC_LIBRARIES;PRIVATE_COMPILE_FLAGS;PUBLIC_INCLUDE_DIRS;PRIVATE_INCLUDE_DIRS"
+ ${ARGN}
+ )
+
+ set(_NAME ${ABSL_LIB_TARGET})
+ string(TOUPPER ${_NAME} _UPPER_NAME)
+
+ add_library(${_NAME} STATIC ${ABSL_LIB_SOURCES})
+
+ target_compile_options(${_NAME} PRIVATE ${ABSL_COMPILE_CXXFLAGS} ${ABSL_LIB_PRIVATE_COMPILE_FLAGS})
+ target_link_libraries(${_NAME} PUBLIC ${ABSL_LIB_PUBLIC_LIBRARIES})
+ target_include_directories(${_NAME}
+ PUBLIC ${ABSL_COMMON_INCLUDE_DIRS} ${ABSL_LIB_PUBLIC_INCLUDE_DIRS}
+ PRIVATE ${ABSL_LIB_PRIVATE_INCLUDE_DIRS}
+ )
+
+ if(ABSL_LIB_EXPORT_NAME)
+ add_library(absl::${ABSL_LIB_EXPORT_NAME} ALIAS ${_NAME})
+ endif()
+endfunction()
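+
+#
+# example usage (a minimal sketch; the real calls appear in the per-directory
+# CMakeLists.txt files added later in this patch, e.g. absl/base/CMakeLists.txt):
+#
+#   absl_library(
+#     TARGET absl_base
+#     SOURCES ${BASE_SRC}
+#     PUBLIC_LIBRARIES absl_dynamic_annotations absl_spinlock_wait
+#     EXPORT_NAME base   # also creates the alias target absl::base
+#   )
+#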
+
+
+
+#
+# header-only virtual target creation
+#
+function(absl_header_library)
+ cmake_parse_arguments(ABSL_HO_LIB
+ "DISABLE_INSTALL"
+ "EXPORT_NAME;TARGET"
+ "PUBLIC_LIBRARIES;PRIVATE_COMPILE_FLAGS;PUBLIC_INCLUDE_DIRS;PRIVATE_INCLUDE_DIRS"
+ ${ARGN}
+ )
+
+ set(_NAME ${ABSL_HO_LIB_TARGET})
+
+ set(__dummy_header_only_lib_file "${CMAKE_CURRENT_BINARY_DIR}/${_NAME}_header_only_dummy.cc")
+
+ if(NOT EXISTS ${__dummy_header_only_lib_file})
+ file(WRITE ${__dummy_header_only_lib_file}
+ "/* generated file for header-only cmake target */
+
+ namespace absl {
+
+ // single meaningless symbol
+ void ${_NAME}__header_fakesym() {}
+ } // namespace absl
+ "
+ )
+ endif()
+
+
+ add_library(${_NAME} ${__dummy_header_only_lib_file})
+ target_link_libraries(${_NAME} PUBLIC ${ABSL_HO_LIB_PUBLIC_LIBRARIES})
+ target_include_directories(${_NAME}
+ PUBLIC ${ABSL_COMMON_INCLUDE_DIRS} ${ABSL_HO_LIB_PUBLIC_INCLUDE_DIRS}
+ PRIVATE ${ABSL_HO_LIB_PRIVATE_INCLUDE_DIRS}
+ )
+
+ if(ABSL_HO_LIB_EXPORT_NAME)
+ add_library(absl::${ABSL_HO_LIB_EXPORT_NAME} ALIAS ${_NAME})
+ endif()
+
+endfunction()
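+
+#
+# example usage (sketch; mirrors absl/algorithm/CMakeLists.txt in this patch):
+#
+#   absl_header_library(
+#     TARGET absl_algorithm
+#     EXPORT_NAME algorithm   # consumers then link against absl::algorithm
+#   )
+#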
+
+
+#
+# create an abseil unit_test and add it to the executed test list
+#
+# parameters
+# TARGET: target name prefix
+# SOURCES: sources files for the tests
+# PUBLIC_LIBRARIES: targets and flags for linking phase.
+# PRIVATE_COMPILE_FLAGS: compile flags for the test. Will not be exported.
+#
+# create a target associated with <NAME>_bin
+#
+# all tests will be registered for execution with add_test()
+#
+# test compilation and execution are disabled when BUILD_TESTING=OFF
+#
+function(absl_test)
+
+ cmake_parse_arguments(ABSL_TEST
+ ""
+ "TARGET"
+ "SOURCES;PUBLIC_LIBRARIES;PRIVATE_COMPILE_FLAGS;PUBLIC_INCLUDE_DIRS"
+ ${ARGN}
+ )
+
+
+ if(BUILD_TESTING)
+
+ set(_NAME ${ABSL_TEST_TARGET})
+ string(TOUPPER ${_NAME} _UPPER_NAME)
+
+ add_executable(${_NAME}_bin ${ABSL_TEST_SOURCES})
+
+ target_compile_options(${_NAME}_bin PRIVATE ${ABSL_COMPILE_CXXFLAGS} ${ABSL_TEST_PRIVATE_COMPILE_FLAGS})
+ target_link_libraries(${_NAME}_bin PUBLIC ${ABSL_TEST_PUBLIC_LIBRARIES} ${ABSL_TEST_COMMON_LIBRARIES})
+ target_include_directories(${_NAME}_bin
+ PUBLIC ${ABSL_COMMON_INCLUDE_DIRS} ${ABSL_TEST_PUBLIC_INCLUDE_DIRS}
+ PRIVATE ${GMOCK_INCLUDE_DIRS} ${GTEST_INCLUDE_DIRS}
+ )
+
+ add_test(${_NAME}_test ${_NAME}_bin)
+ endif(BUILD_TESTING)
+
+endfunction()
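+
+#
+# example usage (sketch; see absl/base/CMakeLists.txt in this patch):
+#
+#   absl_test(
+#     TARGET config_test
+#     SOURCES ${CONFIG_TEST_SRC}
+#     PUBLIC_LIBRARIES absl::base absl::synchronization
+#   )
+#
+# this builds the executable config_test_bin and registers config_test_test
+# via add_test(); nothing is compiled or run when BUILD_TESTING=OFF
+#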
+
+
+
+
+function(check_target my_target)
+
+ if(NOT TARGET ${my_target})
+ message(FATAL_ERROR " ABSL: compiling absl requires a ${my_target} CMake target in your project,
+ see CMake/README.md for more details")
+ endif(NOT TARGET ${my_target})
+
+endfunction()
--- /dev/null
+
+## Abseil CMake build instructions
+
+
+### Recommended usage: incorporate Abseil into a CMake project
+
+ For API / ABI compatibility reasons, it is recommended to build
+ and use abseil in a subdirectory of your project or as an embedded
+ dependency.
+
+ This is similar to the recommended usage of the googletest framework
+ ( https://github.com/google/googletest/blob/master/googletest/README.md )
+
+ Build and use step-by-step
+
+
+ 1- Download abseil and copy it into a sub-directory of your project,
+ or add abseil as a git submodule in your project.
+
+ 2- If not done yet, download and copy into your project the two dependencies of
+ abseil, `cctz` and `googletest`:
+
+ * cctz https://github.com/google/cctz
+ * googletest https://github.com/google/googletest
+
+ 3- You can then use the CMake command `add_subdirectory()` to include
+ abseil directly and use the abseil targets in your project.
+
+ Note: Abseil requires CCTZ and the googletest framework. Consequently,
+ the targets `gtest`, `gtest_main`, `gmock` and `cctz` need
+ to be declared in your project before including abseil with `add_subdirectory`.
+
+
+ 4- Add the absl:: target you wish to use to the `target_link_libraries()`
+ section of your executable or of your library.
+
+
+ Here is a short CMakeLists.txt example of a possible project file
+ using abseil:
+
+ cmake_minimum_required(VERSION 2.8.12)
+ project(my_project)
+
+ set(CMAKE_CXX_FLAGS "-std=c++11 -stdlib=libc++ ${CMAKE_CXX_FLAGS}")
+
+ if (MSVC)
+ # /wd4005 macro-redefinition
+ # /wd4068 unknown pragma
+ # /wd4244 conversion from 'type1' to 'type2'
+ # /wd4267 conversion from 'size_t' to 'type2'
+ # /wd4800 force value to bool 'true' or 'false' (performance warning)
+ add_compile_options(/wd4005 /wd4068 /wd4244 /wd4267 /wd4800)
+ add_definitions(/DNOMINMAX /DWIN32_LEAN_AND_MEAN=1 /D_CRT_SECURE_NO_WARNINGS)
+ endif()
+
+ add_subdirectory(googletest)
+ add_subdirectory(cctz)
+ add_subdirectory(abseil-cpp)
+
+ add_executable(my_exe source.cpp)
+ target_link_libraries(my_exe absl::base absl::synchronization absl::strings)
+
+
+As of this writing, that pull request requires -DBUILD_TESTING=OFF as it doesn't correctly export cctz's dependency on Google Benchmark.
+
+ You will find here a non-exhaustive list of absl public targets:
+
+ absl::base
+ absl::algorithm
+ absl::container
+ absl::debugging
+ absl::memory
+ absl::meta
+ absl::numeric
+ absl::strings
+ absl::synchronization
+ absl::time
+ absl::utility
+
+
+
+
+
--- /dev/null
+#
+# Copyright 2017 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+cmake_minimum_required(VERSION 2.8.12)
+project(absl)
+
+# enable ctest
+include(CTest)
+
+list(APPEND CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/CMake)
+
+include(GNUInstallDirs)
+include(AbseilHelpers)
+
+
+# config options
+if (MSVC)
+ # /wd4005 macro-redefinition
+ # /wd4068 unknown pragma
+ # /wd4244 conversion from 'type1' to 'type2'
+ # /wd4267 conversion from 'size_t' to 'type2'
+ # /wd4800 force value to bool 'true' or 'false' (performance warning)
+ add_compile_options(/W3 /WX /wd4005 /wd4068 /wd4244 /wd4267 /wd4800)
+ add_definitions(/DNOMINMAX /DWIN32_LEAN_AND_MEAN=1 /D_CRT_SECURE_NO_WARNINGS)
+else()
+ set(ABSL_STD_CXX_FLAG "-std=c++11" CACHE STRING "c++ std flag (default: c++11)")
+endif()
+
+
+
+##
+## Using absl targets
+##
+## all public absl targets are
+## exported with the absl:: prefix
+##
+## e.g. absl::base absl::synchronization absl::strings ....
+##
+## DO NOT rely on the internal targets outside of the prefix
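+##
+## for example (illustrative only; `my_exe` stands for any executable target
+## defined by the enclosing project):
+##   target_link_libraries(my_exe absl::base absl::synchronization absl::strings)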
+
+
+# include current path
+list(APPEND ABSL_COMMON_INCLUDE_DIRS ${CMAKE_CURRENT_SOURCE_DIR})
+
+# -std=X
+set(CMAKE_CXX_FLAGS "${ABSL_STD_CXX_FLAG} ${CMAKE_CXX_FLAGS}")
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_WARNING_VLA} ${CMAKE_CXX_FLAGS} ")
+
+
+# find dependencies
+## pthread
+find_package(Threads REQUIRED)
+
+# commented: used only for standalone test
+#add_subdirectory(cctz)
+#add_subdirectory(googletest)
+
+## check targets
+check_target(cctz)
+check_target(gtest)
+check_target(gtest_main)
+check_target(gmock)
+
+# -fexceptions
+set(ABSL_EXCEPTIONS_FLAG "${CMAKE_CXX_EXCEPTIONS}")
+
+list(APPEND ABSL_TEST_COMMON_LIBRARIES
+ gtest_main
+ gtest
+ gmock
+ ${CMAKE_THREAD_LIBS_INIT}
+)
+
+add_subdirectory(absl)
made and **why** it was made. Link to a GitHub issue if it exists.
* Don't fix code style and formatting unless you are already changing that
- line to address an issue. PRs with irrelevant changes won't be merged. If
+ line to address an issue. Formatting of modified lines may be done using
+ `git clang-format`. PRs with irrelevant changes won't be merged. If
you do want to fix formatting or style, do that in a separate PR.
* Unless your PR is trivial, you should expect there will be reviewer comments
- [Codemap](#codemap)
- [License](#license)
- [Links](#links)
+- [Build with CMake](#cmake)
<a name="about"></a>
## About Abseil
time zones.
* [`types`](absl/types/)
<br /> The `types` library contains non-container utility types, like a
- C++11-compatible version of `absl::optional`.
+ C++11-compatible version of the C++17 `std::optional` type.
## License
* Peruse our
[Abseil Compatibility Guarantees](http://abseil.io/about/compatibility) to
understand both what we promise to you, and what we expect of you in return.
+
+<a name="cmake"></a>
+## Build with CMake
-## Disclaimer
-
-* This is not an official Google product.
+Please check the [CMake build instructions](CMake/README.md).
values = {
"compiler": "llvm",
},
+ visibility = [":__subpackages__"],
)
# following configs are based on mapping defined in: https://git.io/v5Ijz
values = {
"cpu": "darwin",
},
+ visibility = [":__subpackages__"],
)
config_setting(
values = {
"cpu": "x64_windows",
},
+ visibility = [":__subpackages__"],
)
config_setting(
values = {
"cpu": "ppc",
},
+ visibility = [":__subpackages__"],
)
--- /dev/null
+#
+# Copyright 2017 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+
+add_subdirectory(base)
+add_subdirectory(algorithm)
+add_subdirectory(container)
+add_subdirectory(debugging)
+add_subdirectory(memory)
+add_subdirectory(meta)
+add_subdirectory(numeric)
+add_subdirectory(strings)
+add_subdirectory(synchronization)
+add_subdirectory(time)
+add_subdirectory(types)
+add_subdirectory(utility)
--- /dev/null
+#
+# Copyright 2017 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+list(APPEND ALGORITHM_PUBLIC_HEADERS
+ "algorithm.h"
+ "container.h"
+)
+
+
+#
+## TESTS
+#
+
+# test algorithm_test
+list(APPEND ALGORITHM_TEST_SRC
+ "algorithm_test.cc"
+ ${ALGORITHM_PUBLIC_HEADERS}
+ ${ALGORITHM_INTERNAL_HEADERS}
+)
+
+absl_header_library(
+ TARGET
+ absl_algorithm
+ EXPORT_NAME
+ algorithm
+)
+
+absl_test(
+ TARGET
+ algorithm_test
+ SOURCES
+ ${ALGORITHM_TEST_SRC}
+ PUBLIC_LIBRARIES
+ absl::algorithm
+)
+
+
+
+
+# test container_test
+set(CONTAINER_TEST_SRC "container_test.cc")
+
+absl_test(
+ TARGET
+ container_test
+ SOURCES
+ ${CONTAINER_TEST_SRC}
+ PUBLIC_LIBRARIES
+ absl::algorithm
+)
template <typename C>
using ContainerIter = decltype(begin(std::declval<C&>()));
+// An MSVC bug involving template parameter substitution requires us to use
+// decltype() here instead of just std::pair.
+template <typename C1, typename C2>
+using ContainerIterPairType =
+ decltype(std::make_pair(ContainerIter<C1>(), ContainerIter<C2>()));
+
template <typename C>
using ContainerDifferenceType =
decltype(std::distance(std::declval<ContainerIter<C>>(),
// Container-based version of the <algorithm> `std::mismatch()` function to
// return the first element where two ordered containers differ.
template <typename C1, typename C2>
-std::pair<container_algorithm_internal::ContainerIter<C1>,
- container_algorithm_internal::ContainerIter<C2>>
+container_algorithm_internal::ContainerIterPairType<C1, C2>
c_mismatch(C1& c1, C2& c2) {
return std::mismatch(container_algorithm_internal::c_begin(c1),
container_algorithm_internal::c_end(c1),
// Overload of c_mismatch() for using a predicate evaluation other than `==` as
// the function's test condition.
template <typename C1, typename C2, typename BinaryPredicate>
-std::pair<container_algorithm_internal::ContainerIter<C1>,
- container_algorithm_internal::ContainerIter<C2>>
+container_algorithm_internal::ContainerIterPairType<C1, C2>
c_mismatch(C1& c1, C2& c2, BinaryPredicate&& pred) {
return std::mismatch(container_algorithm_internal::c_begin(c1),
container_algorithm_internal::c_end(c1),
// c_is_sorted()
//
// Container-based version of the <algorithm> `std::is_sorted()` function
-// to evaluate whether the given containter is sorted in ascending order.
+// to evaluate whether the given container is sorted in ascending order.
template <typename C>
bool c_is_sorted(const C& c) {
return std::is_sorted(container_algorithm_internal::c_begin(c),
// to return an iterator pair pointing to the first and last elements in a
// sorted container which compare equal to `value`.
template <typename Sequence, typename T>
-std::pair<container_algorithm_internal::ContainerIter<Sequence>,
- container_algorithm_internal::ContainerIter<Sequence>>
+container_algorithm_internal::ContainerIterPairType<Sequence, Sequence>
c_equal_range(Sequence& sequence, T&& value) {
return std::equal_range(container_algorithm_internal::c_begin(sequence),
container_algorithm_internal::c_end(sequence),
// Overload of c_equal_range() for performing a `comp` comparison other than
// the default `operator<`.
template <typename Sequence, typename T, typename Compare>
-std::pair<container_algorithm_internal::ContainerIter<Sequence>,
- container_algorithm_internal::ContainerIter<Sequence>>
+container_algorithm_internal::ContainerIterPairType<Sequence, Sequence>
c_equal_range(Sequence& sequence, T&& value, Compare&& comp) {
return std::equal_range(container_algorithm_internal::c_begin(sequence),
container_algorithm_internal::c_end(sequence),
// smallest and largest values, respectively, using `operator<` to make the
// comparisons.
template <typename C>
-std::pair<container_algorithm_internal::ContainerIter<C>,
- container_algorithm_internal::ContainerIter<C>>
+container_algorithm_internal::ContainerIterPairType<C, C>
c_minmax_element(C& c) {
return std::minmax_element(container_algorithm_internal::c_begin(c),
container_algorithm_internal::c_end(c));
// Overload of c_minmax_element() for performing `comp` comparisons other than
// `operator<`.
template <typename C, typename Compare>
-std::pair<container_algorithm_internal::ContainerIter<C>,
- container_algorithm_internal::ContainerIter<C>>
+container_algorithm_internal::ContainerIterPairType<C, C>
c_minmax_element(C& c, Compare&& comp) {
return std::minmax_element(container_algorithm_internal::c_begin(c),
container_algorithm_internal::c_end(c),
licenses(["notice"]) # Apache 2.0
-exports_files(["thread_annotations.h"])
-
cc_library(
name = "spinlock_wait",
srcs = [
+ "internal/spinlock_akaros.inc",
"internal/spinlock_posix.inc",
"internal/spinlock_wait.cc",
"internal/spinlock_win32.inc",
"internal/spinlock_wait.h",
],
copts = ABSL_DEFAULT_COPTS,
+ visibility = [
+ "//absl/base:__pkg__",
+ ],
deps = [":core_headers"],
)
"internal/malloc_extension_c.h",
],
copts = ABSL_DEFAULT_COPTS,
+ visibility = [
+ "//absl:__subpackages__",
+ ],
deps = [
":core_headers",
":dynamic_annotations",
textual_hdrs = [
"internal/malloc_hook_invoke.h",
],
+ visibility = [
+ "//absl:__subpackages__",
+ ],
deps = [
":base",
":config",
"internal/invoke.h",
],
copts = ABSL_DEFAULT_COPTS,
+ visibility = [
+ "//absl:__subpackages__",
+ ],
)
cc_library(
srcs = ["internal/throw_delegate.cc"],
hdrs = ["internal/throw_delegate.h"],
copts = ABSL_DEFAULT_COPTS + ABSL_EXCEPTIONS_FLAG,
- features = [
- "-use_header_modules",
+ visibility = [
+ "//absl:__subpackages__",
],
deps = [
":base",
testonly = 1,
hdrs = ["internal/exception_testing.h"],
copts = ABSL_TEST_COPTS,
+ visibility = ["//absl:__subpackages__"],
+ deps = [
+ ":config",
+ "@com_google_googletest//:gtest",
+ ],
+)
+
+cc_library(
+ name = "pretty_function",
+ hdrs = ["internal/pretty_function.h"],
+ visibility = ["//absl:__subpackages__"],
+)
+
+cc_library(
+ name = "exception_safety_testing",
+ testonly = 1,
+ srcs = ["internal/exception_safety_testing.cc"],
+ hdrs = ["internal/exception_safety_testing.h"],
+ copts = ABSL_TEST_COPTS + ABSL_EXCEPTIONS_FLAG,
deps = [
":config",
+ ":pretty_function",
+ "//absl/memory",
+ "//absl/meta:type_traits",
+ "//absl/strings",
+ "//absl/types:optional",
"@com_google_googletest//:gtest",
],
)
+cc_test(
+ name = "exception_safety_testing_test",
+ srcs = ["exception_safety_testing_test.cc"],
+ copts = ABSL_TEST_COPTS + ABSL_EXCEPTIONS_FLAG,
+ deps = [
+ ":exception_safety_testing",
+ "//absl/memory",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
cc_test(
name = "invoke_test",
size = "small",
--- /dev/null
+#
+# Copyright 2017 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+list(APPEND BASE_PUBLIC_HEADERS
+ "attributes.h"
+ "call_once.h"
+ "casts.h"
+ "config.h"
+ "dynamic_annotations.h"
+ "macros.h"
+ "optimization.h"
+ "policy_checks.h"
+ "port.h"
+ "thread_annotations.h"
+)
+
+
+list(APPEND BASE_INTERNAL_HEADERS
+ "internal/atomic_hook.h"
+ "internal/cycleclock.h"
+ "internal/endian.h"
+ "internal/exception_testing.h"
+ "internal/identity.h"
+ "internal/invoke.h"
+ "internal/log_severity.h"
+ "internal/low_level_alloc.h"
+ "internal/low_level_scheduling.h"
+ "internal/malloc_extension_c.h"
+ "internal/malloc_extension.h"
+ "internal/malloc_hook_c.h"
+ "internal/malloc_hook.h"
+ "internal/malloc_hook_invoke.h"
+ "internal/per_thread_tls.h"
+ "internal/raw_logging.h"
+ "internal/scheduling_mode.h"
+ "internal/spinlock.h"
+ "internal/spinlock_wait.h"
+ "internal/sysinfo.h"
+ "internal/thread_identity.h"
+ "internal/throw_delegate.h"
+ "internal/tsan_mutex_interface.h"
+ "internal/unaligned_access.h"
+ "internal/unscaledcycleclock.h"
+)
+
+
+# absl_base main library
+list(APPEND BASE_SRC
+ "internal/cycleclock.cc"
+ "internal/raw_logging.cc"
+ "internal/spinlock.cc"
+ "internal/sysinfo.cc"
+ "internal/thread_identity.cc"
+ "internal/unscaledcycleclock.cc"
+ "internal/low_level_alloc.cc"
+ "internal/malloc_hook.cc"
+ ${BASE_PUBLIC_HEADERS}
+ ${BASE_INTERNAL_HEADERS}
+)
+
+absl_library(
+ TARGET
+ absl_base
+ SOURCES
+ ${BASE_SRC}
+ PUBLIC_LIBRARIES
+ absl_dynamic_annotations
+ absl_spinlock_wait
+ EXPORT_NAME
+ base
+)
+
+# malloc extension library
+set(MALLOC_EXTENSION_SRC "internal/malloc_extension.cc")
+set(MALLOC_EXTENSION_PUBLIC_LIBRARIES absl::base)
+
+absl_library(
+ TARGET
+ absl_malloc_extension
+ SOURCES
+ ${MALLOC_EXTENSION_SRC}
+ PUBLIC_LIBRARIES
+ ${MALLOC_EXTENSION_PUBLIC_LIBRARIES}
+ EXPORT_NAME
+ malloc_extension
+)
+
+# throw delegate library
+set(THROW_DELEGATE_SRC "internal/throw_delegate.cc")
+
+absl_library(
+ TARGET
+ absl_throw_delegate
+ SOURCES
+ ${THROW_DELEGATE_SRC}
+ PUBLIC_LIBRARIES
+ ${THROW_DELEGATE_PUBLIC_LIBRARIES}
+ PRIVATE_COMPILE_FLAGS
+ ${ABSL_EXCEPTIONS_FLAG}
+ EXPORT_NAME
+ throw_delegate
+)
+
+
+# dynamic_annotations library
+set(DYNAMIC_ANNOTATIONS_SRC "dynamic_annotations.cc")
+
+absl_library(
+ TARGET
+ absl_dynamic_annotations
+ SOURCES
+ ${DYNAMIC_ANNOTATIONS_SRC}
+)
+
+
+# spinlock_wait library
+set(SPINLOCK_WAIT_SRC "internal/spinlock_wait.cc")
+
+absl_library(
+ TARGET
+ absl_spinlock_wait
+ SOURCES
+ ${SPINLOCK_WAIT_SRC}
+)
+
+
+# malloc_internal library
+list(APPEND MALLOC_INTERNAL_SRC
+ "internal/low_level_alloc.cc"
+ "internal/malloc_hook.cc"
+ "internal/malloc_hook_mmap_linux.inc"
+)
+
+absl_library(
+ TARGET
+ absl_malloc_internal
+ SOURCES
+ ${MALLOC_INTERNAL_SRC}
+ PUBLIC_LIBRARIES
+ absl_dynamic_annotations
+)
+
+
+
+#
+## TESTS
+#
+
+# call once test
+set(CALL_ONCE_TEST_SRC "call_once_test.cc")
+set(CALL_ONCE_TEST_PUBLIC_LIBRARIES absl::base absl::synchronization)
+
+absl_test(
+ TARGET
+ call_once_test
+ SOURCES
+ ${CALL_ONCE_TEST_SRC}
+ PUBLIC_LIBRARIES
+ ${CALL_ONCE_TEST_PUBLIC_LIBRARIES}
+)
+
+
+# test bit_cast_test
+set(BIT_CAST_TEST_SRC "bit_cast_test.cc")
+
+absl_test(
+ TARGET
+ bit_cast_test
+ SOURCES
+ ${BIT_CAST_TEST_SRC}
+)
+
+
+# test absl_throw_delegate_test
+set(THROW_DELEGATE_TEST_SRC "throw_delegate_test.cc")
+set(THROW_DELEGATE_TEST_PUBLIC_LIBRARIES absl::base absl_throw_delegate)
+
+absl_test(
+ TARGET
+ throw_delegate_test
+ SOURCES
+ ${THROW_DELEGATE_TEST_SRC}
+ PUBLIC_LIBRARIES
+ ${THROW_DELEGATE_TEST_PUBLIC_LIBRARIES}
+)
+
+
+# test invoke_test
+set(INVOKE_TEST_SRC "invoke_test.cc")
+set(INVOKE_TEST_PUBLIC_LIBRARIES absl::strings)
+
+absl_test(
+ TARGET
+ invoke_test
+ SOURCES
+ ${INVOKE_TEST_SRC}
+ PUBLIC_LIBRARIES
+ ${INVOKE_TEST_PUBLIC_LIBRARIES}
+)
+
+
+# test spinlock_test_common
+set(SPINLOCK_TEST_COMMON_SRC "spinlock_test_common.cc")
+set(SPINLOCK_TEST_COMMON_PUBLIC_LIBRARIES absl::base absl::synchronization)
+
+absl_test(
+ TARGET
+ spinlock_test_common
+ SOURCES
+ ${SPINLOCK_TEST_COMMON_SRC}
+ PUBLIC_LIBRARIES
+ ${SPINLOCK_TEST_COMMON_PUBLIC_LIBRARIES}
+)
+
+
+# test spinlock_test
+set(SPINLOCK_TEST_SRC "spinlock_test_common.cc")
+set(SPINLOCK_TEST_PUBLIC_LIBRARIES absl::base absl::synchronization)
+
+absl_test(
+ TARGET
+ spinlock_test
+ SOURCES
+ ${SPINLOCK_TEST_SRC}
+ PUBLIC_LIBRARIES
+ ${SPINLOCK_TEST_PUBLIC_LIBRARIES}
+)
+
+
+# test endian_test
+set(ENDIAN_TEST_SRC "internal/endian_test.cc")
+
+absl_test(
+ TARGET
+ endian_test
+ SOURCES
+ ${ENDIAN_TEST_SRC}
+)
+
+
+# test config_test
+set(CONFIG_TEST_SRC "config_test.cc")
+set(CONFIG_TEST_PUBLIC_LIBRARIES absl::base absl::synchronization)
+absl_test(
+ TARGET
+ config_test
+ SOURCES
+ ${CONFIG_TEST_SRC}
+ PUBLIC_LIBRARIES
+ ${CONFIG_TEST_PUBLIC_LIBRARIES}
+)
+
+
+# test raw_logging_test
+set(RAW_LOGGING_TEST_SRC "raw_logging_test.cc")
+set(RAW_LOGGING_TEST_PUBLIC_LIBRARIES absl::base)
+
+absl_test(
+ TARGET
+ raw_logging_test
+ SOURCES
+ ${RAW_LOGGING_TEST_SRC}
+ PUBLIC_LIBRARIES
+ ${RAW_LOGGING_TEST_PUBLIC_LIBRARIES}
+)
+
+
+# test sysinfo_test
+set(SYSINFO_TEST_SRC "internal/sysinfo_test.cc")
+set(SYSINFO_TEST_PUBLIC_LIBRARIES absl::base absl::synchronization)
+
+absl_test(
+ TARGET
+ sysinfo_test
+ SOURCES
+ ${SYSINFO_TEST_SRC}
+ PUBLIC_LIBRARIES
+ ${SYSINFO_TEST_PUBLIC_LIBRARIES}
+)
+
+
+# test low_level_alloc_test
+set(LOW_LEVEL_ALLOC_TEST_SRC "internal/low_level_alloc_test.cc")
+set(LOW_LEVEL_ALLOC_TEST_PUBLIC_LIBRARIES absl::base)
+
+absl_test(
+ TARGET
+ low_level_alloc_test
+ SOURCES
+ ${LOW_LEVEL_ALLOC_TEST_SRC}
+ PUBLIC_LIBRARIES
+ ${LOW_LEVEL_ALLOC_TEST_PUBLIC_LIBRARIES}
+)
+
+
+# test thread_identity_test
+set(THREAD_IDENTITY_TEST_SRC "internal/thread_identity_test.cc")
+set(THREAD_IDENTITY_TEST_PUBLIC_LIBRARIES absl::base absl::synchronization)
+
+absl_test(
+ TARGET
+ thread_identity_test
+ SOURCES
+ ${THREAD_IDENTITY_TEST_SRC}
+ PUBLIC_LIBRARIES
+ ${THREAD_IDENTITY_TEST_PUBLIC_LIBRARIES}
+)
+
+
+# test absl_malloc_extension_system_malloc_test
+set(MALLOC_EXTENSION_SYSTEM_MALLOC_TEST_SRC "internal/malloc_extension_test.cc")
+set(MALLOC_EXTENSION_SYSTEM_MALLOC_TEST_PUBLIC_LIBRARIES absl::base absl_malloc_extension)
+set(MALLOC_EXTENSION_SYSTEM_MALLOC_TEST_PRIVATE_COMPILE_FLAGS "-DABSL_MALLOC_EXTENSION_TEST_ALLOW_MISSING_EXTENSION=1")
+
+absl_test(
+ TARGET
+ absl_malloc_extension_system_malloc_test
+ SOURCES
+ ${MALLOC_EXTENSION_SYSTEM_MALLOC_TEST_SRC}
+ PUBLIC_LIBRARIES
+ ${MALLOC_EXTENSION_SYSTEM_MALLOC_TEST_PUBLIC_LIBRARIES}
+ PRIVATE_COMPILE_FLAGS
+ ${MALLOC_EXTENSION_SYSTEM_MALLOC_TEST_PRIVATE_COMPILE_FLAGS}
+)
+
+
+
+
// ABSL_ATTRIBUTE_NO_SANITIZE_UNDEFINED
//
// Tells the UndefinedSanitizer to ignore a given function. Useful for cases
-// where certain behavior (eg. devision by zero) is being used intentionally.
+// where certain behavior (e.g. division by zero) is being used intentionally.
// NOTE: GCC supports UndefinedBehaviorSanitizer(ubsan) since 4.9.
// https://gcc.gnu.org/gcc-4.9/changes.html
#if defined(__GNUC__) && \
#define ABSL_HAVE_TLS 1
#endif
-// There are platforms for which TLS should not be used even though the compiler
-// makes it seem like it's supported (Android NDK < r12b for example).
-// This is primarily because of linker problems and toolchain misconfiguration:
-// Abseil does not intend to support this indefinitely. Currently, the newest
-// toolchain that we intend to support that requires this behavior is the
-// r11 NDK - allowing for a 5 year support window on that means this option
-// is likely to be removed around June of 2021.
-#if defined(__ANDROID__) && defined(__clang__)
-#if __has_include(<android/ndk-version.h>)
-#include <android/ndk-version.h>
-#endif
-// TLS isn't supported until NDK r12b per
-// https://developer.android.com/ndk/downloads/revision_history.html
-// Since NDK r16, `__NDK_MAJOR__` and `__NDK_MINOR__` are defined in
-// <android/ndk-version.h>. For NDK < r16, users should define these macros,
-// e.g. `-D__NDK_MAJOR__=11 -D__NKD_MINOR__=0` for NDK r11.
-#if defined(__NDK_MAJOR__) && defined(__NDK_MINOR__) && \
- ((__NDK_MAJOR__ < 12) || ((__NDK_MAJOR__ == 12) && (__NDK_MINOR__ < 1)))
-#undef ABSL_HAVE_TLS
-#endif
-#endif // defined(__ANDROID__) && defined(__clang__)
-
// ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE
//
// Checks whether `std::is_trivially_destructible<T>` is supported.
#define ABSL_HAVE_THREAD_LOCAL 1
#endif
+// There are platforms for which TLS should not be used even though the compiler
+// makes it seem like it's supported (Android NDK < r12b for example).
+// This is primarily because of linker problems and toolchain misconfiguration:
+// Abseil does not intend to support this indefinitely. Currently, the newest
+// toolchain that we intend to support that requires this behavior is the
+// r11 NDK - allowing for a 5 year support window on that means this option
+// is likely to be removed around June of 2021.
+// TLS isn't supported until NDK r12b per
+// https://developer.android.com/ndk/downloads/revision_history.html
+// Since NDK r16, `__NDK_MAJOR__` and `__NDK_MINOR__` are defined in
+// <android/ndk-version.h>. For NDK < r16, users should define these macros,
+// e.g. `-D__NDK_MAJOR__=11 -D__NDK_MINOR__=0` for NDK r11.
+#if defined(__ANDROID__) && defined(__clang__)
+#if __has_include(<android/ndk-version.h>)
+#include <android/ndk-version.h>
+#endif // __has_include(<android/ndk-version.h>)
+#if defined(__ANDROID__) && defined(__clang__) && defined(__NDK_MAJOR__) && \
+ defined(__NDK_MINOR__) && \
+ ((__NDK_MAJOR__ < 12) || ((__NDK_MAJOR__ == 12) && (__NDK_MINOR__ < 1)))
+#undef ABSL_HAVE_TLS
+#undef ABSL_HAVE_THREAD_LOCAL
+#endif
+#endif // defined(__ANDROID__) && defined(__clang__)
+
// ABSL_HAVE_INTRINSIC_INT128
//
// Checks whether the __int128 compiler extension for a 128-bit integral type is
#ifdef ABSL_HAVE_INTRINSIC_INT128
#error ABSL_HAVE_INTRINSIC_INT128 cannot be directly set
#elif (defined(__clang__) && defined(__SIZEOF_INT128__) && \
- !defined(__ppc64__) && !defined(__aarch64__)) || \
+ !defined(__aarch64__)) || \
(defined(__CUDACC__) && defined(__SIZEOF_INT128__) && \
- __CUDACC_VER__ >= 70000) || \
+ __CUDACC_VER_MAJOR__ >= 9) || \
(!defined(__clang__) && !defined(__CUDACC__) && defined(__GNUC__) && \
defined(__SIZEOF_INT128__))
#define ABSL_HAVE_INTRINSIC_INT128 1
+// __CUDACC_VER__ is a full version number before CUDA 9, and is defined to a
+// string explaining that it has been removed starting with CUDA 9. We can't
+// compare both variants in a single boolean expression because there is no
+// short-circuiting in the preprocessor.
+#elif defined(__CUDACC__) && defined(__SIZEOF_INT128__) && \
+ __CUDACC_VER__ >= 70000
+#define ABSL_HAVE_INTRINSIC_INT128 1
#endif
// ABSL_HAVE_EXCEPTIONS
// Windows _WIN32
// NaCL __native_client__
// AsmJS __asmjs__
-// Fuschia __Fuchsia__
+// Fuchsia __Fuchsia__
//
// Note that since Android defines both __ANDROID__ and __linux__, one
// may probe for either Linux or Android by simply testing for __linux__.
// POSIX.1-2001.
#ifdef ABSL_HAVE_MMAP
#error ABSL_HAVE_MMAP cannot be directly set
-#elif defined(__linux__) || defined(__APPLE__) || defined(__ros__) || \
- defined(__native_client__) || defined(__asmjs__) || defined(__Fuchsia__)
+#elif defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \
+ defined(__ros__) || defined(__native_client__) || defined(__asmjs__) || \
+ defined(__Fuchsia__)
#define ABSL_HAVE_MMAP 1
#endif
// functions as defined in POSIX.1-2001.
#ifdef ABSL_HAVE_PTHREAD_GETSCHEDPARAM
#error ABSL_HAVE_PTHREAD_GETSCHEDPARAM cannot be directly set
-#elif defined(__linux__) || defined(__APPLE__) || defined(__ros__)
+#elif defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \
+ defined(__ros__)
#define ABSL_HAVE_PTHREAD_GETSCHEDPARAM 1
#endif
#endif
#endif
+// For MSVC, `__has_include` is supported in VS 2017 15.3, which is later than
+// the support for <optional>, <any>, <string_view>. So we use _MSC_VER to check
+// whether we have VS 2017 RTM (when <optional>, <any>, <string_view> is
+// implemented) or higher.
+// Also, `__cplusplus` is not correctly set by MSVC, so we use `_MSVC_LANG` to
+// check the language version.
+// TODO(zhangxy): fix tests before enabling aliasing for `std::any`,
+// `std::string_view`.
+#if defined(_MSC_VER) && _MSC_VER >= 1910 && \
+ ((defined(_MSVC_LANG) && _MSVC_LANG > 201402) || __cplusplus > 201402)
+// #define ABSL_HAVE_STD_ANY 1
+#define ABSL_HAVE_STD_OPTIONAL 1
+// #define ABSL_HAVE_STD_STRING_VIEW 1
+#endif
+
#endif // ABSL_BASE_CONFIG_H_
--- /dev/null
+#include "absl/base/internal/exception_safety_testing.h"
+
+#include <cstddef>
+#include <exception>
+#include <iostream>
+#include <list>
+#include <vector>
+
+#include "gtest/gtest-spi.h"
+#include "gtest/gtest.h"
+#include "absl/memory/memory.h"
+
+namespace absl {
+namespace {
+using ::absl::exceptions_internal::TestException;
+
+// EXPECT_NO_THROW can't inspect the thrown exception in general.
+template <typename F>
+void ExpectNoThrow(const F& f) {
+ try {
+ f();
+ } catch (const TestException& e) {
+ ADD_FAILURE() << "Unexpected exception thrown from " << e.what();
+ }
+}
+
+class ThrowingValueTest : public ::testing::Test {
+ protected:
+ void SetUp() override { UnsetCountdown(); }
+
+ private:
+ AllocInspector clouseau_;
+};
+
+TEST_F(ThrowingValueTest, Throws) {
+ SetCountdown();
+ EXPECT_THROW(ThrowingValue<> bomb, TestException);
+
+ // It's not guaranteed that every operator only throws *once*. The default
+ // ctor only throws once, though, so use it to make sure we only throw when
+ // the countdown hits 0
+ exceptions_internal::countdown = 2;
+ ExpectNoThrow([]() { ThrowingValue<> bomb; });
+ ExpectNoThrow([]() { ThrowingValue<> bomb; });
+ EXPECT_THROW(ThrowingValue<> bomb, TestException);
+}
+
+// Tests that an operation throws when the countdown is at 0, doesn't throw when
+// the countdown doesn't hit 0, and doesn't modify the state of the
+// ThrowingValue if it throws
+template <typename F>
+void TestOp(F&& f) {
+ UnsetCountdown();
+ ExpectNoThrow(f);
+
+ SetCountdown();
+ EXPECT_THROW(f(), TestException);
+ UnsetCountdown();
+}
+
+TEST_F(ThrowingValueTest, ThrowingCtors) {
+ ThrowingValue<> bomb;
+
+ TestOp([]() { ThrowingValue<> bomb(1); });
+ TestOp([&]() { ThrowingValue<> bomb1 = bomb; });
+ TestOp([&]() { ThrowingValue<> bomb1 = std::move(bomb); });
+}
+
+TEST_F(ThrowingValueTest, ThrowingAssignment) {
+ ThrowingValue<> bomb, bomb1;
+
+ TestOp([&]() { bomb = bomb1; });
+ TestOp([&]() { bomb = std::move(bomb1); });
+}
+
+TEST_F(ThrowingValueTest, ThrowingComparisons) {
+ ThrowingValue<> bomb1, bomb2;
+ TestOp([&]() { return bomb1 == bomb2; });
+ TestOp([&]() { return bomb1 != bomb2; });
+ TestOp([&]() { return bomb1 < bomb2; });
+ TestOp([&]() { return bomb1 <= bomb2; });
+ TestOp([&]() { return bomb1 > bomb2; });
+ TestOp([&]() { return bomb1 >= bomb2; });
+}
+
+TEST_F(ThrowingValueTest, ThrowingArithmeticOps) {
+ ThrowingValue<> bomb1(1), bomb2(2);
+
+ TestOp([&bomb1]() { +bomb1; });
+ TestOp([&bomb1]() { -bomb1; });
+ TestOp([&bomb1]() { ++bomb1; });
+ TestOp([&bomb1]() { bomb1++; });
+ TestOp([&bomb1]() { --bomb1; });
+ TestOp([&bomb1]() { bomb1--; });
+
+ TestOp([&]() { bomb1 + bomb2; });
+ TestOp([&]() { bomb1 - bomb2; });
+ TestOp([&]() { bomb1* bomb2; });
+ TestOp([&]() { bomb1 / bomb2; });
+ TestOp([&]() { bomb1 << 1; });
+ TestOp([&]() { bomb1 >> 1; });
+}
+
+TEST_F(ThrowingValueTest, ThrowingLogicalOps) {
+ ThrowingValue<> bomb1, bomb2;
+
+ TestOp([&bomb1]() { !bomb1; });
+ TestOp([&]() { bomb1&& bomb2; });
+ TestOp([&]() { bomb1 || bomb2; });
+}
+
+TEST_F(ThrowingValueTest, ThrowingBitwiseOps) {
+ ThrowingValue<> bomb1, bomb2;
+
+ TestOp([&bomb1]() { ~bomb1; });
+ TestOp([&]() { bomb1& bomb2; });
+ TestOp([&]() { bomb1 | bomb2; });
+ TestOp([&]() { bomb1 ^ bomb2; });
+}
+
+TEST_F(ThrowingValueTest, ThrowingCompoundAssignmentOps) {
+ ThrowingValue<> bomb1(1), bomb2(2);
+
+ TestOp([&]() { bomb1 += bomb2; });
+ TestOp([&]() { bomb1 -= bomb2; });
+ TestOp([&]() { bomb1 *= bomb2; });
+ TestOp([&]() { bomb1 /= bomb2; });
+ TestOp([&]() { bomb1 %= bomb2; });
+ TestOp([&]() { bomb1 &= bomb2; });
+ TestOp([&]() { bomb1 |= bomb2; });
+ TestOp([&]() { bomb1 ^= bomb2; });
+ TestOp([&]() { bomb1 *= bomb2; });
+}
+
+TEST_F(ThrowingValueTest, ThrowingStreamOps) {
+ ThrowingValue<> bomb;
+
+ TestOp([&]() { std::cin >> bomb; });
+ TestOp([&]() { std::cout << bomb; });
+}
+
+TEST_F(ThrowingValueTest, ThrowingAllocatingOps) {
+ // make_unique calls unqualified operator new, so these exercise the
+ // ThrowingValue overloads.
+ TestOp([]() { return absl::make_unique<ThrowingValue<>>(1); });
+ TestOp([]() { return absl::make_unique<ThrowingValue<>[]>(2); });
+}
+
+TEST_F(ThrowingValueTest, NonThrowingMoveCtor) {
+ ThrowingValue<NoThrow::kMoveCtor> nothrow_ctor;
+
+ SetCountdown();
+ ExpectNoThrow([&nothrow_ctor]() {
+ ThrowingValue<NoThrow::kMoveCtor> nothrow1 = std::move(nothrow_ctor);
+ });
+}
+
+TEST_F(ThrowingValueTest, NonThrowingMoveAssign) {
+ ThrowingValue<NoThrow::kMoveAssign> nothrow_assign1, nothrow_assign2;
+
+ SetCountdown();
+ ExpectNoThrow([&nothrow_assign1, &nothrow_assign2]() {
+ nothrow_assign1 = std::move(nothrow_assign2);
+ });
+}
+
+TEST_F(ThrowingValueTest, ThrowingSwap) {
+ ThrowingValue<> bomb1, bomb2;
+ TestOp([&]() { std::swap(bomb1, bomb2); });
+
+ ThrowingValue<NoThrow::kMoveCtor> bomb3, bomb4;
+ TestOp([&]() { std::swap(bomb3, bomb4); });
+
+ ThrowingValue<NoThrow::kMoveAssign> bomb5, bomb6;
+ TestOp([&]() { std::swap(bomb5, bomb6); });
+}
+
+TEST_F(ThrowingValueTest, NonThrowingSwap) {
+ ThrowingValue<NoThrow::kMoveAssign | NoThrow::kMoveCtor> bomb1, bomb2;
+ ExpectNoThrow([&]() { std::swap(bomb1, bomb2); });
+}
+
+TEST_F(ThrowingValueTest, NonThrowingAllocation) {
+ ThrowingValue<NoThrow::kAllocation>* allocated;
+ ThrowingValue<NoThrow::kAllocation>* array;
+
+ ExpectNoThrow([&allocated]() {
+ allocated = new ThrowingValue<NoThrow::kAllocation>(1);
+ delete allocated;
+ });
+ ExpectNoThrow([&array]() {
+ array = new ThrowingValue<NoThrow::kAllocation>[2];
+ delete[] array;
+ });
+}
+
+TEST_F(ThrowingValueTest, NonThrowingDelete) {
+ auto* allocated = new ThrowingValue<>(1);
+ auto* array = new ThrowingValue<>[2];
+
+ SetCountdown();
+ ExpectNoThrow([allocated]() { delete allocated; });
+ SetCountdown();
+ ExpectNoThrow([array]() { delete[] array; });
+}
+
+using Storage =
+ absl::aligned_storage_t<sizeof(ThrowingValue<>), alignof(ThrowingValue<>)>;
+
+TEST_F(ThrowingValueTest, NonThrowingPlacementDelete) {
+ constexpr int kArrayLen = 2;
+ // We intentionally create extra space to store the tag allocated by placement
+ // new[].
+ constexpr int kStorageLen = 4;
+
+ Storage buf;
+ Storage array_buf[kStorageLen];
+ auto* placed = new (&buf) ThrowingValue<>(1);
+ auto placed_array = new (&array_buf) ThrowingValue<>[kArrayLen];
+
+ SetCountdown();
+ ExpectNoThrow([placed, &buf]() {
+ placed->~ThrowingValue<>();
+ ThrowingValue<>::operator delete(placed, &buf);
+ });
+
+ SetCountdown();
+ ExpectNoThrow([&, placed_array]() {
+ for (int i = 0; i < kArrayLen; ++i) placed_array[i].~ThrowingValue<>();
+ ThrowingValue<>::operator delete[](placed_array, &array_buf);
+ });
+}
+
+TEST_F(ThrowingValueTest, NonThrowingDestructor) {
+ auto* allocated = new ThrowingValue<>();
+ SetCountdown();
+ ExpectNoThrow([allocated]() { delete allocated; });
+}
+
+TEST(ThrowingBoolTest, ThrowingBool) {
+ UnsetCountdown();
+ ThrowingBool t = true;
+
+ // Test that it's contextually convertible to bool
+ if (t) { // NOLINT(whitespace/empty_if_body)
+ }
+ EXPECT_TRUE(t);
+
+ TestOp([&]() { (void)!t; });
+}
+
+class ThrowingAllocatorTest : public ::testing::Test {
+ protected:
+ void SetUp() override { UnsetCountdown(); }
+
+ private:
+ AllocInspector borlu_;
+};
+
+TEST_F(ThrowingAllocatorTest, MemoryManagement) {
+ // Just exercise the memory management capabilities under LSan to make sure we
+ // don't leak.
+ ThrowingAllocator<int> int_alloc;
+ int* ip = int_alloc.allocate(1);
+ int_alloc.deallocate(ip, 1);
+ int* i_array = int_alloc.allocate(2);
+ int_alloc.deallocate(i_array, 2);
+
+ ThrowingAllocator<ThrowingValue<>> ef_alloc;
+ ThrowingValue<>* efp = ef_alloc.allocate(1);
+ ef_alloc.deallocate(efp, 1);
+ ThrowingValue<>* ef_array = ef_alloc.allocate(2);
+ ef_alloc.deallocate(ef_array, 2);
+}
+
+TEST_F(ThrowingAllocatorTest, CallsGlobalNew) {
+ ThrowingAllocator<ThrowingValue<>, NoThrow::kNoThrow> nothrow_alloc;
+ ThrowingValue<>* ptr;
+
+ SetCountdown();
+ // This will only throw if ThrowingValue::new is called.
+ ExpectNoThrow([&]() { ptr = nothrow_alloc.allocate(1); });
+ nothrow_alloc.deallocate(ptr, 1);
+}
+
+TEST_F(ThrowingAllocatorTest, ThrowingConstructors) {
+ ThrowingAllocator<int> int_alloc;
+ int* ip = nullptr;
+
+ SetCountdown();
+ EXPECT_THROW(ip = int_alloc.allocate(1), TestException);
+ ExpectNoThrow([&]() { ip = int_alloc.allocate(1); });
+
+ *ip = 1;
+ SetCountdown();
+ EXPECT_THROW(int_alloc.construct(ip, 2), TestException);
+ EXPECT_EQ(*ip, 1);
+ int_alloc.deallocate(ip, 1);
+}
+
+TEST_F(ThrowingAllocatorTest, NonThrowingConstruction) {
+ {
+ ThrowingAllocator<int, NoThrow::kNoThrow> int_alloc;
+ int* ip = nullptr;
+
+ SetCountdown();
+ ExpectNoThrow([&]() { ip = int_alloc.allocate(1); });
+ SetCountdown();
+ ExpectNoThrow([&]() { int_alloc.construct(ip, 2); });
+ EXPECT_EQ(*ip, 2);
+ int_alloc.deallocate(ip, 1);
+ }
+
+ UnsetCountdown();
+ {
+ ThrowingAllocator<int> int_alloc;
+ int* ip = nullptr;
+ ExpectNoThrow([&]() { ip = int_alloc.allocate(1); });
+ ExpectNoThrow([&]() { int_alloc.construct(ip, 2); });
+ EXPECT_EQ(*ip, 2);
+ int_alloc.deallocate(ip, 1);
+ }
+
+ UnsetCountdown();
+ {
+ ThrowingAllocator<ThrowingValue<NoThrow::kIntCtor>, NoThrow::kNoThrow>
+ ef_alloc;
+ ThrowingValue<NoThrow::kIntCtor>* efp;
+ SetCountdown();
+ ExpectNoThrow([&]() { efp = ef_alloc.allocate(1); });
+ SetCountdown();
+ ExpectNoThrow([&]() { ef_alloc.construct(efp, 2); });
+ EXPECT_EQ(efp->Get(), 2);
+ ef_alloc.destroy(efp);
+ ef_alloc.deallocate(efp, 1);
+ }
+
+ UnsetCountdown();
+ {
+ ThrowingAllocator<int> a;
+ SetCountdown();
+ ExpectNoThrow([&]() { ThrowingAllocator<double> a1 = a; });
+ SetCountdown();
+ ExpectNoThrow([&]() { ThrowingAllocator<double> a1 = std::move(a); });
+ }
+}
+
+TEST_F(ThrowingAllocatorTest, ThrowingAllocatorConstruction) {
+ ThrowingAllocator<int> a;
+ TestOp([]() { ThrowingAllocator<int> a; });
+ TestOp([&]() { a.select_on_container_copy_construction(); });
+}
+
+TEST_F(ThrowingAllocatorTest, State) {
+ ThrowingAllocator<int> a1, a2;
+ EXPECT_NE(a1, a2);
+
+ auto a3 = a1;
+ EXPECT_EQ(a3, a1);
+ int* ip = a1.allocate(1);
+ EXPECT_EQ(a3, a1);
+ a3.deallocate(ip, 1);
+ EXPECT_EQ(a3, a1);
+}
+
+TEST_F(ThrowingAllocatorTest, InVector) {
+ std::vector<ThrowingValue<>, ThrowingAllocator<ThrowingValue<>>> v;
+ for (int i = 0; i < 20; ++i) v.push_back({});
+ for (int i = 0; i < 20; ++i) v.pop_back();
+}
+
+TEST_F(ThrowingAllocatorTest, InList) {
+ std::list<ThrowingValue<>, ThrowingAllocator<ThrowingValue<>>> l;
+ for (int i = 0; i < 20; ++i) l.push_back({});
+ for (int i = 0; i < 20; ++i) l.pop_back();
+ for (int i = 0; i < 20; ++i) l.push_front({});
+ for (int i = 0; i < 20; ++i) l.pop_front();
+}
+
+struct CallOperator {
+ template <typename T>
+ void operator()(T* t) const {
+ (*t)();
+ }
+};
+
+struct NonNegative {
+ friend testing::AssertionResult AbslCheckInvariants(NonNegative* g) {
+ if (g->i >= 0) return testing::AssertionSuccess();
+ return testing::AssertionFailure()
+ << "i should be non-negative but is " << g->i;
+ }
+ bool operator==(const NonNegative& other) const { return i == other.i; }
+
+ int i;
+};
+
+template <typename T>
+struct DefaultFactory {
+ std::unique_ptr<T> operator()() const { return absl::make_unique<T>(); }
+};
+
+struct FailsBasicGuarantee : public NonNegative {
+ void operator()() {
+ --i;
+ ThrowingValue<> bomb;
+ ++i;
+ }
+};
+
+TEST(ExceptionCheckTest, BasicGuaranteeFailure) {
+ EXPECT_FALSE(TestExceptionSafety(DefaultFactory<FailsBasicGuarantee>(),
+ CallOperator{}));
+}
+
+struct FollowsBasicGuarantee : public NonNegative {
+ void operator()() {
+ ++i;
+ ThrowingValue<> bomb;
+ }
+};
+
+TEST(ExceptionCheckTest, BasicGuarantee) {
+ EXPECT_TRUE(TestExceptionSafety(DefaultFactory<FollowsBasicGuarantee>(),
+ CallOperator{}));
+}
+
+TEST(ExceptionCheckTest, StrongGuaranteeFailure) {
+ {
+ DefaultFactory<FailsBasicGuarantee> factory;
+ EXPECT_FALSE(
+ TestExceptionSafety(factory, CallOperator{}, StrongGuarantee(factory)));
+ }
+
+ {
+ DefaultFactory<FollowsBasicGuarantee> factory;
+ EXPECT_FALSE(
+ TestExceptionSafety(factory, CallOperator{}, StrongGuarantee(factory)));
+ }
+}
+
+struct BasicGuaranteeWithExtraInvariants : public NonNegative {
+ // After operator(), i is incremented. If operator() throws, i is set to 9999
+ void operator()() {
+ int old_i = i;
+ i = kExceptionSentinel;
+ ThrowingValue<> bomb;
+ i = ++old_i;
+ }
+
+ static constexpr int kExceptionSentinel = 9999;
+};
+constexpr int BasicGuaranteeWithExtraInvariants::kExceptionSentinel;
+
+TEST(ExceptionCheckTest, BasicGuaranteeWithInvariants) {
+ DefaultFactory<BasicGuaranteeWithExtraInvariants> factory;
+
+ EXPECT_TRUE(TestExceptionSafety(factory, CallOperator{}));
+
+ EXPECT_TRUE(TestExceptionSafety(
+ factory, CallOperator{}, [](BasicGuaranteeWithExtraInvariants* w) {
+ if (w->i == BasicGuaranteeWithExtraInvariants::kExceptionSentinel) {
+ return testing::AssertionSuccess();
+ }
+ return testing::AssertionFailure()
+ << "i should be "
+ << BasicGuaranteeWithExtraInvariants::kExceptionSentinel
+ << ", but is " << w->i;
+ }));
+}
+
+struct FollowsStrongGuarantee : public NonNegative {
+ void operator()() { ThrowingValue<> bomb; }
+};
+
+TEST(ExceptionCheckTest, StrongGuarantee) {
+ DefaultFactory<FollowsStrongGuarantee> factory;
+ EXPECT_TRUE(TestExceptionSafety(factory, CallOperator{}));
+ EXPECT_TRUE(
+ TestExceptionSafety(factory, CallOperator{}, StrongGuarantee(factory)));
+}
+
+struct HasReset : public NonNegative {
+ void operator()() {
+ i = -1;
+ ThrowingValue<> bomb;
+ i = 1;
+ }
+
+ void reset() { i = 0; }
+
+ friend bool AbslCheckInvariants(HasReset* h) {
+ h->reset();
+ return h->i == 0;
+ }
+};
+
+TEST(ExceptionCheckTest, ModifyingChecker) {
+ {
+ DefaultFactory<FollowsBasicGuarantee> factory;
+ EXPECT_FALSE(TestExceptionSafety(
+ factory, CallOperator{},
+ [](FollowsBasicGuarantee* g) {
+ g->i = 1000;
+ return true;
+ },
+ [](FollowsBasicGuarantee* g) { return g->i == 1000; }));
+ }
+ {
+ DefaultFactory<FollowsStrongGuarantee> factory;
+ EXPECT_TRUE(TestExceptionSafety(factory, CallOperator{},
+ [](FollowsStrongGuarantee* g) {
+ ++g->i;
+ return true;
+ },
+ StrongGuarantee(factory)));
+ }
+ {
+ DefaultFactory<HasReset> factory;
+ EXPECT_TRUE(TestExceptionSafety(factory, CallOperator{}));
+ }
+}
+
+struct NonCopyable : public NonNegative {
+ NonCopyable(const NonCopyable&) = delete;
+ NonCopyable() : NonNegative{0} {}
+
+ void operator()() { ThrowingValue<> bomb; }
+};
+
+TEST(ExceptionCheckTest, NonCopyable) {
+ DefaultFactory<NonCopyable> factory;
+ EXPECT_TRUE(TestExceptionSafety(factory, CallOperator{}));
+ EXPECT_TRUE(
+ TestExceptionSafety(factory, CallOperator{}, StrongGuarantee(factory)));
+}
+
+struct NonEqualityComparable : public NonNegative {
+ void operator()() { ThrowingValue<> bomb; }
+
+ void ModifyOnThrow() {
+ ++i;
+ ThrowingValue<> bomb;
+ static_cast<void>(bomb);
+ --i;
+ }
+};
+
+TEST(ExceptionCheckTest, NonEqualityComparable) {
+ DefaultFactory<NonEqualityComparable> factory;
+ auto comp = [](const NonEqualityComparable& a,
+ const NonEqualityComparable& b) { return a.i == b.i; };
+ EXPECT_TRUE(TestExceptionSafety(factory, CallOperator{}));
+ EXPECT_TRUE(TestExceptionSafety(factory, CallOperator{},
+ absl::StrongGuarantee(factory, comp)));
+ EXPECT_FALSE(TestExceptionSafety(
+ factory, [&](NonEqualityComparable* n) { n->ModifyOnThrow(); },
+ absl::StrongGuarantee(factory, comp)));
+}
+
+template <typename T>
+struct ExhaustivenessTester {
+ void operator()() {
+ successes |= 1;
+ T b1;
+ static_cast<void>(b1);
+ successes |= (1 << 1);
+ T b2;
+ static_cast<void>(b2);
+ successes |= (1 << 2);
+ T b3;
+ static_cast<void>(b3);
+ successes |= (1 << 3);
+ }
+
+ bool operator==(const ExhaustivenessTester<ThrowingValue<>>&) const {
+ return true;
+ }
+
+ friend testing::AssertionResult AbslCheckInvariants(ExhaustivenessTester*) {
+ return testing::AssertionSuccess();
+ }
+
+ static unsigned char successes;
+};
+template <typename T>
+unsigned char ExhaustivenessTester<T>::successes = 0;
+
+TEST(ExceptionCheckTest, Exhaustiveness) {
+ DefaultFactory<ExhaustivenessTester<int>> int_factory;
+ EXPECT_TRUE(TestExceptionSafety(int_factory, CallOperator{}));
+ EXPECT_EQ(ExhaustivenessTester<int>::successes, 0xF);
+
+ DefaultFactory<ExhaustivenessTester<ThrowingValue<>>> bomb_factory;
+ EXPECT_TRUE(TestExceptionSafety(bomb_factory, CallOperator{}));
+ EXPECT_EQ(ExhaustivenessTester<ThrowingValue<>>::successes, 0xF);
+
+ ExhaustivenessTester<ThrowingValue<>>::successes = 0;
+ EXPECT_TRUE(TestExceptionSafety(bomb_factory, CallOperator{},
+ StrongGuarantee(bomb_factory)));
+ EXPECT_EQ(ExhaustivenessTester<ThrowingValue<>>::successes, 0xF);
+}
+
+struct LeaksIfCtorThrows : private exceptions_internal::TrackedObject {
+ LeaksIfCtorThrows() : TrackedObject(ABSL_PRETTY_FUNCTION) {
+ ++counter;
+ ThrowingValue<> v;
+ static_cast<void>(v);
+ --counter;
+ }
+ LeaksIfCtorThrows(const LeaksIfCtorThrows&) noexcept
+ : TrackedObject(ABSL_PRETTY_FUNCTION) {}
+ static int counter;
+};
+int LeaksIfCtorThrows::counter = 0;
+
+TEST(ExceptionCheckTest, TestLeakyCtor) {
+ absl::TestThrowingCtor<LeaksIfCtorThrows>();
+ EXPECT_EQ(LeaksIfCtorThrows::counter, 1);
+ LeaksIfCtorThrows::counter = 0;
+}
+
+struct Tracked : private exceptions_internal::TrackedObject {
+ Tracked() : TrackedObject(ABSL_PRETTY_FUNCTION) {}
+};
+
+TEST(AllocInspectorTest, Pass) {
+ AllocInspector javert;
+ Tracked t;
+}
+
+TEST(AllocInspectorTest, NotDestroyed) {
+ absl::aligned_storage_t<sizeof(Tracked), alignof(Tracked)> storage;
+ EXPECT_NONFATAL_FAILURE(
+ {
+ AllocInspector gadget;
+ new (&storage) Tracked;
+ },
+ "not destroyed");
+}
+
+TEST(AllocInspectorTest, DestroyedTwice) {
+ EXPECT_NONFATAL_FAILURE(
+ {
+ Tracked t;
+ t.~Tracked();
+ },
+ "destroyed improperly");
+}
+
+TEST(AllocInspectorTest, ConstructedTwice) {
+ absl::aligned_storage_t<sizeof(Tracked), alignof(Tracked)> storage;
+ EXPECT_NONFATAL_FAILURE(
+ {
+ new (&storage) Tracked;
+ new (&storage) Tracked;
+ },
+ "re-constructed");
+}
+} // namespace
+} // namespace absl
#elif defined(__APPLE__)
// Mac OS X / Darwin features
#include <libkern/OSByteOrder.h>
+#elif defined(__FreeBSD__)
+#include <sys/endian.h>
#elif defined(__GLIBC__)
#include <byteswap.h> // IWYU pragma: export
#endif
--- /dev/null
+#include "absl/base/internal/exception_safety_testing.h"
+
+#include "gtest/gtest.h"
+#include "absl/meta/type_traits.h"
+
+namespace absl {
+exceptions_internal::NoThrowTag no_throw_ctor;
+namespace exceptions_internal {
+
+int countdown = -1;
+
+void MaybeThrow(absl::string_view msg) {
+ if (countdown-- == 0) throw TestException(msg);
+}
+
+testing::AssertionResult FailureMessage(const TestException& e,
+ int countdown) noexcept {
+ return testing::AssertionFailure() << "Exception thrown from " << e.what();
+}
+} // namespace exceptions_internal
+} // namespace absl
--- /dev/null
+// Utilities for testing exception-safety
+
+#ifndef ABSL_BASE_INTERNAL_EXCEPTION_SAFETY_TESTING_H_
+#define ABSL_BASE_INTERNAL_EXCEPTION_SAFETY_TESTING_H_
+
+#include <cstddef>
+#include <cstdint>
+#include <functional>
+#include <initializer_list>
+#include <iosfwd>
+#include <string>
+#include <unordered_map>
+
+#include "gtest/gtest.h"
+#include "absl/base/config.h"
+#include "absl/base/internal/pretty_function.h"
+#include "absl/memory/memory.h"
+#include "absl/meta/type_traits.h"
+#include "absl/strings/string_view.h"
+#include "absl/strings/substitute.h"
+#include "absl/types/optional.h"
+
+namespace absl {
+struct AllocInspector;
+
+// A configuration enum for Throwing*. Operations whose flags are set will
+// throw; everything else won't. This isn't meant to be exhaustive; more flags
+// can always be added in the future.
+enum class NoThrow : uint8_t {
+ kNone = 0,
+ kMoveCtor = 1,
+ kMoveAssign = 1 << 1,
+ kAllocation = 1 << 2,
+ kIntCtor = 1 << 3,
+ kNoThrow = static_cast<uint8_t>(-1)
+};
+
+constexpr NoThrow operator|(NoThrow a, NoThrow b) {
+ using T = absl::underlying_type_t<NoThrow>;
+ return static_cast<NoThrow>(static_cast<T>(a) | static_cast<T>(b));
+}
+
+constexpr NoThrow operator&(NoThrow a, NoThrow b) {
+ using T = absl::underlying_type_t<NoThrow>;
+ return static_cast<NoThrow>(static_cast<T>(a) & static_cast<T>(b));
+}
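+
+// Illustrative note (a sketch only; see also the ThrowingValue documentation
+// below): flags combine with the operator| defined above, so a test value
+// whose move operations are noexcept while everything else may throw is
+// spelled
+//
+//   ThrowingValue<NoThrow::kMoveCtor | NoThrow::kMoveAssign> v;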
+
+namespace exceptions_internal {
+struct NoThrowTag {};
+
+constexpr bool ThrowingAllowed(NoThrow flags, NoThrow flag) {
+ return !static_cast<bool>(flags & flag);
+}
+
+// A simple exception class. We throw this so that test code can catch
+// exceptions specifically thrown by ThrowingValue.
+class TestException {
+ public:
+ explicit TestException(absl::string_view msg) : msg_(msg) {}
+ absl::string_view what() const { return msg_; }
+
+ private:
+ std::string msg_;
+};
+
+extern int countdown;
+
+void MaybeThrow(absl::string_view msg);
+
+testing::AssertionResult FailureMessage(const TestException& e,
+ int countdown) noexcept;
+
+class TrackedObject {
+ protected:
+ explicit TrackedObject(absl::string_view child_ctor) {
+ if (!GetAllocs().emplace(this, child_ctor).second) {
+ ADD_FAILURE() << "Object at address " << static_cast<void*>(this)
+ << " re-constructed in ctor " << child_ctor;
+ }
+ }
+
+ TrackedObject(const TrackedObject&) = delete;
+ TrackedObject(TrackedObject&&) = delete;
+
+ static std::unordered_map<TrackedObject*, absl::string_view>& GetAllocs() {
+ static auto* m =
+ new std::unordered_map<TrackedObject*, absl::string_view>();
+ return *m;
+ }
+
+ ~TrackedObject() noexcept {
+ if (GetAllocs().erase(this) == 0) {
+ ADD_FAILURE() << "Object at address " << static_cast<void*>(this)
+ << " destroyed improperly";
+ }
+ }
+
+ friend struct ::absl::AllocInspector;
+};
+
+template <typename Factory>
+using FactoryType = typename absl::result_of_t<Factory()>::element_type;
+
+// Returns an optional with the result of the check if op fails, or an empty
+// optional if op passes
+template <typename Factory, typename Op, typename Checker>
+absl::optional<testing::AssertionResult> TestCheckerAtCountdown(
+ Factory factory, const Op& op, int count, const Checker& check) {
+ exceptions_internal::countdown = count;
+ auto t_ptr = factory();
+ absl::optional<testing::AssertionResult> out;
+ try {
+ op(t_ptr.get());
+ } catch (const exceptions_internal::TestException& e) {
+ out.emplace(check(t_ptr.get()));
+ if (!*out) {
+ *out << " caused by exception thrown by " << e.what();
+ }
+ }
+ return out;
+}
+
+template <typename Factory, typename Op, typename Checker>
+int UpdateOut(Factory factory, const Op& op, int count, const Checker& checker,
+ testing::AssertionResult* out) {
+ if (*out) *out = *TestCheckerAtCountdown(factory, op, count, checker);
+ return 0;
+}
+
+// Returns an optional with the result of the check if op fails, or an empty
+// optional if op passes
+template <typename Factory, typename Op, typename... Checkers>
+absl::optional<testing::AssertionResult> TestAtCountdown(
+ Factory factory, const Op& op, int count, const Checkers&... checkers) {
+ // Don't bother with the checkers if the class invariants are already broken.
+ auto out = TestCheckerAtCountdown(
+ factory, op, count,
+ [](FactoryType<Factory>* t_ptr) { return AbslCheckInvariants(t_ptr); });
+ if (!out.has_value()) return out;
+
+ // Run each checker, short circuiting after the first failure
+ int dummy[] = {0, (UpdateOut(factory, op, count, checkers, &*out))...};
+ static_cast<void>(dummy);
+ return out;
+}
+
+template <typename T, typename EqualTo>
+class StrongGuaranteeTester {
+ public:
+ explicit StrongGuaranteeTester(std::unique_ptr<T> t_ptr, EqualTo eq) noexcept
+ : val_(std::move(t_ptr)), eq_(eq) {}
+
+ testing::AssertionResult operator()(T* other) const {
+ return eq_(*val_, *other) ? testing::AssertionSuccess()
+ : testing::AssertionFailure() << "State changed";
+ }
+
+ private:
+ std::unique_ptr<T> val_;
+ EqualTo eq_;
+};
+} // namespace exceptions_internal
+
+extern exceptions_internal::NoThrowTag no_throw_ctor;
+
+// These are useful for tests which just construct objects and make sure there
+// are no leaks.
+inline void SetCountdown() { exceptions_internal::countdown = 0; }
+inline void UnsetCountdown() { exceptions_internal::countdown = -1; }
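+
+// A minimal usage sketch (mirroring exception_safety_testing_test.cc in this
+// patch): arm the countdown so the very next throwing operation fires, then
+// disarm it again afterwards.
+//
+//   SetCountdown();
+//   EXPECT_THROW(ThrowingValue<> bomb, exceptions_internal::TestException);
+//   UnsetCountdown();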
+
+// A test class which is contextually convertible to bool. The conversion can
+// be instrumented to throw at a controlled time.
+class ThrowingBool {
+ public:
+ ThrowingBool(bool b) noexcept : b_(b) {} // NOLINT(runtime/explicit)
+ explicit operator bool() const {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return b_;
+ }
+
+ private:
+ bool b_;
+};
+
+// A testing class instrumented to throw an exception at a controlled time.
+//
+// ThrowingValue implements a slightly relaxed version of the Regular concept --
+// that is, it's a value type with the expected semantics. It also implements
+// arithmetic operations. It doesn't implement member and pointer operators
+// like operator-> or operator[].
+//
+// ThrowingValue can be instrumented to have certain operations be noexcept by
+// using compile-time bitfield flag template arguments. That is, to make a
+// ThrowingValue which has a noexcept move constructor and noexcept move
+// assignment, use
+// ThrowingValue<absl::NoThrow::kMoveCtor | absl::NoThrow::kMoveAssign>.
+template <NoThrow Flags = NoThrow::kNone>
+class ThrowingValue : private exceptions_internal::TrackedObject {
+ public:
+ ThrowingValue() : TrackedObject(ABSL_PRETTY_FUNCTION) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ dummy_ = 0;
+ }
+
+ ThrowingValue(const ThrowingValue& other)
+ : TrackedObject(ABSL_PRETTY_FUNCTION) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ dummy_ = other.dummy_;
+ }
+
+ ThrowingValue(ThrowingValue&& other) noexcept(
+ !exceptions_internal::ThrowingAllowed(Flags, NoThrow::kMoveCtor))
+ : TrackedObject(ABSL_PRETTY_FUNCTION) {
+ if (exceptions_internal::ThrowingAllowed(Flags, NoThrow::kMoveCtor)) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ }
+ dummy_ = other.dummy_;
+ }
+
+ explicit ThrowingValue(int i) noexcept(
+ !exceptions_internal::ThrowingAllowed(Flags, NoThrow::kIntCtor))
+ : TrackedObject(ABSL_PRETTY_FUNCTION) {
+ if (exceptions_internal::ThrowingAllowed(Flags, NoThrow::kIntCtor)) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ }
+ dummy_ = i;
+ }
+
+ ThrowingValue(int i, exceptions_internal::NoThrowTag) noexcept
+ : TrackedObject(ABSL_PRETTY_FUNCTION), dummy_(i) {}
+
+ // absl expects nothrow destructors
+ ~ThrowingValue() noexcept = default;
+
+ ThrowingValue& operator=(const ThrowingValue& other) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ dummy_ = other.dummy_;
+ return *this;
+ }
+
+ ThrowingValue& operator=(ThrowingValue&& other) noexcept(
+ !exceptions_internal::ThrowingAllowed(Flags, NoThrow::kMoveAssign)) {
+ if (exceptions_internal::ThrowingAllowed(Flags, NoThrow::kMoveAssign)) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ }
+ dummy_ = other.dummy_;
+ return *this;
+ }
+
+ // Arithmetic Operators
+ ThrowingValue operator+(const ThrowingValue& other) const {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return ThrowingValue(dummy_ + other.dummy_, no_throw_ctor);
+ }
+
+ ThrowingValue operator+() const {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return ThrowingValue(dummy_, no_throw_ctor);
+ }
+
+ ThrowingValue operator-(const ThrowingValue& other) const {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return ThrowingValue(dummy_ - other.dummy_, no_throw_ctor);
+ }
+
+ ThrowingValue operator-() const {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return ThrowingValue(-dummy_, no_throw_ctor);
+ }
+
+ ThrowingValue& operator++() {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ ++dummy_;
+ return *this;
+ }
+
+ ThrowingValue operator++(int) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ auto out = ThrowingValue(dummy_, no_throw_ctor);
+ ++dummy_;
+ return out;
+ }
+
+ ThrowingValue& operator--() {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ --dummy_;
+ return *this;
+ }
+
+ ThrowingValue operator--(int) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ auto out = ThrowingValue(dummy_, no_throw_ctor);
+ --dummy_;
+ return out;
+ }
+
+ ThrowingValue operator*(const ThrowingValue& other) const {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return ThrowingValue(dummy_ * other.dummy_, no_throw_ctor);
+ }
+
+ ThrowingValue operator/(const ThrowingValue& other) const {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return ThrowingValue(dummy_ / other.dummy_, no_throw_ctor);
+ }
+
+ ThrowingValue operator%(const ThrowingValue& other) const {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return ThrowingValue(dummy_ % other.dummy_, no_throw_ctor);
+ }
+
+ ThrowingValue operator<<(int shift) const {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return ThrowingValue(dummy_ << shift, no_throw_ctor);
+ }
+
+ ThrowingValue operator>>(int shift) const {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return ThrowingValue(dummy_ >> shift, no_throw_ctor);
+ }
+
+ // Comparison Operators
+ friend ThrowingBool operator==(const ThrowingValue& a,
+ const ThrowingValue& b) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return a.dummy_ == b.dummy_;
+ }
+ friend ThrowingBool operator!=(const ThrowingValue& a,
+ const ThrowingValue& b) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return a.dummy_ != b.dummy_;
+ }
+ friend ThrowingBool operator<(const ThrowingValue& a,
+ const ThrowingValue& b) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return a.dummy_ < b.dummy_;
+ }
+ friend ThrowingBool operator<=(const ThrowingValue& a,
+ const ThrowingValue& b) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return a.dummy_ <= b.dummy_;
+ }
+ friend ThrowingBool operator>(const ThrowingValue& a,
+ const ThrowingValue& b) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return a.dummy_ > b.dummy_;
+ }
+ friend ThrowingBool operator>=(const ThrowingValue& a,
+ const ThrowingValue& b) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return a.dummy_ >= b.dummy_;
+ }
+
+ // Logical Operators
+ ThrowingBool operator!() const {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return !dummy_;
+ }
+
+ ThrowingBool operator&&(const ThrowingValue& other) const {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return dummy_ && other.dummy_;
+ }
+
+ ThrowingBool operator||(const ThrowingValue& other) const {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return dummy_ || other.dummy_;
+ }
+
+ // Bitwise Logical Operators
+ ThrowingValue operator~() const {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return ThrowingValue(~dummy_, no_throw_ctor);
+ }
+
+ ThrowingValue operator&(const ThrowingValue& other) const {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return ThrowingValue(dummy_ & other.dummy_, no_throw_ctor);
+ }
+
+ ThrowingValue operator|(const ThrowingValue& other) const {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return ThrowingValue(dummy_ | other.dummy_, no_throw_ctor);
+ }
+
+ ThrowingValue operator^(const ThrowingValue& other) const {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return ThrowingValue(dummy_ ^ other.dummy_, no_throw_ctor);
+ }
+
+ // Compound Assignment operators
+ ThrowingValue& operator+=(const ThrowingValue& other) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ dummy_ += other.dummy_;
+ return *this;
+ }
+
+ ThrowingValue& operator-=(const ThrowingValue& other) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ dummy_ -= other.dummy_;
+ return *this;
+ }
+
+ ThrowingValue& operator*=(const ThrowingValue& other) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ dummy_ *= other.dummy_;
+ return *this;
+ }
+
+ ThrowingValue& operator/=(const ThrowingValue& other) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ dummy_ /= other.dummy_;
+ return *this;
+ }
+
+ ThrowingValue& operator%=(const ThrowingValue& other) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ dummy_ %= other.dummy_;
+ return *this;
+ }
+
+ ThrowingValue& operator&=(const ThrowingValue& other) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ dummy_ &= other.dummy_;
+ return *this;
+ }
+
+ ThrowingValue& operator|=(const ThrowingValue& other) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ dummy_ |= other.dummy_;
+ return *this;
+ }
+
+ ThrowingValue& operator^=(const ThrowingValue& other) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ dummy_ ^= other.dummy_;
+ return *this;
+ }
+
+ ThrowingValue& operator<<=(int shift) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ dummy_ <<= shift;
+ return *this;
+ }
+
+ ThrowingValue& operator>>=(int shift) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ dummy_ >>= shift;
+ return *this;
+ }
+
+ // Pointer operators
+ void operator&() const = delete; // NOLINT(runtime/operator)
+
+ // Stream operators
+ friend std::ostream& operator<<(std::ostream& os, const ThrowingValue&) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return os;
+ }
+
+ friend std::istream& operator>>(std::istream& is, const ThrowingValue&) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return is;
+ }
+
+ // Memory management operators
+  // Args... allows us to overload regular and placement new in one shot
+ template <typename... Args>
+ static void* operator new(size_t s, Args&&... args) noexcept(
+ !exceptions_internal::ThrowingAllowed(Flags, NoThrow::kAllocation)) {
+ if (exceptions_internal::ThrowingAllowed(Flags, NoThrow::kAllocation)) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ }
+ return ::operator new(s, std::forward<Args>(args)...);
+ }
+
+ template <typename... Args>
+ static void* operator new[](size_t s, Args&&... args) noexcept(
+ !exceptions_internal::ThrowingAllowed(Flags, NoThrow::kAllocation)) {
+ if (exceptions_internal::ThrowingAllowed(Flags, NoThrow::kAllocation)) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ }
+ return ::operator new[](s, std::forward<Args>(args)...);
+ }
+
+ // Abseil doesn't support throwing overloaded operator delete. These are
+ // provided so a throwing operator-new can clean up after itself.
+ //
+ // We provide both regular and templated operator delete because if only the
+  // templated version is provided, as we did with operator new, the compiler has
+ // no way of knowing which overload of operator delete to call. See
+ // http://en.cppreference.com/w/cpp/memory/new/operator_delete and
+ // http://en.cppreference.com/w/cpp/language/delete for the gory details.
+ void operator delete(void* p) noexcept { ::operator delete(p); }
+
+ template <typename... Args>
+ void operator delete(void* p, Args&&... args) noexcept {
+ ::operator delete(p, std::forward<Args>(args)...);
+ }
+
+ void operator delete[](void* p) noexcept { return ::operator delete[](p); }
+
+ template <typename... Args>
+ void operator delete[](void* p, Args&&... args) noexcept {
+ return ::operator delete[](p, std::forward<Args>(args)...);
+ }
+
+ // Non-standard access to the actual contained value. No need for this to
+ // throw.
+ int& Get() noexcept { return dummy_; }
+ const int& Get() const noexcept { return dummy_; }
+
+ private:
+ int dummy_;
+};
+// While not having to do with exceptions, explicitly delete comma operator, to
+// make sure we don't use it on user-supplied types.
+template <NoThrow N, typename T>
+void operator,(const ThrowingValue<N>& ef, T&& t) = delete;
+template <NoThrow N, typename T>
+void operator,(T&& t, const ThrowingValue<N>& ef) = delete;
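A rough sketch of how the NoThrow flag bitfield and the instrumented operators interact (assuming gtest, <type_traits>, and this header; illustrative only):

TEST(ThrowingValueSketch, FlagsAndInstrumentedOperators) {
  // The NoThrow flags only affect the operations they name.
  using NoThrowMove =
      absl::ThrowingValue<absl::NoThrow::kMoveCtor | absl::NoThrow::kMoveAssign>;
  static_assert(std::is_nothrow_move_constructible<NoThrowMove>::value, "");
  static_assert(!std::is_nothrow_copy_constructible<NoThrowMove>::value, "");

  // The NoThrowTag constructor never throws, so it is safe to call while the
  // countdown is armed.
  absl::ThrowingValue<> v(1, absl::no_throw_ctor);

  absl::SetCountdown();
  EXPECT_THROW(v + v, absl::exceptions_internal::TestException);

  absl::UnsetCountdown();
  EXPECT_EQ((v + v).Get(), 2);
}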
+
+// An allocator type which is instrumented to throw at a controlled time, or not
+// to throw, using NoThrow. The supported settings are the default, in which
+// every function that a conforming allocator is allowed to throw from may
+// throw, and NoThrow::kNoThrow, in which nothing throws, in line with the
+// ABSL_ALLOCATOR_THROWS configuration macro.
+template <typename T, NoThrow Flags = NoThrow::kNone>
+class ThrowingAllocator : private exceptions_internal::TrackedObject {
+ static_assert(Flags == NoThrow::kNone || Flags == NoThrow::kNoThrow,
+ "Invalid flag");
+
+ public:
+ using pointer = T*;
+ using const_pointer = const T*;
+ using reference = T&;
+ using const_reference = const T&;
+ using void_pointer = void*;
+ using const_void_pointer = const void*;
+ using value_type = T;
+ using size_type = size_t;
+ using difference_type = ptrdiff_t;
+
+ using is_nothrow = std::integral_constant<bool, Flags == NoThrow::kNoThrow>;
+ using propagate_on_container_copy_assignment = std::true_type;
+ using propagate_on_container_move_assignment = std::true_type;
+ using propagate_on_container_swap = std::true_type;
+ using is_always_equal = std::false_type;
+
+ ThrowingAllocator() : TrackedObject(ABSL_PRETTY_FUNCTION) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ dummy_ = std::make_shared<const int>(next_id_++);
+ }
+
+ template <typename U>
+ ThrowingAllocator( // NOLINT
+ const ThrowingAllocator<U, Flags>& other) noexcept
+ : TrackedObject(ABSL_PRETTY_FUNCTION), dummy_(other.State()) {}
+
+ ThrowingAllocator(const ThrowingAllocator& other) noexcept
+ : TrackedObject(ABSL_PRETTY_FUNCTION), dummy_(other.State()) {}
+
+ template <typename U>
+ ThrowingAllocator( // NOLINT
+ ThrowingAllocator<U, Flags>&& other) noexcept
+ : TrackedObject(ABSL_PRETTY_FUNCTION), dummy_(std::move(other.State())) {}
+
+ ThrowingAllocator(ThrowingAllocator&& other) noexcept
+ : TrackedObject(ABSL_PRETTY_FUNCTION), dummy_(std::move(other.State())) {}
+
+ ~ThrowingAllocator() noexcept = default;
+
+ template <typename U>
+ ThrowingAllocator& operator=(
+ const ThrowingAllocator<U, Flags>& other) noexcept {
+ dummy_ = other.State();
+ return *this;
+ }
+
+ template <typename U>
+ ThrowingAllocator& operator=(ThrowingAllocator<U, Flags>&& other) noexcept {
+ dummy_ = std::move(other.State());
+ return *this;
+ }
+
+ template <typename U>
+ struct rebind {
+ using other = ThrowingAllocator<U, Flags>;
+ };
+
+ pointer allocate(size_type n) noexcept(
+ !exceptions_internal::ThrowingAllowed(Flags, NoThrow::kNoThrow)) {
+ ReadStateAndMaybeThrow(ABSL_PRETTY_FUNCTION);
+ return static_cast<pointer>(::operator new(n * sizeof(T)));
+ }
+ pointer allocate(size_type n, const_void_pointer) noexcept(
+ !exceptions_internal::ThrowingAllowed(Flags, NoThrow::kNoThrow)) {
+ return allocate(n);
+ }
+
+ void deallocate(pointer ptr, size_type) noexcept {
+ ReadState();
+ ::operator delete(static_cast<void*>(ptr));
+ }
+
+ template <typename U, typename... Args>
+ void construct(U* ptr, Args&&... args) noexcept(
+ !exceptions_internal::ThrowingAllowed(Flags, NoThrow::kNoThrow)) {
+ ReadStateAndMaybeThrow(ABSL_PRETTY_FUNCTION);
+ ::new (static_cast<void*>(ptr)) U(std::forward<Args>(args)...);
+ }
+
+ template <typename U>
+ void destroy(U* p) noexcept {
+ ReadState();
+ p->~U();
+ }
+
+ size_type max_size() const
+ noexcept(!exceptions_internal::ThrowingAllowed(Flags,
+ NoThrow::kNoThrow)) {
+ ReadStateAndMaybeThrow(ABSL_PRETTY_FUNCTION);
+ return std::numeric_limits<difference_type>::max() / sizeof(value_type);
+ }
+
+ ThrowingAllocator select_on_container_copy_construction() noexcept(
+ !exceptions_internal::ThrowingAllowed(Flags, NoThrow::kNoThrow)) {
+ auto& out = *this;
+ ReadStateAndMaybeThrow(ABSL_PRETTY_FUNCTION);
+ return out;
+ }
+
+ template <typename U>
+ bool operator==(const ThrowingAllocator<U, Flags>& other) const noexcept {
+ return dummy_ == other.dummy_;
+ }
+
+ template <typename U>
+ bool operator!=(const ThrowingAllocator<U, Flags>& other) const noexcept {
+ return dummy_ != other.dummy_;
+ }
+
+ template <typename U, NoThrow B>
+ friend class ThrowingAllocator;
+
+ private:
+ const std::shared_ptr<const int>& State() const { return dummy_; }
+ std::shared_ptr<const int>& State() { return dummy_; }
+
+ void ReadState() {
+    // We know that this will never be true, but the compiler doesn't, so this
+ // should safely force a read of the value.
+ if (*dummy_ < 0) std::abort();
+ }
+
+ void ReadStateAndMaybeThrow(absl::string_view msg) const {
+ if (exceptions_internal::ThrowingAllowed(Flags, NoThrow::kNoThrow)) {
+ exceptions_internal::MaybeThrow(
+ absl::Substitute("Allocator id $0 threw from $1", *dummy_, msg));
+ }
+ }
+
+ static int next_id_;
+ std::shared_ptr<const int> dummy_;
+};
+
+template <typename T, NoThrow Throws>
+int ThrowingAllocator<T, Throws>::next_id_ = 0;
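A sketch of exercising the allocator interface directly, to keep the example small and self-contained (assuming gtest and this header; illustrative only):

TEST(ThrowingAllocatorSketch, AllocateCanThrow) {
  absl::UnsetCountdown();
  absl::ThrowingAllocator<int> alloc;

  int* p = alloc.allocate(4);  // countdown disarmed: succeeds
  alloc.construct(p, 42);
  EXPECT_EQ(*p, 42);
  alloc.destroy(p);
  alloc.deallocate(p, 4);

  absl::SetCountdown();  // the next instrumented call throws
  EXPECT_THROW(alloc.allocate(1), absl::exceptions_internal::TestException);
  absl::UnsetCountdown();
}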
+
+// Inspects the constructions and destructions of anything inheriting from
+// TrackedObject. Place this as a member variable in a test fixture to ensure
+// that every ThrowingValue was constructed and destroyed correctly. This also
+// allows us to safely "leak" TrackedObjects, as AllocInspector will destroy
+// everything left over in its destructor.
+struct AllocInspector {
+ AllocInspector() = default;
+ ~AllocInspector() {
+ auto& allocs = exceptions_internal::TrackedObject::GetAllocs();
+ for (const auto& kv : allocs) {
+ ADD_FAILURE() << "Object at address " << static_cast<void*>(kv.first)
+ << " constructed from " << kv.second << " not destroyed";
+ }
+ allocs.clear();
+ }
+};
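A sketch of the intended fixture usage (assuming gtest and this header; illustrative only): declare the inspector first so its destructor runs last, after everything it is auditing has already been destroyed.

TEST(AllocInspectorSketch, FlagsLeakedTrackedObjects) {
  absl::AllocInspector inspector;  // audits every TrackedObject created below
  absl::UnsetCountdown();
  {
    absl::ThrowingValue<> well_behaved(3, absl::no_throw_ctor);
  }  // destroyed here, so the inspector has nothing to report
  // A ThrowingValue "leaked" via operator new and never deleted would instead
  // trigger ADD_FAILURE() in ~AllocInspector().
}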
+
+// Tests for resource leaks by attempting to construct a T using args repeatedly
+// until successful, using the countdown method. Side effects can then be
+// tested for resource leaks. If an AllocInspector is present in the test
+// fixture, then this will also test that memory resources are not leaked as
+// long as T allocates TrackedObjects.
+template <typename T, typename... Args>
+T TestThrowingCtor(Args&&... args) {
+ struct Cleanup {
+ ~Cleanup() { UnsetCountdown(); }
+ };
+ Cleanup c;
+ for (int countdown = 0;; ++countdown) {
+ exceptions_internal::countdown = countdown;
+ try {
+ return T(std::forward<Args>(args)...);
+ } catch (const exceptions_internal::TestException&) {
+ }
+ }
+}
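A sketch of how the retry loop is typically driven (assuming gtest and this header; illustrative only): each pass arms the countdown one instrumented call later, so the constructor is exercised with a throw at every step until it finally succeeds.

TEST(TestThrowingCtorSketch, CopyConstructionDoesNotLeak) {
  absl::AllocInspector inspector;
  absl::UnsetCountdown();
  absl::ThrowingValue<> source(7, absl::no_throw_ctor);

  // The copy constructor is instrumented; TestThrowingCtor retries it with
  // countdown 0, 1, 2, ... until a copy completes without throwing.
  absl::TestThrowingCtor<absl::ThrowingValue<>>(source);
}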
+
+// Tests that performing operation Op on a T follows exception safety
+// guarantees. By default only tests the basic guarantee. There must be a
+// function, AbslCheckInvariants(T*), which returns
+// anything convertible to bool and which makes sure the invariants of the type
+// are upheld. This is called before any of the checkers.
+//
+// Parameters:
+// * TFactory: operator() returns a unique_ptr to the type under test (T). It
+// should always return pointers to values which compare equal.
+// * FunctionFromTPtrToVoid: A functor exercising the function under test. It
+// should take a T* and return void.
+// * Checkers: Any number of functions taking a T* and returning
+// anything contextually convertible to bool. If a testing::AssertionResult
+// is used then the error message is kept. These test invariants related to
+// the operation. To test the strong guarantee, pass
+// absl::StrongGuarantee(factory). A checker may freely modify the passed-in
+// T, for example to make sure the T can be set to a known state.
+template <typename TFactory, typename FunctionFromTPtrToVoid,
+ typename... Checkers>
+testing::AssertionResult TestExceptionSafety(TFactory factory,
+ FunctionFromTPtrToVoid&& op,
+ const Checkers&... checkers) {
+ for (int countdown = 0;; ++countdown) {
+ auto out = exceptions_internal::TestAtCountdown(factory, op, countdown,
+ checkers...);
+ if (!out.has_value()) {
+ UnsetCountdown();
+ return testing::AssertionSuccess();
+ }
+ if (!*out) return *out;
+ }
+}
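A compact sketch of wiring up the harness (assuming gtest; SketchCounter, the increment lambda, and the trivial AbslCheckInvariants below are test-local inventions, not upstream code). The factory builds a plain counter, the operation routes its work through an instrumented ThrowingValue so it can fail part-way, and StrongGuarantee verifies the counter is untouched whenever that happens.

struct SketchCounter {
  int value = 0;
  bool operator==(const SketchCounter& other) const {
    return value == other.value;
  }
};

// Required by the harness; found via argument-dependent lookup.
testing::AssertionResult AbslCheckInvariants(SketchCounter* c) {
  return c->value >= 0 ? testing::AssertionSuccess()
                       : testing::AssertionFailure() << "negative counter";
}

TEST(TestExceptionSafetySketch, StrongGuaranteeOnIncrement) {
  auto factory = []() {
    return std::unique_ptr<SketchCounter>(new SketchCounter());
  };
  auto increment = [](SketchCounter* c) {
    absl::ThrowingValue<> step(1, absl::no_throw_ctor);
    ++step;  // instrumented: may throw TestException mid-operation
    c->value += step.Get();
  };
  EXPECT_TRUE(absl::TestExceptionSafety(factory, increment,
                                        absl::StrongGuarantee(factory)));
}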
+
+// Returns a functor to test for the strong exception-safety guarantee.
+// Equality comparisons are made against the T provided by the factory and
+// default to using operator==.
+//
+// Parameters:
+// * TFactory: operator() returns a unique_ptr to the type under test. It
+// should always return pointers to values which compare equal.
+template <typename TFactory, typename EqualTo = std::equal_to<
+ exceptions_internal::FactoryType<TFactory>>>
+exceptions_internal::StrongGuaranteeTester<
+ exceptions_internal::FactoryType<TFactory>, EqualTo>
+StrongGuarantee(TFactory factory, EqualTo eq = EqualTo()) {
+ return exceptions_internal::StrongGuaranteeTester<
+ exceptions_internal::FactoryType<TFactory>, EqualTo>(factory(), eq);
+}
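The EqualTo parameter can relax what "unchanged" means. A short sketch, reusing the hypothetical SketchCounter from the previous example:

TEST(StrongGuaranteeSketch, CustomComparator) {
  auto factory = []() {
    return std::unique_ptr<SketchCounter>(new SketchCounter());
  };
  auto same_parity = [](const SketchCounter& a, const SketchCounter& b) {
    return a.value % 2 == b.value % 2;
  };
  auto checker = absl::StrongGuarantee(factory, same_parity);

  SketchCounter even;  // value 0: same parity as the factory's counter
  EXPECT_TRUE(checker(&even));
}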
+
+} // namespace absl
+
+#endif // ABSL_BASE_INTERNAL_EXCEPTION_SAFETY_TESTING_H_
#ifndef ABSL_LOW_LEVEL_ALLOC_MISSING
#ifndef _WIN32
+#include <pthread.h>
#include <signal.h>
#include <sys/mman.h>
#include <unistd.h>
SysAllocator::~SysAllocator() {}
void SysAllocator::GetStats(char* buffer, int) { buffer[0] = 0; }
+// Dummy key method to avoid weak vtable.
+void MallocExtensionWriter::UnusedKeyMethod() {}
+
+void StringMallocExtensionWriter::Write(const char* buf, int len) {
+ out_->append(buf, len);
+}
+
// Default implementation -- does nothing
MallocExtension::~MallocExtension() { }
bool MallocExtension::VerifyAllMemory() { return true; }
MallocExtensionWriter() {}
MallocExtensionWriter(const MallocExtensionWriter&) = delete;
MallocExtensionWriter& operator=(const MallocExtensionWriter&) = delete;
+
+ private:
+ virtual void UnusedKeyMethod(); // Dummy key method to avoid weak vtable.
};
// A subclass that writes to the std::string "out". NOTE: The generated
class StringMallocExtensionWriter : public MallocExtensionWriter {
public:
explicit StringMallocExtensionWriter(std::string* out) : out_(out) {}
- virtual void Write(const char* buf, int len) {
- out_->append(buf, len);
- }
+ void Write(const char* buf, int len) override;
private:
std::string* const out_;
--- /dev/null
+#ifndef ABSL_BASE_INTERNAL_PRETTY_FUNCTION_H_
+#define ABSL_BASE_INTERNAL_PRETTY_FUNCTION_H_
+
+// ABSL_PRETTY_FUNCTION
+//
+// In C++11, __func__ gives the undecorated name of the current function. That
+// is, "main", not "int main()". Various compilers give extra macros to get the
+// decorated function name, including return type and arguments, to
+// differentiate between overload sets. ABSL_PRETTY_FUNCTION is a portable
+// version of these macros which forwards to the correct macro on each compiler.
+#if defined(_MSC_VER)
+#define ABSL_PRETTY_FUNCTION __FUNCSIG__
+#elif defined(__GNUC__)
+#define ABSL_PRETTY_FUNCTION __PRETTY_FUNCTION__
+#else
+#error "Unsupported compiler"
+#endif
+
+#endif // ABSL_BASE_INTERNAL_PRETTY_FUNCTION_H_
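A small sketch of using the macro (the exact spellings below vary by compiler and are shown only as an assumption):

#include <cstdio>

#include "absl/base/internal/pretty_function.h"

void ReportProgress(int step) {
  // GCC/Clang: roughly "void ReportProgress(int)"
  // MSVC:      roughly "void __cdecl ReportProgress(int)"
  std::printf("%s reached step %d\n", ABSL_PRETTY_FUNCTION, step);
}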
//
// This preprocessor token is also defined in raw_io.cc. If you need to copy
// this, consider moving both to config.h instead.
-#if defined(__linux__) || defined(__APPLE__) || defined(__Fuchsia__)
+#if defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \
+ defined(__Fuchsia__)
#include <unistd.h>
// ABSL_HAVE_SYSCALL_WRITE is defined when the platform provides the syscall
// syscall(SYS_write, /*int*/ fd, /*char* */ buf, /*size_t*/ len);
// for low level operations that want to avoid libc.
-#if defined(__linux__) && !defined(__ANDROID__)
+#if (defined(__linux__) || defined(__FreeBSD__)) && !defined(__ANDROID__)
#include <sys/syscall.h>
#define ABSL_HAVE_SYSCALL_WRITE 1
#define ABSL_LOW_LEVEL_WRITE_SUPPORTED 1
// Uncommon constructors.
SpinLock::SpinLock(base_internal::SchedulingMode mode)
: lockword_(IsCooperative(mode) ? kSpinLockCooperative : 0) {
- ABSL_TSAN_MUTEX_CREATE(this, 0);
+ ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static);
}
SpinLock::SpinLock(base_internal::LinkerInitialized,
base_internal::SchedulingMode mode) {
- ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_linker_init);
+ ABSL_TSAN_MUTEX_CREATE(this, 0);
if (IsCooperative(mode)) {
InitLinkerInitializedAndCooperative();
}
class LOCKABLE SpinLock {
public:
SpinLock() : lockword_(kSpinLockCooperative) {
- ABSL_TSAN_MUTEX_CREATE(this, 0);
+ ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static);
}
// Special constructor for use with static SpinLock objects. E.g.,
// initializers run.
explicit SpinLock(base_internal::LinkerInitialized) {
// Does nothing; lockword_ is already initialized
- ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_linker_init);
+ ABSL_TSAN_MUTEX_CREATE(this, 0);
}
// Constructors that allow non-cooperative spinlocks to be created for use
SpinLock(base_internal::LinkerInitialized,
base_internal::SchedulingMode mode);
- ~SpinLock() { ABSL_TSAN_MUTEX_DESTROY(this, 0); }
+ ~SpinLock() { ABSL_TSAN_MUTEX_DESTROY(this, __tsan_mutex_not_static); }
// Acquire this SpinLock.
inline void Lock() EXCLUSIVE_LOCK_FUNCTION() {
--- /dev/null
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file is an Akaros-specific part of spinlock_wait.cc
+
+#include <atomic>
+
+#include "absl/base/internal/scheduling_mode.h"
+
+extern "C" {
+
+ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockDelay(
+ std::atomic<uint32_t>* /* lock_word */, uint32_t /* value */,
+ int /* loop */, absl::base_internal::SchedulingMode /* mode */) {
+ // In Akaros, one must take care not to call anything that could cause a
+ // malloc(), a blocking system call, or a uthread_yield() while holding a
+  // spinlock. Our callers assume we will not call into libraries or other
+ // arbitrary code.
+}
+
+ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockWake(
+ std::atomic<uint32_t>* /* lock_word */, bool /* all */) {}
+
+} // extern "C"
#if defined(_WIN32)
#include "absl/base/internal/spinlock_win32.inc"
+#elif defined(__akaros__)
+#include "absl/base/internal/spinlock_akaros.inc"
#else
#include "absl/base/internal/spinlock_posix.inc"
#endif
#include <sys/syscall.h>
#endif
-#ifdef __APPLE__
+#if defined(__APPLE__) || defined(__FreeBSD__)
#include <sys/sysctl.h>
#endif
return syscall(SYS_gettid);
}
+#elif defined(__akaros__)
+
+pid_t GetTID() {
+ // Akaros has a concept of "vcore context", which is the state the program
+ // is forced into when we need to make a user-level scheduling decision, or
+ // run a signal handler. This is analogous to the interrupt context that a
+ // CPU might enter if it encounters some kind of exception.
+ //
+ // There is no current thread context in vcore context, but we need to give
+ // a reasonable answer if asked for a thread ID (e.g., in a signal handler).
+ // Thread 0 always exists, so if we are in vcore context, we return that.
+ //
+ // Otherwise, we know (since we are using pthreads) that the uthread struct
+ // current_uthread is pointing to is the first element of a
+ // struct pthread_tcb, so we extract and return the thread ID from that.
+ //
+ // TODO(dcross): Akaros anticipates moving the thread ID to the uthread
+ // structure at some point. We should modify this code to remove the cast
+ // when that happens.
+ if (in_vcore_context())
+ return 0;
+ return reinterpret_cast<struct pthread_tcb *>(current_uthread)->id;
+}
+
#else
// Fallback implementation of GetTID using pthread_getspecific.
#ifndef ABSL_BASE_INTERNAL_TSAN_MUTEX_INTERFACE_H_
#define ABSL_BASE_INTERNAL_TSAN_MUTEX_INTERFACE_H_
-#ifdef THREAD_SANITIZER
+// ABSL_INTERNAL_HAVE_TSAN_INTERFACE
+// Macro intended only for internal use.
+//
+// Checks whether LLVM Thread Sanitizer interfaces are available.
+// First made available in LLVM 5.0 (Sep 2017).
+#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
+#error "ABSL_INTERNAL_HAVE_TSAN_INTERFACE cannot be directly set."
+#endif
+
+#if defined(THREAD_SANITIZER) && defined(__has_include)
+#if __has_include(<sanitizer/tsan_interface.h>)
+#define ABSL_INTERNAL_HAVE_TSAN_INTERFACE 1
+#endif
+#endif
+
+#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
#include <sanitizer/tsan_interface.h>
#define ABSL_TSAN_MUTEX_CREATE __tsan_mutex_create
#ifndef ABSL_BASE_MACROS_H_
#define ABSL_BASE_MACROS_H_
+#include <cassert>
#include <cstddef>
#include "absl/base/port.h"
// GUARDED_BY()
//
-// Documents if a shared variable/field needs to be protected by a mutex.
-// GUARDED_BY() allows the user to specify a particular mutex that should be
-// held when accessing the annotated variable.
+// Documents if a shared field or global variable needs to be protected by a
+// mutex. GUARDED_BY() allows the user to specify a particular mutex that
+// should be held when accessing the annotated variable.
//
// Example:
//
// Mutex mu;
// int p1 GUARDED_BY(mu);
#define GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(guarded_by(x))
-#define GUARDED_VAR THREAD_ANNOTATION_ATTRIBUTE__(guarded)
// PT_GUARDED_BY()
//
// // guarded by `mu2`:
// int *q GUARDED_BY(mu1) PT_GUARDED_BY(mu2);
#define PT_GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(pt_guarded_by(x))
-#define PT_GUARDED_VAR THREAD_ANNOTATION_ATTRIBUTE__(pt_guarded)
// ACQUIRED_AFTER() / ACQUIRED_BEFORE()
//
srcs = ["internal/test_instance_tracker.cc"],
hdrs = ["internal/test_instance_tracker.h"],
copts = ABSL_DEFAULT_COPTS,
+ visibility = [
+ "//absl:__subpackages__",
+ ],
)
cc_test(
--- /dev/null
+#
+# Copyright 2017 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+list(APPEND CONTAINER_PUBLIC_HEADERS
+ "fixed_array.h"
+ "inlined_vector.h"
+)
+
+
+list(APPEND CONTAINER_INTERNAL_HEADERS
+ "internal/test_instance_tracker.h"
+)
+
+
+absl_header_library(
+ TARGET
+ absl_container
+ EXPORT_NAME
+ container
+)
+
+
+#
+## TESTS
+#
+
+list(APPEND TEST_INSTANCE_TRACKER_LIB_SRC
+ "internal/test_instance_tracker.cc"
+ ${CONTAINER_PUBLIC_HEADERS}
+ ${CONTAINER_INTERNAL_HEADERS}
+)
+
+
+absl_library(
+ TARGET
+ test_instance_tracker_lib
+ SOURCES
+ ${TEST_INSTANCE_TRACKER_LIB_SRC}
+ PUBLIC_LIBRARIES
+ absl::container
+ DISABLE_INSTALL
+)
+
+
+
+# test fixed_array_test
+set(FIXED_ARRAY_TEST_SRC "fixed_array_test.cc")
+set(FIXED_ARRAY_TEST_PUBLIC_LIBRARIES absl::base absl_throw_delegate test_instance_tracker_lib)
+
+absl_test(
+ TARGET
+ fixed_array_test
+ SOURCES
+ ${FIXED_ARRAY_TEST_SRC}
+ PUBLIC_LIBRARIES
+ ${FIXED_ARRAY_TEST_PUBLIC_LIBRARIES}
+ PRIVATE_COMPILE_FLAGS
+ ${ABSL_EXCEPTIONS_FLAG}
+)
+
+
+
+absl_test(
+ TARGET
+ fixed_array_test_noexceptions
+ SOURCES
+ ${FIXED_ARRAY_TEST_SRC}
+ PUBLIC_LIBRARIES
+ ${FIXED_ARRAY_TEST_PUBLIC_LIBRARIES}
+)
+
+
+# test inlined_vector_test
+set(INLINED_VECTOR_TEST_SRC "inlined_vector_test.cc")
+set(INLINED_VECTOR_TEST_PUBLIC_LIBRARIES absl::base absl_throw_delegate test_instance_tracker_lib)
+
+absl_test(
+ TARGET
+ inlined_vector_test
+ SOURCES
+ ${INLINED_VECTOR_TEST_SRC}
+ PUBLIC_LIBRARIES
+ ${INLINED_VECTOR_TEST_PUBLIC_LIBRARIES}
+)
+
+absl_test(
+ TARGET
+ inlined_vector_test_noexceptions
+ SOURCES
+ ${INLINED_VECTOR_TEST_SRC}
+ PUBLIC_LIBRARIES
+ ${INLINED_VECTOR_TEST_PUBLIC_LIBRARIES}
+ PRIVATE_COMPILE_FLAGS
+ ${ABSL_NOEXCEPTION_CXXFLAGS}
+)
+
+
+# test test_instance_tracker_test
+set(TEST_INSTANCE_TRACKER_TEST_SRC "internal/test_instance_tracker_test.cc")
+set(TEST_INSTANCE_TRACKER_TEST_PUBLIC_LIBRARIES absl::base absl_throw_delegate test_instance_tracker_lib)
+
+
+absl_test(
+ TARGET
+ test_instance_tracker_test
+ SOURCES
+ ${TEST_INSTANCE_TRACKER_TEST_SRC}
+ PUBLIC_LIBRARIES
+ ${TEST_INSTANCE_TRACKER_TEST_PUBLIC_LIBRARIES}
+)
+
+
using reverse_iterator = std::reverse_iterator<iterator>;
using const_reverse_iterator = std::reverse_iterator<const_iterator>;
- InlinedVector() noexcept(noexcept(allocator_type()))
+ InlinedVector() noexcept(
+ std::is_nothrow_default_constructible<allocator_type>::value)
: allocator_and_tag_(allocator_type()) {}
explicit InlinedVector(const allocator_type& alloc) noexcept
InlinedVector(const InlinedVector& v);
InlinedVector(const InlinedVector& v, const allocator_type& alloc);
+ // This move constructor does not allocate and only moves the underlying
+ // objects, so its `noexcept` specification depends on whether moving the
+ // underlying objects can throw or not. We assume
+ // a) move constructors should only throw due to allocation failure and
+ // b) if `value_type`'s move constructor allocates, it uses the same
+ // allocation function as the `InlinedVector`'s allocator, so the move
+ // constructor is non-throwing if the allocator is non-throwing or
+ // `value_type`'s move constructor is specified as `noexcept`.
InlinedVector(InlinedVector&& v) noexcept(
absl::allocator_is_nothrow<allocator_type>::value ||
std::is_nothrow_move_constructible<value_type>::value);
+
+ // This move constructor allocates and also moves the underlying objects, so
+ // its `noexcept` specification depends on whether the allocation can throw
+ // and whether moving the underlying objects can throw. Based on the same
+ // assumptions above, the `noexcept` specification is dominated by whether the
+ // allocation can throw regardless of whether `value_type`'s move constructor
+ // is specified as `noexcept`.
InlinedVector(InlinedVector&& v, const allocator_type& alloc) noexcept(
absl::allocator_is_nothrow<allocator_type>::value);
~InlinedVector() { clear(); }
InlinedVector& operator=(const InlinedVector& v) {
+ if (this == &v) {
+ return *this;
+ }
// Optimized to avoid reallocation.
// Prefer reassignment to copy construction for elements.
if (size() < v.size()) { // grow
// InlinedVector::emplace_back()
//
// Constructs and appends an object to the inlined vector.
+ //
+ // Returns a reference to the inserted element.
template <typename... Args>
- void emplace_back(Args&&... args) {
+ value_type& emplace_back(Args&&... args) {
size_type s = size();
assert(s <= capacity());
if (ABSL_PREDICT_FALSE(s == capacity())) {
- GrowAndEmplaceBack(std::forward<Args>(args)...);
- return;
+ return GrowAndEmplaceBack(std::forward<Args>(args)...);
}
assert(s < capacity());
tag().set_inline_size(s + 1);
space = inlined_space();
}
- Construct(space + s, std::forward<Args>(args)...);
+ return Construct(space + s, std::forward<Args>(args)...);
}
// InlinedVector::push_back()
// portion and the start of the uninitialized portion of the created gap.
// The number of initialized spots is pair.second - pair.first;
// the number of raw spots is n - (pair.second - pair.first).
+ //
+ // Updates the size of the InlinedVector internally.
std::pair<iterator, iterator> ShiftRight(const_iterator position,
size_type n);
}
template <typename... Args>
- void GrowAndEmplaceBack(Args&&... args) {
+ value_type& GrowAndEmplaceBack(Args&&... args) {
assert(size() == capacity());
const size_type s = size();
Allocation new_allocation(allocator(), 2 * capacity());
- Construct(new_allocation.buffer() + s, std::forward<Args>(args)...);
+ value_type& new_element =
+ Construct(new_allocation.buffer() + s, std::forward<Args>(args)...);
UninitializedCopy(std::make_move_iterator(data()),
std::make_move_iterator(data() + s),
new_allocation.buffer());
ResetAllocation(new_allocation, s + 1);
+
+ return new_element;
}
void InitAssign(size_type n);
void InitAssign(size_type n, const value_type& t);
template <typename... Args>
- void Construct(pointer p, Args&&... args) {
+ value_type& Construct(pointer p, Args&&... args) {
AllocatorTraits::construct(allocator(), p, std::forward<Args>(args)...);
+ return *p;
}
template <typename Iter>
emplace_back(std::forward<Args>(args)...);
return end() - 1;
}
- size_type s = size();
- size_type idx = std::distance(cbegin(), position);
- if (s == capacity()) {
- EnlargeBy(1);
- }
- assert(s < capacity());
- iterator pos = begin() + idx; // Set 'pos' to a post-enlarge iterator.
- pointer space;
- if (allocated()) {
- tag().set_allocated_size(s + 1);
- space = allocated_space();
+ T new_t = T(std::forward<Args>(args)...);
+
+ auto range = ShiftRight(position, 1);
+ if (range.first == range.second) {
+ // constructing into uninitialized memory
+ Construct(range.first, std::move(new_t));
} else {
- tag().set_inline_size(s + 1);
- space = inlined_space();
+ // assigning into moved-from object
+ *range.first = T(std::move(new_t));
}
- Construct(space + s, std::move(space[s - 1]));
- std::move_backward(pos, space + s - 1, space + s);
- Destroy(pos, pos + 1);
- Construct(pos, std::forward<Args>(args)...);
- return pos;
+ return range.first;
}
template <typename T, size_t N, typename A>
start_used = pos;
start_raw = pos + new_elements_in_used_space;
}
+ tag().add_size(n);
return std::make_pair(start_used, start_raw);
}
-> iterator {
assert(position >= begin() && position <= end());
if (n == 0) return const_cast<iterator>(position);
+
+ value_type copy = v;
std::pair<iterator, iterator> it_pair = ShiftRight(position, n);
- std::fill(it_pair.first, it_pair.second, v);
- UninitializedFill(it_pair.second, it_pair.first + n, v);
- tag().add_size(n);
+ std::fill(it_pair.first, it_pair.second, copy);
+ UninitializedFill(it_pair.second, it_pair.first + n, copy);
+
return it_pair.first;
}
ForwardIter open_spot = std::next(first, used_spots);
std::copy(first, open_spot, it_pair.first);
UninitializedCopy(open_spot, last, it_pair.second);
- tag().add_size(n);
return it_pair.first;
}
#include "absl/container/inlined_vector.h"
+#include <algorithm>
#include <forward_list>
#include <list>
#include <memory>
absl::InlinedVector<MoveCanThrow, 2>>::value));
}
+TEST(InlinedVectorTest, EmplaceBack) {
+ absl::InlinedVector<std::pair<std::string, int>, 1> v;
+
+ auto& inlined_element = v.emplace_back("answer", 42);
+ EXPECT_EQ(&inlined_element, &v[0]);
+ EXPECT_EQ(inlined_element.first, "answer");
+ EXPECT_EQ(inlined_element.second, 42);
+
+ auto& allocated_element = v.emplace_back("taxicab", 1729);
+ EXPECT_EQ(&allocated_element, &v[1]);
+ EXPECT_EQ(allocated_element.first, "taxicab");
+ EXPECT_EQ(allocated_element.second, 1729);
+}
TEST(IntVec, Insert) {
for (int len = 0; len < 20; len++) {
}
}
+TEST(IntVec, AliasingCopyAssignment) {
+ for (int len = 0; len < 20; ++len) {
+ IntVec original;
+ Fill(&original, len);
+ IntVec dup = original;
+ dup = dup;
+ EXPECT_EQ(dup, original);
+ }
+}
+
TEST(IntVec, MoveConstructorAndAssignment) {
for (int len = 0; len < 20; len++) {
IntVec v_in;
}
}
+class NotTriviallyDestructible {
+ public:
+ NotTriviallyDestructible() : p_(new int(1)) {}
+ explicit NotTriviallyDestructible(int i) : p_(new int(i)) {}
+
+ NotTriviallyDestructible(const NotTriviallyDestructible& other)
+ : p_(new int(*other.p_)) {}
+
+ NotTriviallyDestructible& operator=(const NotTriviallyDestructible& other) {
+ p_ = absl::make_unique<int>(*other.p_);
+ return *this;
+ }
+
+ bool operator==(const NotTriviallyDestructible& other) const {
+ return *p_ == *other.p_;
+ }
+
+ private:
+ std::unique_ptr<int> p_;
+};
+
+TEST(AliasingTest, Emplace) {
+ for (int i = 2; i < 20; ++i) {
+ absl::InlinedVector<NotTriviallyDestructible, 10> vec;
+ for (int j = 0; j < i; ++j) {
+ vec.push_back(NotTriviallyDestructible(j));
+ }
+ vec.emplace(vec.begin(), vec[0]);
+ EXPECT_EQ(vec[0], vec[1]);
+ vec.emplace(vec.begin() + i / 2, vec[i / 2]);
+ EXPECT_EQ(vec[i / 2], vec[i / 2 + 1]);
+ vec.emplace(vec.end() - 1, vec.back());
+ EXPECT_EQ(vec[vec.size() - 2], vec.back());
+ }
+}
+
+TEST(AliasingTest, InsertWithCount) {
+ for (int i = 1; i < 20; ++i) {
+ absl::InlinedVector<NotTriviallyDestructible, 10> vec;
+ for (int j = 0; j < i; ++j) {
+ vec.push_back(NotTriviallyDestructible(j));
+ }
+ for (int n = 0; n < 5; ++n) {
+ // We use back where we can because it's guaranteed to become invalidated
+ vec.insert(vec.begin(), n, vec.back());
+ auto b = vec.begin();
+ EXPECT_TRUE(
+ std::all_of(b, b + n, [&vec](const NotTriviallyDestructible& x) {
+ return x == vec.back();
+ }));
+
+ auto m_idx = vec.size() / 2;
+ vec.insert(vec.begin() + m_idx, n, vec.back());
+ auto m = vec.begin() + m_idx;
+ EXPECT_TRUE(
+ std::all_of(m, m + n, [&vec](const NotTriviallyDestructible& x) {
+ return x == vec.back();
+ }));
+
+      // We want distinct values so the equality test is meaningful;
+ // vec[vec.size() - 1] is also almost always invalidated.
+ auto old_e = vec.size() - 1;
+ auto val = vec[old_e];
+ vec.insert(vec.end(), n, vec[old_e]);
+ auto e = vec.begin() + old_e;
+ EXPECT_TRUE(std::all_of(
+ e, e + n,
+ [&val](const NotTriviallyDestructible& x) { return x == val; }));
+ }
+ }
+}
+
TEST(OverheadTest, Storage) {
// Check for size overhead.
// In particular, ensure that std::allocator doesn't cost anything to store.
"-Wcast-qual",
"-Wconversion-null",
"-Wmissing-declarations",
+ "-Wno-sign-compare",
"-Woverlength-strings",
"-Wpointer-arith",
"-Wunused-local-typedefs",
"-Wno-unused-private-field",
]
+
+# Docs on single flags are preceded by a comment.
+# Docs on groups of flags are preceded by ###.
+
LLVM_FLAGS = [
"-Wall",
"-Wextra",
"-Weverything",
+ # Abseil does not support C++98
"-Wno-c++98-compat-pedantic",
"-Wno-comma",
+ # Turns off all implicit conversion warnings. Most are re-enabled below.
"-Wno-conversion",
"-Wno-covered-switch-default",
"-Wno-deprecated",
"-Wno-float-conversion",
"-Wno-float-equal",
"-Wno-format-nonliteral",
+ # Too aggressive: warns on Clang extensions enclosed in Clang-only code paths.
"-Wno-gcc-compat",
"-Wno-global-constructors",
"-Wno-nested-anon-types",
"-Wno-old-style-cast",
"-Wno-packed",
"-Wno-padded",
+ # Warns on preferred usage of non-POD types such as string_view
"-Wno-range-loop-analysis",
"-Wno-reserved-id-macro",
"-Wno-shorten-64-to-32",
"-Wno-undef",
"-Wno-unknown-warning-option",
"-Wno-unreachable-code",
+ # Causes warnings on include guards
"-Wno-unused-macros",
"-Wno-weak-vtables",
- # flags below are also controlled by -Wconversion which is disabled
+ ###
+ # Implicit conversion warnings turned off by -Wno-conversion
+ # which are re-enabled below.
"-Wbitfield-enum-conversion",
"-Wbool-conversion",
"-Wconstant-conversion",
"-Wnull-conversion",
"-Wobjc-literal-conversion",
"-Wstring-conversion",
+ ###
]
LLVM_TEST_FLAGS = [
"/wd4244", # conversion from 'type1' to 'type2', possible loss of data
"/wd4267", # conversion from 'size_t' to 'type', possible loss of data
"/wd4800", # forcing value to bool 'true' or 'false' (performance warning)
+ "/DNOMINMAX", # Don't define min and max macros (windows.h)
"/DWIN32_LEAN_AND_MEAN", # Don't bloat namespace with incompatible winsock versions.
+ "/D_CRT_SECURE_NO_WARNINGS", # Don't warn about usage of insecure C functions
]
MSVC_TEST_FLAGS = [
--- /dev/null
+#
+# Copyright 2017 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+list(APPEND DEBUGGING_PUBLIC_HEADERS
+ "leak_check.h"
+ "stacktrace.h"
+)
+
+
+list(APPEND DEBUGGING_INTERNAL_HEADERS
+ "internal/address_is_readable.h"
+ "internal/elf_mem_image.h"
+ "internal/stacktrace_config.h"
+ "internal/vdso_support.h"
+)
+
+
+list(APPEND STACKTRACE_SRC
+ "stacktrace.cc"
+ "internal/address_is_readable.cc"
+ "internal/elf_mem_image.cc"
+ "internal/vdso_support.cc"
+ ${DEBUGGING_PUBLIC_HEADERS}
+ ${DEBUGGING_INTERNAL_HEADERS}
+)
+
+absl_library(
+ TARGET
+ absl_stacktrace
+ SOURCES
+ ${STACKTRACE_SRC}
+ EXPORT_NAME
+ stacktrace
+)
+
+
+list(APPEND LEAK_CHECK_SRC
+ "leak_check.cc"
+)
+
+
+# leak_check library
+absl_library(
+ TARGET
+ absl_leak_check
+ SOURCES
+ ${LEAK_CHECK_SRC}
+ PUBLIC_LIBRARIES
+ absl_base
+ EXPORT_NAME
+ leak_check
+)
+
+
+# component target
+absl_header_library(
+ TARGET
+ absl_debugging
+ PUBLIC_LIBRARIES
+ absl_stacktrace absl_leak_check
+ EXPORT_NAME
+ debugging
+)
+
+#
+## TESTS
+#
+
+# test leak_check_test
+list(APPEND LEAK_CHECK_TEST_SRC "leak_check_test.cc")
+
+absl_test(
+ TARGET
+ leak_check_test
+ SOURCES
+ ${LEAK_CHECK_TEST_SRC}
+ PUBLIC_LIBRARIES
+ absl_leak_check
+)
+
} // namespace
-const void *const ElfMemImage::kInvalidBase =
- reinterpret_cast<const void *>(~0L);
+// The value of this variable doesn't matter; it's used only for its
+// unique address.
+const int ElfMemImage::kInvalidBaseSentinel = 0;
ElfMemImage::ElfMemImage(const void *base) {
ABSL_RAW_CHECK(base != kInvalidBase, "bad pointer");
// An in-memory ELF image (may not exist on disk).
class ElfMemImage {
+ private:
+ // Sentinel: there could never be an elf image at &kInvalidBaseSentinel.
+ static const int kInvalidBaseSentinel;
+
public:
// Sentinel: there could never be an elf image at this address.
- static const void *const kInvalidBase;
+ static constexpr const void *const kInvalidBase =
+ static_cast<const void*>(&kInvalidBaseSentinel);
// Information about a single vdso symbol.
// All pointers are into .dynsym, .dynstr, or .text of the VDSO.
#if ABSL_STACKTRACE_INL_HEADER
#error ABSL_STACKTRACE_INL_HEADER cannot be directly set
#elif defined(__native_client__) || defined(__APPLE__) || \
- defined(__ANDROID__) || defined(__myriad2__) || defined(asmjs__) || \
- defined(__Fuchsia__)
+ defined(__FreeBSD__) || defined(__ANDROID__) || defined(__myriad2__) || \
+ defined(__asmjs__) || defined(__Fuchsia__)
#define ABSL_STACKTRACE_INL_HEADER \
"absl/debugging/internal/stacktrace_unimplemented-inl.inc"
return *(sp+2);
#elif defined(_CALL_SYSV)
return *(sp+1);
-#elif defined(__APPLE__) || (defined(__linux__) && defined(__PPC64__))
+#elif defined(__APPLE__) || defined(__FreeBSD__) || \
+ (defined(__linux__) && defined(__PPC64__))
// This check is in case the compiler doesn't define _CALL_AIX/etc.
return *(sp+2);
#elif defined(__linux)
// vuc is a ucontext_t *. We use void* to avoid the use
// of ucontext_t on non-POSIX systems.
static uintptr_t GetFP(const void *vuc) {
-#if defined(__linux__)
+#if !defined(__linux__)
+ static_cast<void>(vuc); // Avoid an unused argument compiler warning.
+#else
if (vuc != nullptr) {
auto *uc = reinterpret_cast<const ucontext_t *>(vuc);
#if defined(__i386__)
#ifdef ABSL_HAVE_VDSO_SUPPORT // defined in vdso_support.h
+#include <errno.h>
#include <fcntl.h>
#include <sys/syscall.h>
#include <unistd.h>
+#if __GLIBC_PREREQ(2, 16) // GLIBC-2.16 implements getauxval.
+#include <sys/auxv.h>
+#endif
+
#include "absl/base/dynamic_annotations.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/port.h"
namespace absl {
namespace debug_internal {
+ABSL_CONST_INIT
std::atomic<const void *> VDSOSupport::vdso_base_(
debug_internal::ElfMemImage::kInvalidBase);
+
std::atomic<VDSOSupport::GetCpuFn> VDSOSupport::getcpu_fn_(&InitAndGetCPU);
VDSOSupport::VDSOSupport()
// If vdso_base_ is still set to kInvalidBase, we got here
// Finally, even if there is a race here, it is harmless, because
// the operation should be idempotent.
const void *VDSOSupport::Init() {
- if (vdso_base_.load(std::memory_order_relaxed) ==
- debug_internal::ElfMemImage::kInvalidBase) {
- {
- // Valgrind zaps AT_SYSINFO_EHDR and friends from the auxv[]
- // on stack, and so glibc works as if VDSO was not present.
- // But going directly to kernel via /proc/self/auxv below bypasses
- // Valgrind zapping. So we check for Valgrind separately.
- if (RunningOnValgrind()) {
- vdso_base_.store(nullptr, std::memory_order_relaxed);
- getcpu_fn_.store(&GetCPUViaSyscall, std::memory_order_relaxed);
- return nullptr;
- }
- int fd = open("/proc/self/auxv", O_RDONLY);
- if (fd == -1) {
- // Kernel too old to have a VDSO.
- vdso_base_.store(nullptr, std::memory_order_relaxed);
- getcpu_fn_.store(&GetCPUViaSyscall, std::memory_order_relaxed);
- return nullptr;
- }
- ElfW(auxv_t) aux;
- while (read(fd, &aux, sizeof(aux)) == sizeof(aux)) {
- if (aux.a_type == AT_SYSINFO_EHDR) {
- vdso_base_.store(reinterpret_cast<void *>(aux.a_un.a_val),
- std::memory_order_relaxed);
- break;
- }
+ const auto kInvalidBase = debug_internal::ElfMemImage::kInvalidBase;
+#if __GLIBC_PREREQ(2, 16)
+ if (vdso_base_.load(std::memory_order_relaxed) == kInvalidBase) {
+ errno = 0;
+ const void *const sysinfo_ehdr =
+ reinterpret_cast<const void *>(getauxval(AT_SYSINFO_EHDR));
+ if (errno == 0) {
+ vdso_base_.store(sysinfo_ehdr, std::memory_order_relaxed);
+ }
+ }
+#endif // __GLIBC_PREREQ(2, 16)
+ if (vdso_base_.load(std::memory_order_relaxed) == kInvalidBase) {
+ // Valgrind zaps AT_SYSINFO_EHDR and friends from the auxv[]
+ // on stack, and so glibc works as if VDSO was not present.
+ // But going directly to kernel via /proc/self/auxv below bypasses
+ // Valgrind zapping. So we check for Valgrind separately.
+ if (RunningOnValgrind()) {
+ vdso_base_.store(nullptr, std::memory_order_relaxed);
+ getcpu_fn_.store(&GetCPUViaSyscall, std::memory_order_relaxed);
+ return nullptr;
+ }
+ int fd = open("/proc/self/auxv", O_RDONLY);
+ if (fd == -1) {
+ // Kernel too old to have a VDSO.
+ vdso_base_.store(nullptr, std::memory_order_relaxed);
+ getcpu_fn_.store(&GetCPUViaSyscall, std::memory_order_relaxed);
+ return nullptr;
+ }
+ ElfW(auxv_t) aux;
+ while (read(fd, &aux, sizeof(aux)) == sizeof(aux)) {
+ if (aux.a_type == AT_SYSINFO_EHDR) {
+ vdso_base_.store(reinterpret_cast<void *>(aux.a_un.a_val),
+ std::memory_order_relaxed);
+ break;
}
- close(fd);
}
- if (vdso_base_.load(std::memory_order_relaxed) ==
- debug_internal::ElfMemImage::kInvalidBase) {
+ close(fd);
+ if (vdso_base_.load(std::memory_order_relaxed) == kInvalidBase) {
// Didn't find AT_SYSINFO_EHDR in auxv[].
vdso_base_.store(nullptr, std::memory_order_relaxed);
}
return syscall(SYS_getcpu, cpu, nullptr, nullptr);
#else
// x86_64 never implemented sys_getcpu(), except as a VDSO call.
+ static_cast<void>(cpu); // Avoid an unused argument compiler warning.
errno = ENOSYS;
return -1;
#endif
name = "memory",
hdrs = ["memory.h"],
copts = ABSL_DEFAULT_COPTS,
- deps = ["//absl/meta:type_traits"],
+ deps = [
+ "//absl/base:core_headers",
+ "//absl/meta:type_traits",
+ ],
)
cc_test(
--- /dev/null
+#
+# Copyright 2017 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+list(APPEND MEMORY_PUBLIC_HEADERS
+ "memory.h"
+)
+
+
+absl_header_library(
+ TARGET
+ absl_memory
+ EXPORT_NAME
+ memory
+)
+
+#
+## TESTS
+#
+
+# test memory_test
+list(APPEND MEMORY_TEST_SRC
+ "memory_test.cc"
+ ${MEMORY_PUBLIC_HEADERS}
+)
+set(MEMORY_TEST_PUBLIC_LIBRARIES absl::base absl::memory)
+
+
+
+absl_test(
+ TARGET
+ memory_test
+ SOURCES
+ ${MEMORY_TEST_SRC}
+ PUBLIC_LIBRARIES
+ ${MEMORY_TEST_PUBLIC_LIBRARIES}
+)
+
+
+
#include <type_traits>
#include <utility>
+#include "absl/base/macros.h"
#include "absl/meta/type_traits.h"
namespace absl {
// Function Template: WrapUnique()
// -----------------------------------------------------------------------------
//
-// Transfers ownership of a raw pointer to a `std::unique_ptr`. The returned
-// value is a `std::unique_ptr` of deduced type.
+// Adopts ownership from a raw pointer and transfers it to the returned
+// `std::unique_ptr`, whose type is deduced.
//
// Example:
// X* NewX(int, int);
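A self-contained sketch of the pattern in the comment above (X and NewX mirror the header's illustrative names and are not real APIs):

#include <memory>

#include "absl/memory/memory.h"

struct X {
  X(int, int) {}
};

// A legacy factory that hands back a raw, owning pointer.
X* NewX(int a, int b) { return new X(a, b); }

void Demo() {
  // Ownership is adopted and the type is deduced as std::unique_ptr<X>.
  std::unique_ptr<X> x = absl::WrapUnique(NewX(1, 2));
}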
} // namespace memory_internal
+#if __cplusplus >= 201402L || defined(_MSC_VER)
+using std::make_unique;
+#else
// -----------------------------------------------------------------------------
// Function Template: make_unique<T>()
// -----------------------------------------------------------------------------
template <typename T, typename... Args>
typename memory_internal::MakeUniqueResult<T>::invalid make_unique(
Args&&... /* args */) = delete;
+#endif
// -----------------------------------------------------------------------------
// Function Template: RawPtr()
// -----------------------------------------------------------------------------
//
-// Extracts the raw pointer from a pointer-like 'ptr'. `absl::RawPtr` is useful
-// within templates that need to handle a complement of raw pointers,
+// Extracts the raw pointer from a pointer-like value `ptr`. `absl::RawPtr` is
+// useful within templates that need to handle a complement of raw pointers,
// `std::nullptr_t`, and smart pointers.
template <typename T>
auto RawPtr(T&& ptr) -> decltype(&*ptr) {
// Function Template: ShareUniquePtr()
// -----------------------------------------------------------------------------
//
-// Transforms a `std::unique_ptr` rvalue into a `std::shared_ptr`. The returned
-// value is a `std::shared_ptr` of deduced type and ownership is transferred to
-// the shared pointer.
+// Adopts a `std::unique_ptr` rvalue and returns a `std::shared_ptr` of deduced
+// type. Ownership (if any) of the held value is transferred to the returned
+// shared pointer.
//
// Example:
//
// CHECK_EQ(*sp, 10);
// CHECK(up == nullptr);
//
-// Note that this conversion is correct even when T is an array type, although
-// the resulting shared pointer may not be very useful.
+// Note that this conversion is correct even when T is an array type, and more
+// generally it works for *any* deleter of the `unique_ptr` (single-object
+// deleter, array deleter, or any custom deleter), since the deleter is adopted
+// by the shared pointer as well. The deleter is copied (unless it is a
+// reference).
//
// Implements the resolution of [LWG 2415](http://wg21.link/lwg2415), by which a
// null shared pointer does not attempt to call the deleter.
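A sketch of the deleter-preserving behavior described above (FileCloser is an illustrative custom deleter, not an Abseil type):

#include <cstdio>
#include <memory>

#include "absl/memory/memory.h"

struct FileCloser {
  void operator()(std::FILE* f) const {
    if (f != nullptr) std::fclose(f);
  }
};

void Demo() {
  std::unique_ptr<std::FILE, FileCloser> up(std::fopen("example.txt", "r"));
  // The FileCloser deleter is adopted along with the pointer; per LWG 2415, a
  // null unique_ptr yields a null shared_ptr without ever invoking the deleter.
  std::shared_ptr<std::FILE> sp = absl::ShareUniquePtr(std::move(up));
}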
EXPECT_THAT(ArrayWatch::allocs(), ElementsAre(5 * sizeof(ArrayWatch)));
}
+TEST(Make_UniqueTest, NotAmbiguousWithStdMakeUnique) {
+ // Ensure that absl::make_unique is not ambiguous with std::make_unique.
+ // In C++14 mode, the below call to make_unique has both types as candidates.
+ struct TakesStdType {
+ explicit TakesStdType(const std::vector<int> &vec) {}
+ };
+ using absl::make_unique;
+ make_unique<TakesStdType>(std::vector<int>());
+}
+
#if 0
// TODO(billydonahue): Make a proper NC test.
// These tests shouldn't compile.
--- /dev/null
+#
+# Copyright 2017 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+list(APPEND META_PUBLIC_HEADERS
+ "type_traits.h"
+)
+
+
+#
+## TESTS
+#
+
+# test type_traits_test
+list(APPEND TYPE_TRAITS_TEST_SRC
+ "type_traits_test.cc"
+ ${META_PUBLIC_HEADERS}
+)
+
+absl_header_library(
+ TARGET
+ absl_meta
+ EXPORT_NAME
+ meta
+ )
+
+absl_test(
+ TARGET
+ type_traits_test
+ SOURCES
+ ${TYPE_TRAITS_TEST_SRC}
+ PUBLIC_LIBRARIES
+ ${TYPE_TRAITS_TEST_PUBLIC_LIBRARIES} absl::meta
+)
+
+
+
//
// Determines whether the passed type `T` is trivially destructible.
//
-// This metafunction is designed to be a drop-in replacement for the C++17
-// `std::is_trivially_destructible()` metafunction.
+// This metafunction is designed to be a drop-in replacement for the C++11
+// `std::is_trivially_destructible()` metafunction for platforms that have
+// incomplete C++11 support (such as libstdc++ 4.x). On any platforms that do
+// fully support C++11, we check whether this yields the same result as the std
+// implementation.
//
// NOTE: the extensions (__has_trivial_xxx) are implemented in gcc (version >=
// 4.3) and clang. Since we are supporting libstdc++ > 4.7, they should always
//
// Determines whether the passed type `T` is trivially default constructible.
//
-// This metafunction is designed to be a drop-in replacement for the C++17
-// `std::is_trivially_default_constructible()` metafunction.
+// This metafunction is designed to be a drop-in replacement for the C++11
+// `std::is_trivially_default_constructible()` metafunction for platforms that
+// have incomplete C++11 support (such as libstdc++ 4.x). On any platforms that
+// do fully support C++11, we check whether this yields the same result as the
+// std implementation.
//
// NOTE: according to the C++ standard, Section: 20.15.4.3 [meta.unary.prop]
// "The predicate condition for a template specialization is_constructible<T,
//
// Determines whether the passed type `T` is trivially copy constructible.
//
-// This metafunction is designed to be a drop-in replacement for the C++17
-// `std::is_trivially_copy_constructible()` metafunction.
+// This metafunction is designed to be a drop-in replacement for the C++11
+// `std::is_trivially_copy_constructible()` metafunction for platforms that have
+// incomplete C++11 support (such as libstdc++ 4.x). On any platforms that do
+// fully support C++11, we check whether this yields the same result as the std
+// implementation.
//
// NOTE: `T obj(declval<const T&>());` needs to be well-formed and not call any
// nontrivial operation. Nontrivally destructible types will cause the
//
// Determines whether the passed type `T` is trivially copy assignable.
//
-// This metafunction is designed to be a drop-in replacement for the C++17
-// `std::is_trivially_copy_assignable()` metafunction.
+// This metafunction is designed to be a drop-in replacement for the C++11
+// `std::is_trivially_copy_assignable()` metafunction for platforms that have
+// incomplete C++11 support (such as libstdc++ 4.x). On any platforms that do
+// fully support C++11, we check whether this yields the same result as the std
+// implementation.
//
// NOTE: `is_assignable<T, U>::value` is `true` if the expression
// `declval<T>() = declval<U>()` is well-formed when treated as an unevaluated
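A minimal usage sketch of the backported traits touched by this change; on a standard library with complete C++11 trait support they are expected to agree with their std:: counterparts, which is exactly the cross-check described in the comments above:

#include "absl/meta/type_traits.h"

struct Pod {
  int x;
  double y;
};

// The absl:: traits remain usable even where the standard library's C++11
// trait set is incomplete; on a fully conforming library they are expected
// to agree with the std:: equivalents (e.g. std::is_trivially_destructible).
static_assert(absl::is_trivially_destructible<Pod>::value, "");
static_assert(absl::is_trivially_default_constructible<Pod>::value, "");
static_assert(absl::is_trivially_copy_constructible<Pod>::value, "");
static_assert(absl::is_trivially_copy_assignable<Pod>::value, "");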
using ::testing::StaticAssertTypeEq;
+template <class T, class U>
+struct simple_pair {
+ T first;
+ U second;
+};
+
struct Dummy {};
TEST(VoidTTest, BasicUsage) {
EXPECT_TRUE(
absl::is_trivially_default_constructible<TrivialDefaultCtor10>::value);
- // Verify that std::pair has non-trivial constructors.
- EXPECT_FALSE(
- (absl::is_trivially_default_constructible<std::pair<int, char*>>::value));
+ // Verify that simple_pair has trivial constructors where applicable.
+ EXPECT_TRUE((absl::is_trivially_default_constructible<
+ simple_pair<int, char*>>::value));
// Verify that types without trivial constructors are
// correctly marked as such.
// types with vtables
EXPECT_FALSE(absl::is_trivially_copy_constructible<Base>::value);
- // Verify that std pair of such types is trivially copy constructible
- EXPECT_TRUE(
- (absl::is_trivially_copy_constructible<std::pair<int, char*>>::value));
+ // Verify that simple_pair of such types is trivially copy constructible
EXPECT_TRUE(
- (absl::is_trivially_copy_constructible<std::pair<int, Trivial>>::value));
+ (absl::is_trivially_copy_constructible<simple_pair<int, char*>>::value));
+ EXPECT_TRUE((
+ absl::is_trivially_copy_constructible<simple_pair<int, Trivial>>::value));
EXPECT_TRUE((absl::is_trivially_copy_constructible<
- std::pair<int, TrivialCopyCtor>>::value));
+ simple_pair<int, TrivialCopyCtor>>::value));
// Verify that arrays are not
typedef int int10[10];
EXPECT_FALSE(absl::is_trivially_copy_constructible<int10>::value);
- // Verify that pairs of types without trivial copy constructors
+ // Verify that simple_pairs of types without trivial copy constructors
// are not marked as trivial.
EXPECT_FALSE((absl::is_trivially_copy_constructible<
- std::pair<int, std::string>>::value));
+ simple_pair<int, std::string>>::value));
EXPECT_FALSE((absl::is_trivially_copy_constructible<
- std::pair<std::string, int>>::value));
+ simple_pair<std::string, int>>::value));
// Verify that types without trivial copy constructors are
// correctly marked as such.
typedef int int10[10];
EXPECT_FALSE(absl::is_trivially_copy_assignable<int10>::value);
- // Verify that std::pair is not trivially assignable
- EXPECT_FALSE(
- (absl::is_trivially_copy_assignable<std::pair<int, char*>>::value));
+ // Verify that simple_pair is trivially assignable
+ EXPECT_TRUE(
+ (absl::is_trivially_copy_assignable<simple_pair<int, char*>>::value));
// Verify that types without trivial copy constructors are
// correctly marked as such.
EXPECT_TRUE(absl::is_trivially_destructible<TrivialDestructor>::value);
EXPECT_FALSE(absl::is_trivially_destructible<NonTrivialDestructor>::value);
- // std::pair of such types is trivial
- EXPECT_TRUE((absl::is_trivially_destructible<std::pair<int, int>>::value));
+ // simple_pair of such types is trivial
+ EXPECT_TRUE((absl::is_trivially_destructible<simple_pair<int, int>>::value));
EXPECT_TRUE((absl::is_trivially_destructible<
- std::pair<Trivial, TrivialDestructor>>::value));
+ simple_pair<Trivial, TrivialDestructor>>::value));
// array of such types is trivial
typedef int int10[10];
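The move from std::pair to a local simple_pair reflects the fact that the standard does not guarantee std::pair's special members are trivial (it declares its own operator=, for example), so the previous expectations depended on implementation details. A hedged sketch of the distinction, with a stand-in aggregate named aggregate_pair:

#include "absl/meta/type_traits.h"

template <class T, class U>
struct aggregate_pair {  // stand-in for the simple_pair used in the test
  T first;
  U second;
};

// An aggregate whose members are trivially copy assignable keeps a trivial
// copy assignment operator...
static_assert(
    absl::is_trivially_copy_assignable<aggregate_pair<int, char*>>::value, "");
// ...whereas std::pair declares its own operator=, so the standard does not
// guarantee that std::pair<int, char*> reports as trivially copy assignable.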
cc_library(
name = "int128",
- srcs = ["int128.cc"],
+ srcs = [
+ "int128.cc",
+ "int128_have_intrinsic.inc",
+ "int128_no_intrinsic.inc",
+ ],
hdrs = ["int128.h"],
copts = ABSL_DEFAULT_COPTS,
deps = [
name = "int128_test",
size = "small",
srcs = [
+ "int128_stream_test.cc",
"int128_test.cc",
],
copts = ABSL_TEST_COPTS,
--- /dev/null
+#
+# Copyright 2017 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+list(APPEND NUMERIC_PUBLIC_HEADERS
+ "int128.h"
+)
+
+
+# library 128
+list(APPEND INT128_SRC
+ "int128.cc"
+ ${NUMERIC_PUBLIC_HEADERS}
+)
+absl_library(
+ TARGET
+ absl_int128
+ SOURCES
+ ${INT128_SRC}
+ PUBLIC_LIBRARIES
+ ${INT128_PUBLIC_LIBRARIES}
+ EXPORT_NAME
+ int128
+)
+
+
+absl_header_library(
+ TARGET
+ absl_numeric
+ PUBLIC_LIBRARIES
+ absl::int128
+ EXPORT_NAME
+ numeric
+)
+
+# test int128_test
+set(INT128_TEST_SRC "int128_test.cc")
+set(INT128_TEST_PUBLIC_LIBRARIES absl::numeric absl::base)
+
+absl_test(
+ TARGET
+ int128_test
+ SOURCES
+ ${INT128_TEST_SRC}
+ PUBLIC_LIBRARIES
+ ${INT128_TEST_PUBLIC_LIBRARIES}
+)
+
+
+
uint128::uint128(double v) : uint128(Initialize128FromFloat(v)) {}
uint128::uint128(long double v) : uint128(Initialize128FromFloat(v)) {}
-uint128& uint128::operator/=(const uint128& divisor) {
+uint128& uint128::operator/=(uint128 other) {
uint128 quotient = 0;
uint128 remainder = 0;
- DivModImpl(*this, divisor, &quotient, &remainder);
+ DivModImpl(*this, other, &quotient, &remainder);
*this = quotient;
return *this;
}
-uint128& uint128::operator%=(const uint128& divisor) {
+uint128& uint128::operator%=(uint128 other) {
uint128 quotient = 0;
uint128 remainder = 0;
- DivModImpl(*this, divisor, &quotient, &remainder);
+ DivModImpl(*this, other, &quotient, &remainder);
*this = remainder;
return *this;
}
-std::ostream& operator<<(std::ostream& o, const uint128& b) {
+std::ostream& operator<<(std::ostream& o, uint128 b) {
std::ios_base::fmtflags flags = o.flags();
// Select a divisor which is the largest power of the base < 2^64.
// Add the requisite padding.
std::streamsize width = o.width(0);
if (static_cast<size_t>(width) > rep.size()) {
- if ((flags & std::ios::adjustfield) == std::ios::left) {
+ std::ios::fmtflags adjustfield = flags & std::ios::adjustfield;
+ if (adjustfield == std::ios::left) {
rep.append(width - rep.size(), o.fill());
+ } else if (adjustfield == std::ios::internal &&
+ (flags & std::ios::showbase) &&
+ (flags & std::ios::basefield) == std::ios::hex && b != 0) {
+ rep.insert(2, width - rep.size(), o.fill());
} else {
rep.insert(0, width - rep.size(), o.fill());
}
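A small usage sketch of the padding rules handled above, assuming the absl::uint128 stream operator from this change: with std::internal, std::showbase and std::hex the fill is inserted between the "0x" prefix and the digits, while std::right pads before the prefix. These expectations mirror the "0x__25" / "__0x25" rows in the stream test added below.

#include <iomanip>
#include <iostream>
#include <sstream>

#include "absl/numeric/int128.h"

int main() {
  absl::uint128 v = 37;

  std::ostringstream os;
  os << std::hex << std::showbase << std::internal
     << std::setfill('_') << std::setw(6) << v;
  // Expected: "0x__25" -- the fill lands after the base prefix.
  std::cout << os.str() << "\n";

  os.str("");
  os << std::hex << std::showbase << std::right
     << std::setfill('_') << std::setw(6) << v;
  // Expected: "__0x25" -- right adjustment pads before the prefix.
  std::cout << os.str() << "\n";
  return 0;
}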
//
// Example:
//
-// float y = kuint128max; // Error. uint128 cannot be implicitly converted
-// // to float.
+// float y = absl::kuint128max; // Error. uint128 cannot be implicitly
+// // converted to float.
//
-// uint128 v;
-// uint64_t i = v // Error
-// uint64_t i = static_cast<uint64_t>(v) // OK
+// absl::uint128 v;
+// uint64_t i = v; // Error
+// uint64_t i = static_cast<uint64_t>(v); // OK
//
class alignas(16) uint128 {
public:
constexpr uint128(__int128 v); // NOLINT(runtime/explicit)
constexpr uint128(unsigned __int128 v); // NOLINT(runtime/explicit)
#endif // ABSL_HAVE_INTRINSIC_INT128
- explicit uint128(float v); // NOLINT(runtime/explicit)
- explicit uint128(double v); // NOLINT(runtime/explicit)
- explicit uint128(long double v); // NOLINT(runtime/explicit)
+ explicit uint128(float v);
+ explicit uint128(double v);
+ explicit uint128(long double v);
// Assignment operators from arithmetic types
uint128& operator=(int v);
// Trivial copy constructor, assignment operator and destructor.
// Arithmetic operators.
- uint128& operator+=(const uint128& other);
- uint128& operator-=(const uint128& other);
- uint128& operator*=(const uint128& other);
+ uint128& operator+=(uint128 other);
+ uint128& operator-=(uint128 other);
+ uint128& operator*=(uint128 other);
// Long division/modulo for uint128.
- uint128& operator/=(const uint128& other);
- uint128& operator%=(const uint128& other);
+ uint128& operator/=(uint128 other);
+ uint128& operator%=(uint128 other);
uint128 operator++(int);
uint128 operator--(int);
uint128& operator<<=(int);
uint128& operator>>=(int);
- uint128& operator&=(const uint128& other);
- uint128& operator|=(const uint128& other);
- uint128& operator^=(const uint128& other);
+ uint128& operator&=(uint128 other);
+ uint128& operator|=(uint128 other);
+ uint128& operator^=(uint128 other);
uint128& operator++();
uint128& operator--();
// Uint128Low64()
//
// Returns the lower 64-bit value of a `uint128` value.
- friend uint64_t Uint128Low64(const uint128& v);
+ friend constexpr uint64_t Uint128Low64(uint128 v);
// Uint128High64()
//
// Returns the higher 64-bit value of a `uint128` value.
- friend uint64_t Uint128High64(const uint128& v);
+ friend constexpr uint64_t Uint128High64(uint128 v);
// MakeUInt128()
//
extern const uint128 kuint128max;
// allow uint128 to be logged
-extern std::ostream& operator<<(std::ostream& o, const uint128& b);
+extern std::ostream& operator<<(std::ostream& o, uint128 b);
-// TODO(strel) add operator>>(std::istream&, uint128&)
-
-// Methods to access low and high pieces of 128-bit value.
-uint64_t Uint128Low64(const uint128& v);
-uint64_t Uint128High64(const uint128& v);
+// TODO(strel) add operator>>(std::istream&, uint128)
// TODO(absl-team): Implement signed 128-bit type
// Implementation details follow
// --------------------------------------------------------------------------
-inline constexpr uint128 MakeUint128(uint64_t top, uint64_t bottom) {
+constexpr uint128 MakeUint128(uint64_t top, uint64_t bottom) {
return uint128(top, bottom);
}
// Assignment from integer types.
-inline uint128& uint128::operator=(int v) {
- return *this = uint128(v);
-}
+inline uint128& uint128::operator=(int v) { return *this = uint128(v); }
inline uint128& uint128::operator=(unsigned int v) {
return *this = uint128(v);
// Shift and arithmetic operators.
-inline uint128 operator<<(const uint128& lhs, int amount) {
+inline uint128 operator<<(uint128 lhs, int amount) {
return uint128(lhs) <<= amount;
}
-inline uint128 operator>>(const uint128& lhs, int amount) {
+inline uint128 operator>>(uint128 lhs, int amount) {
return uint128(lhs) >>= amount;
}
-inline uint128 operator+(const uint128& lhs, const uint128& rhs) {
+inline uint128 operator+(uint128 lhs, uint128 rhs) {
return uint128(lhs) += rhs;
}
-inline uint128 operator-(const uint128& lhs, const uint128& rhs) {
+inline uint128 operator-(uint128 lhs, uint128 rhs) {
return uint128(lhs) -= rhs;
}
-inline uint128 operator*(const uint128& lhs, const uint128& rhs) {
+inline uint128 operator*(uint128 lhs, uint128 rhs) {
return uint128(lhs) *= rhs;
}
-inline uint128 operator/(const uint128& lhs, const uint128& rhs) {
+inline uint128 operator/(uint128 lhs, uint128 rhs) {
return uint128(lhs) /= rhs;
}
-inline uint128 operator%(const uint128& lhs, const uint128& rhs) {
+inline uint128 operator%(uint128 lhs, uint128 rhs) {
return uint128(lhs) %= rhs;
}
-inline uint64_t Uint128Low64(const uint128& v) { return v.lo_; }
+constexpr uint64_t Uint128Low64(uint128 v) { return v.lo_; }
-inline uint64_t Uint128High64(const uint128& v) { return v.hi_; }
+constexpr uint64_t Uint128High64(uint128 v) { return v.hi_; }
// Constructors from integer types.
#if defined(ABSL_IS_LITTLE_ENDIAN)
-inline constexpr uint128::uint128(uint64_t top, uint64_t bottom)
+constexpr uint128::uint128(uint64_t top, uint64_t bottom)
: lo_(bottom), hi_(top) {}
-inline constexpr uint128::uint128(int v)
+constexpr uint128::uint128(int v)
: lo_(v), hi_(v < 0 ? std::numeric_limits<uint64_t>::max() : 0) {}
-inline constexpr uint128::uint128(long v) // NOLINT(runtime/int)
+constexpr uint128::uint128(long v) // NOLINT(runtime/int)
: lo_(v), hi_(v < 0 ? std::numeric_limits<uint64_t>::max() : 0) {}
-inline constexpr uint128::uint128(long long v) // NOLINT(runtime/int)
+constexpr uint128::uint128(long long v) // NOLINT(runtime/int)
: lo_(v), hi_(v < 0 ? std::numeric_limits<uint64_t>::max() : 0) {}
-inline constexpr uint128::uint128(unsigned int v) : lo_(v), hi_(0) {}
+constexpr uint128::uint128(unsigned int v) : lo_(v), hi_(0) {}
// NOLINTNEXTLINE(runtime/int)
-inline constexpr uint128::uint128(unsigned long v) : lo_(v), hi_(0) {}
+constexpr uint128::uint128(unsigned long v) : lo_(v), hi_(0) {}
// NOLINTNEXTLINE(runtime/int)
-inline constexpr uint128::uint128(unsigned long long v)
- : lo_(v), hi_(0) {}
+constexpr uint128::uint128(unsigned long long v) : lo_(v), hi_(0) {}
#ifdef ABSL_HAVE_INTRINSIC_INT128
-inline constexpr uint128::uint128(__int128 v)
+constexpr uint128::uint128(__int128 v)
: lo_(static_cast<uint64_t>(v & ~uint64_t{0})),
hi_(static_cast<uint64_t>(static_cast<unsigned __int128>(v) >> 64)) {}
-inline constexpr uint128::uint128(unsigned __int128 v)
+constexpr uint128::uint128(unsigned __int128 v)
: lo_(static_cast<uint64_t>(v & ~uint64_t{0})),
hi_(static_cast<uint64_t>(v >> 64)) {}
#endif // ABSL_HAVE_INTRINSIC_INT128
#elif defined(ABSL_IS_BIG_ENDIAN)
-inline constexpr uint128::uint128(uint64_t top, uint64_t bottom)
+constexpr uint128::uint128(uint64_t top, uint64_t bottom)
: hi_(top), lo_(bottom) {}
-inline constexpr uint128::uint128(int v)
+constexpr uint128::uint128(int v)
: hi_(v < 0 ? std::numeric_limits<uint64_t>::max() : 0), lo_(v) {}
-inline constexpr uint128::uint128(long v) // NOLINT(runtime/int)
+constexpr uint128::uint128(long v) // NOLINT(runtime/int)
: hi_(v < 0 ? std::numeric_limits<uint64_t>::max() : 0), lo_(v) {}
-inline constexpr uint128::uint128(long long v) // NOLINT(runtime/int)
+constexpr uint128::uint128(long long v) // NOLINT(runtime/int)
: hi_(v < 0 ? std::numeric_limits<uint64_t>::max() : 0), lo_(v) {}
-inline constexpr uint128::uint128(unsigned int v) : hi_(0), lo_(v) {}
+constexpr uint128::uint128(unsigned int v) : hi_(0), lo_(v) {}
// NOLINTNEXTLINE(runtime/int)
-inline constexpr uint128::uint128(unsigned long v) : hi_(0), lo_(v) {}
+constexpr uint128::uint128(unsigned long v) : hi_(0), lo_(v) {}
// NOLINTNEXTLINE(runtime/int)
-inline constexpr uint128::uint128(unsigned long long v)
- : hi_(0), lo_(v) {}
+constexpr uint128::uint128(unsigned long long v) : hi_(0), lo_(v) {}
#ifdef ABSL_HAVE_INTRINSIC_INT128
-inline constexpr uint128::uint128(__int128 v)
+constexpr uint128::uint128(__int128 v)
: hi_(static_cast<uint64_t>(static_cast<unsigned __int128>(v) >> 64)),
lo_(static_cast<uint64_t>(v & ~uint64_t{0})) {}
-inline constexpr uint128::uint128(unsigned __int128 v)
+constexpr uint128::uint128(unsigned __int128 v)
: hi_(static_cast<uint64_t>(v >> 64)),
lo_(static_cast<uint64_t>(v & ~uint64_t{0})) {}
#endif // ABSL_HAVE_INTRINSIC_INT128
// Conversion operators to integer types.
-inline constexpr uint128::operator bool() const {
- return lo_ || hi_;
-}
+constexpr uint128::operator bool() const { return lo_ || hi_; }
-inline constexpr uint128::operator char() const {
- return static_cast<char>(lo_);
-}
+constexpr uint128::operator char() const { return static_cast<char>(lo_); }
-inline constexpr uint128::operator signed char() const {
+constexpr uint128::operator signed char() const {
return static_cast<signed char>(lo_);
}
-inline constexpr uint128::operator unsigned char() const {
+constexpr uint128::operator unsigned char() const {
return static_cast<unsigned char>(lo_);
}
-inline constexpr uint128::operator char16_t() const {
+constexpr uint128::operator char16_t() const {
return static_cast<char16_t>(lo_);
}
-inline constexpr uint128::operator char32_t() const {
+constexpr uint128::operator char32_t() const {
return static_cast<char32_t>(lo_);
}
-inline constexpr uint128::operator wchar_t() const {
+constexpr uint128::operator wchar_t() const {
return static_cast<wchar_t>(lo_);
}
// NOLINTNEXTLINE(runtime/int)
-inline constexpr uint128::operator short() const {
- return static_cast<short>(lo_); // NOLINT(runtime/int)
-}
+constexpr uint128::operator short() const { return static_cast<short>(lo_); }
-// NOLINTNEXTLINE(runtime/int)
-inline constexpr uint128::operator unsigned short() const {
- return static_cast<unsigned short>(lo_); // NOLINT(runtime/int)
+constexpr uint128::operator unsigned short() const { // NOLINT(runtime/int)
+ return static_cast<unsigned short>(lo_); // NOLINT(runtime/int)
}
-inline constexpr uint128::operator int() const {
- return static_cast<int>(lo_);
-}
+constexpr uint128::operator int() const { return static_cast<int>(lo_); }
-inline constexpr uint128::operator unsigned int() const {
+constexpr uint128::operator unsigned int() const {
return static_cast<unsigned int>(lo_);
}
// NOLINTNEXTLINE(runtime/int)
-inline constexpr uint128::operator long() const {
- return static_cast<long>(lo_); // NOLINT(runtime/int)
-}
+constexpr uint128::operator long() const { return static_cast<long>(lo_); }
-// NOLINTNEXTLINE(runtime/int)
-inline constexpr uint128::operator unsigned long() const {
- return static_cast<unsigned long>(lo_); // NOLINT(runtime/int)
+constexpr uint128::operator unsigned long() const { // NOLINT(runtime/int)
+ return static_cast<unsigned long>(lo_); // NOLINT(runtime/int)
}
-// NOLINTNEXTLINE(runtime/int)
-inline constexpr uint128::operator long long() const {
- return static_cast<long long>(lo_); // NOLINT(runtime/int)
+constexpr uint128::operator long long() const { // NOLINT(runtime/int)
+ return static_cast<long long>(lo_); // NOLINT(runtime/int)
}
-// NOLINTNEXTLINE(runtime/int)
-inline constexpr uint128::operator unsigned long long() const {
- return static_cast<unsigned long long>(lo_); // NOLINT(runtime/int)
+constexpr uint128::operator unsigned long long() const { // NOLINT(runtime/int)
+ return static_cast<unsigned long long>(lo_); // NOLINT(runtime/int)
}
#ifdef ABSL_HAVE_INTRINSIC_INT128
-inline constexpr uint128::operator __int128() const {
+constexpr uint128::operator __int128() const {
return (static_cast<__int128>(hi_) << 64) + lo_;
}
-inline constexpr uint128::operator unsigned __int128() const {
+constexpr uint128::operator unsigned __int128() const {
return (static_cast<unsigned __int128>(hi_) << 64) + lo_;
}
#endif // ABSL_HAVE_INTRINSIC_INT128
// Comparison operators.
-inline bool operator==(const uint128& lhs, const uint128& rhs) {
+inline bool operator==(uint128 lhs, uint128 rhs) {
return (Uint128Low64(lhs) == Uint128Low64(rhs) &&
Uint128High64(lhs) == Uint128High64(rhs));
}
-inline bool operator!=(const uint128& lhs, const uint128& rhs) {
+inline bool operator!=(uint128 lhs, uint128 rhs) {
return !(lhs == rhs);
}
-inline bool operator<(const uint128& lhs, const uint128& rhs) {
+inline bool operator<(uint128 lhs, uint128 rhs) {
return (Uint128High64(lhs) == Uint128High64(rhs))
? (Uint128Low64(lhs) < Uint128Low64(rhs))
: (Uint128High64(lhs) < Uint128High64(rhs));
}
-inline bool operator>(const uint128& lhs, const uint128& rhs) {
+inline bool operator>(uint128 lhs, uint128 rhs) {
return (Uint128High64(lhs) == Uint128High64(rhs))
? (Uint128Low64(lhs) > Uint128Low64(rhs))
: (Uint128High64(lhs) > Uint128High64(rhs));
}
-inline bool operator<=(const uint128& lhs, const uint128& rhs) {
+inline bool operator<=(uint128 lhs, uint128 rhs) {
return (Uint128High64(lhs) == Uint128High64(rhs))
? (Uint128Low64(lhs) <= Uint128Low64(rhs))
: (Uint128High64(lhs) <= Uint128High64(rhs));
}
-inline bool operator>=(const uint128& lhs, const uint128& rhs) {
+inline bool operator>=(uint128 lhs, uint128 rhs) {
return (Uint128High64(lhs) == Uint128High64(rhs))
? (Uint128Low64(lhs) >= Uint128Low64(rhs))
: (Uint128High64(lhs) >= Uint128High64(rhs));
// Unary operators.
-inline uint128 operator-(const uint128& val) {
+inline uint128 operator-(uint128 val) {
const uint64_t hi_flip = ~Uint128High64(val);
const uint64_t lo_flip = ~Uint128Low64(val);
const uint64_t lo_add = lo_flip + 1;
return MakeUint128(hi_flip, lo_add);
}
-inline bool operator!(const uint128& val) {
+inline bool operator!(uint128 val) {
return !Uint128High64(val) && !Uint128Low64(val);
}
// Logical operators.
-inline uint128 operator~(const uint128& val) {
+inline uint128 operator~(uint128 val) {
return MakeUint128(~Uint128High64(val), ~Uint128Low64(val));
}
-inline uint128 operator|(const uint128& lhs, const uint128& rhs) {
+inline uint128 operator|(uint128 lhs, uint128 rhs) {
return MakeUint128(Uint128High64(lhs) | Uint128High64(rhs),
Uint128Low64(lhs) | Uint128Low64(rhs));
}
-inline uint128 operator&(const uint128& lhs, const uint128& rhs) {
+inline uint128 operator&(uint128 lhs, uint128 rhs) {
return MakeUint128(Uint128High64(lhs) & Uint128High64(rhs),
Uint128Low64(lhs) & Uint128Low64(rhs));
}
-inline uint128 operator^(const uint128& lhs, const uint128& rhs) {
+inline uint128 operator^(uint128 lhs, uint128 rhs) {
return MakeUint128(Uint128High64(lhs) ^ Uint128High64(rhs),
Uint128Low64(lhs) ^ Uint128Low64(rhs));
}
-inline uint128& uint128::operator|=(const uint128& other) {
+inline uint128& uint128::operator|=(uint128 other) {
hi_ |= other.hi_;
lo_ |= other.lo_;
return *this;
}
-inline uint128& uint128::operator&=(const uint128& other) {
+inline uint128& uint128::operator&=(uint128 other) {
hi_ &= other.hi_;
lo_ &= other.lo_;
return *this;
}
-inline uint128& uint128::operator^=(const uint128& other) {
+inline uint128& uint128::operator^=(uint128 other) {
hi_ ^= other.hi_;
lo_ ^= other.lo_;
return *this;
return *this;
}
-inline uint128& uint128::operator+=(const uint128& other) {
+inline uint128& uint128::operator+=(uint128 other) {
hi_ += other.hi_;
uint64_t lolo = lo_ + other.lo_;
if (lolo < lo_)
return *this;
}
-inline uint128& uint128::operator-=(const uint128& other) {
+inline uint128& uint128::operator-=(uint128 other) {
hi_ -= other.hi_;
if (other.lo_ > lo_) --hi_;
lo_ -= other.lo_;
return *this;
}
-inline uint128& uint128::operator*=(const uint128& other) {
+inline uint128& uint128::operator*=(uint128 other) {
#if defined(ABSL_HAVE_INTRINSIC_INT128)
// TODO(strel) Remove once alignment issues are resolved and unsigned __int128
// can be used for uint128 storage.
return *this;
}
+#if defined(ABSL_HAVE_INTRINSIC_INT128)
+#include "absl/numeric/int128_have_intrinsic.inc"
+#else // ABSL_HAVE_INTRINSIC_INT128
+#include "absl/numeric/int128_no_intrinsic.inc"
+#endif // ABSL_HAVE_INTRINSIC_INT128
+
} // namespace absl
#endif // ABSL_NUMERIC_INT128_H_
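Because MakeUint128(), Uint128Low64() and Uint128High64() are now constexpr and take uint128 by value, 128-bit constants can be built and decomposed at compile time; a minimal sketch:

#include "absl/numeric/int128.h"

// Compile-time construction and decomposition of a 128-bit constant.
constexpr absl::uint128 kValue = absl::MakeUint128(0x0123456789abcdef, 42);
static_assert(absl::Uint128High64(kValue) == 0x0123456789abcdefu, "");
static_assert(absl::Uint128Low64(kValue) == 42u, "");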
// This file will contain :int128 implementation details that depend on internal
// representation when ABSL_HAVE_INTRINSIC_INT128 is defined. This file will be
-// included by int128.h.
\ No newline at end of file
+// included by int128.h.
--- /dev/null
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/numeric/int128.h"
+
+#include <sstream>
+#include <string>
+#include <vector>
+
+#include "gtest/gtest.h"
+
+namespace {
+
+struct Uint128TestCase {
+ absl::uint128 value;
+ std::ios_base::fmtflags flags;
+ std::streamsize width;
+ const char* expected;
+};
+
+constexpr char kFill = '_';
+
+std::string StreamFormatToString(std::ios_base::fmtflags flags,
+ std::streamsize width) {
+ std::vector<const char*> flagstr;
+ switch (flags & std::ios::basefield) {
+ case std::ios::dec:
+ flagstr.push_back("std::ios::dec");
+ break;
+ case std::ios::oct:
+ flagstr.push_back("std::ios::oct");
+ break;
+ case std::ios::hex:
+ flagstr.push_back("std::ios::hex");
+ break;
+ default: // basefield not specified
+ break;
+ }
+ switch (flags & std::ios::adjustfield) {
+ case std::ios::left:
+ flagstr.push_back("std::ios::left");
+ break;
+ case std::ios::internal:
+ flagstr.push_back("std::ios::internal");
+ break;
+ case std::ios::right:
+ flagstr.push_back("std::ios::right");
+ break;
+ default: // adjustfield not specified
+ break;
+ }
+ if (flags & std::ios::uppercase) flagstr.push_back("std::ios::uppercase");
+ if (flags & std::ios::showbase) flagstr.push_back("std::ios::showbase");
+ if (flags & std::ios::showpos) flagstr.push_back("std::ios::showpos");
+
+ std::ostringstream msg;
+ msg << "\n StreamFormatToString(test_case.flags, test_case.width)\n "
+ "flags: ";
+ if (!flagstr.empty()) {
+ for (size_t i = 0; i < flagstr.size() - 1; ++i) msg << flagstr[i] << " | ";
+ msg << flagstr.back();
+ } else {
+ msg << "(default)";
+ }
+ msg << "\n width: " << width << "\n fill: '" << kFill << "'";
+ return msg.str();
+}
+
+void CheckUint128Case(const Uint128TestCase& test_case) {
+ std::ostringstream os;
+ os.flags(test_case.flags);
+ os.width(test_case.width);
+ os.fill(kFill);
+ os << test_case.value;
+ SCOPED_TRACE(StreamFormatToString(test_case.flags, test_case.width));
+ EXPECT_EQ(test_case.expected, os.str());
+}
+
+constexpr std::ios::fmtflags kDec = std::ios::dec;
+constexpr std::ios::fmtflags kOct = std::ios::oct;
+constexpr std::ios::fmtflags kHex = std::ios::hex;
+constexpr std::ios::fmtflags kLeft = std::ios::left;
+constexpr std::ios::fmtflags kInt = std::ios::internal;
+constexpr std::ios::fmtflags kRight = std::ios::right;
+constexpr std::ios::fmtflags kUpper = std::ios::uppercase;
+constexpr std::ios::fmtflags kBase = std::ios::showbase;
+constexpr std::ios::fmtflags kPos = std::ios::showpos;
+
+TEST(Uint128, OStreamValueTest) {
+ CheckUint128Case({1, kDec, /*width = */ 0, "1"});
+ CheckUint128Case({1, kOct, /*width = */ 0, "1"});
+ CheckUint128Case({1, kHex, /*width = */ 0, "1"});
+ CheckUint128Case({9, kDec, /*width = */ 0, "9"});
+ CheckUint128Case({9, kOct, /*width = */ 0, "11"});
+ CheckUint128Case({9, kHex, /*width = */ 0, "9"});
+ CheckUint128Case({12345, kDec, /*width = */ 0, "12345"});
+ CheckUint128Case({12345, kOct, /*width = */ 0, "30071"});
+ CheckUint128Case({12345, kHex, /*width = */ 0, "3039"});
+ CheckUint128Case(
+ {0x8000000000000000, kDec, /*width = */ 0, "9223372036854775808"});
+ CheckUint128Case(
+ {0x8000000000000000, kOct, /*width = */ 0, "1000000000000000000000"});
+ CheckUint128Case(
+ {0x8000000000000000, kHex, /*width = */ 0, "8000000000000000"});
+ CheckUint128Case({std::numeric_limits<uint64_t>::max(), kDec,
+ /*width = */ 0, "18446744073709551615"});
+ CheckUint128Case({std::numeric_limits<uint64_t>::max(), kOct,
+ /*width = */ 0, "1777777777777777777777"});
+ CheckUint128Case({std::numeric_limits<uint64_t>::max(), kHex,
+ /*width = */ 0, "ffffffffffffffff"});
+ CheckUint128Case(
+ {absl::MakeUint128(1, 0), kDec, /*width = */ 0, "18446744073709551616"});
+ CheckUint128Case({absl::MakeUint128(1, 0), kOct, /*width = */ 0,
+ "2000000000000000000000"});
+ CheckUint128Case(
+ {absl::MakeUint128(1, 0), kHex, /*width = */ 0, "10000000000000000"});
+ CheckUint128Case({absl::MakeUint128(0x8000000000000000, 0), kDec,
+ /*width = */ 0, "170141183460469231731687303715884105728"});
+ CheckUint128Case({absl::MakeUint128(0x8000000000000000, 0), kOct,
+ /*width = */ 0,
+ "2000000000000000000000000000000000000000000"});
+ CheckUint128Case({absl::MakeUint128(0x8000000000000000, 0), kHex,
+ /*width = */ 0, "80000000000000000000000000000000"});
+ CheckUint128Case({absl::kuint128max, kDec, /*width = */ 0,
+ "340282366920938463463374607431768211455"});
+ CheckUint128Case({absl::kuint128max, kOct, /*width = */ 0,
+ "3777777777777777777777777777777777777777777"});
+ CheckUint128Case({absl::kuint128max, kHex, /*width = */ 0,
+ "ffffffffffffffffffffffffffffffff"});
+}
+
+std::vector<Uint128TestCase> GetUint128FormatCases();
+
+TEST(Uint128, OStreamFormatTest) {
+ for (const Uint128TestCase& test_case : GetUint128FormatCases()) {
+ CheckUint128Case(test_case);
+ }
+}
+
+std::vector<Uint128TestCase> GetUint128FormatCases() {
+ return {
+ {0, std::ios_base::fmtflags(), /*width = */ 0, "0"},
+ {0, std::ios_base::fmtflags(), /*width = */ 6, "_____0"},
+ {0, kPos, /*width = */ 0, "0"},
+ {0, kPos, /*width = */ 6, "_____0"},
+ {0, kBase, /*width = */ 0, "0"},
+ {0, kBase, /*width = */ 6, "_____0"},
+ {0, kBase | kPos, /*width = */ 0, "0"},
+ {0, kBase | kPos, /*width = */ 6, "_____0"},
+ {0, kUpper, /*width = */ 0, "0"},
+ {0, kUpper, /*width = */ 6, "_____0"},
+ {0, kUpper | kPos, /*width = */ 0, "0"},
+ {0, kUpper | kPos, /*width = */ 6, "_____0"},
+ {0, kUpper | kBase, /*width = */ 0, "0"},
+ {0, kUpper | kBase, /*width = */ 6, "_____0"},
+ {0, kUpper | kBase | kPos, /*width = */ 0, "0"},
+ {0, kUpper | kBase | kPos, /*width = */ 6, "_____0"},
+ {0, kLeft, /*width = */ 0, "0"},
+ {0, kLeft, /*width = */ 6, "0_____"},
+ {0, kLeft | kPos, /*width = */ 0, "0"},
+ {0, kLeft | kPos, /*width = */ 6, "0_____"},
+ {0, kLeft | kBase, /*width = */ 0, "0"},
+ {0, kLeft | kBase, /*width = */ 6, "0_____"},
+ {0, kLeft | kBase | kPos, /*width = */ 0, "0"},
+ {0, kLeft | kBase | kPos, /*width = */ 6, "0_____"},
+ {0, kLeft | kUpper, /*width = */ 0, "0"},
+ {0, kLeft | kUpper, /*width = */ 6, "0_____"},
+ {0, kLeft | kUpper | kPos, /*width = */ 0, "0"},
+ {0, kLeft | kUpper | kPos, /*width = */ 6, "0_____"},
+ {0, kLeft | kUpper | kBase, /*width = */ 0, "0"},
+ {0, kLeft | kUpper | kBase, /*width = */ 6, "0_____"},
+ {0, kLeft | kUpper | kBase | kPos, /*width = */ 0, "0"},
+ {0, kLeft | kUpper | kBase | kPos, /*width = */ 6, "0_____"},
+ {0, kInt, /*width = */ 0, "0"},
+ {0, kInt, /*width = */ 6, "_____0"},
+ {0, kInt | kPos, /*width = */ 0, "0"},
+ {0, kInt | kPos, /*width = */ 6, "_____0"},
+ {0, kInt | kBase, /*width = */ 0, "0"},
+ {0, kInt | kBase, /*width = */ 6, "_____0"},
+ {0, kInt | kBase | kPos, /*width = */ 0, "0"},
+ {0, kInt | kBase | kPos, /*width = */ 6, "_____0"},
+ {0, kInt | kUpper, /*width = */ 0, "0"},
+ {0, kInt | kUpper, /*width = */ 6, "_____0"},
+ {0, kInt | kUpper | kPos, /*width = */ 0, "0"},
+ {0, kInt | kUpper | kPos, /*width = */ 6, "_____0"},
+ {0, kInt | kUpper | kBase, /*width = */ 0, "0"},
+ {0, kInt | kUpper | kBase, /*width = */ 6, "_____0"},
+ {0, kInt | kUpper | kBase | kPos, /*width = */ 0, "0"},
+ {0, kInt | kUpper | kBase | kPos, /*width = */ 6, "_____0"},
+ {0, kRight, /*width = */ 0, "0"},
+ {0, kRight, /*width = */ 6, "_____0"},
+ {0, kRight | kPos, /*width = */ 0, "0"},
+ {0, kRight | kPos, /*width = */ 6, "_____0"},
+ {0, kRight | kBase, /*width = */ 0, "0"},
+ {0, kRight | kBase, /*width = */ 6, "_____0"},
+ {0, kRight | kBase | kPos, /*width = */ 0, "0"},
+ {0, kRight | kBase | kPos, /*width = */ 6, "_____0"},
+ {0, kRight | kUpper, /*width = */ 0, "0"},
+ {0, kRight | kUpper, /*width = */ 6, "_____0"},
+ {0, kRight | kUpper | kPos, /*width = */ 0, "0"},
+ {0, kRight | kUpper | kPos, /*width = */ 6, "_____0"},
+ {0, kRight | kUpper | kBase, /*width = */ 0, "0"},
+ {0, kRight | kUpper | kBase, /*width = */ 6, "_____0"},
+ {0, kRight | kUpper | kBase | kPos, /*width = */ 0, "0"},
+ {0, kRight | kUpper | kBase | kPos, /*width = */ 6, "_____0"},
+ {0, kDec, /*width = */ 0, "0"},
+ {0, kDec, /*width = */ 6, "_____0"},
+ {0, kDec | kPos, /*width = */ 0, "0"},
+ {0, kDec | kPos, /*width = */ 6, "_____0"},
+ {0, kDec | kBase, /*width = */ 0, "0"},
+ {0, kDec | kBase, /*width = */ 6, "_____0"},
+ {0, kDec | kBase | kPos, /*width = */ 0, "0"},
+ {0, kDec | kBase | kPos, /*width = */ 6, "_____0"},
+ {0, kDec | kUpper, /*width = */ 0, "0"},
+ {0, kDec | kUpper, /*width = */ 6, "_____0"},
+ {0, kDec | kUpper | kPos, /*width = */ 0, "0"},
+ {0, kDec | kUpper | kPos, /*width = */ 6, "_____0"},
+ {0, kDec | kUpper | kBase, /*width = */ 0, "0"},
+ {0, kDec | kUpper | kBase, /*width = */ 6, "_____0"},
+ {0, kDec | kUpper | kBase | kPos, /*width = */ 0, "0"},
+ {0, kDec | kUpper | kBase | kPos, /*width = */ 6, "_____0"},
+ {0, kDec | kLeft, /*width = */ 0, "0"},
+ {0, kDec | kLeft, /*width = */ 6, "0_____"},
+ {0, kDec | kLeft | kPos, /*width = */ 0, "0"},
+ {0, kDec | kLeft | kPos, /*width = */ 6, "0_____"},
+ {0, kDec | kLeft | kBase, /*width = */ 0, "0"},
+ {0, kDec | kLeft | kBase, /*width = */ 6, "0_____"},
+ {0, kDec | kLeft | kBase | kPos, /*width = */ 0, "0"},
+ {0, kDec | kLeft | kBase | kPos, /*width = */ 6, "0_____"},
+ {0, kDec | kLeft | kUpper, /*width = */ 0, "0"},
+ {0, kDec | kLeft | kUpper, /*width = */ 6, "0_____"},
+ {0, kDec | kLeft | kUpper | kPos, /*width = */ 0, "0"},
+ {0, kDec | kLeft | kUpper | kPos, /*width = */ 6, "0_____"},
+ {0, kDec | kLeft | kUpper | kBase, /*width = */ 0, "0"},
+ {0, kDec | kLeft | kUpper | kBase, /*width = */ 6, "0_____"},
+ {0, kDec | kLeft | kUpper | kBase | kPos, /*width = */ 0, "0"},
+ {0, kDec | kLeft | kUpper | kBase | kPos, /*width = */ 6, "0_____"},
+ {0, kDec | kInt, /*width = */ 0, "0"},
+ {0, kDec | kInt, /*width = */ 6, "_____0"},
+ {0, kDec | kInt | kPos, /*width = */ 0, "0"},
+ {0, kDec | kInt | kPos, /*width = */ 6, "_____0"},
+ {0, kDec | kInt | kBase, /*width = */ 0, "0"},
+ {0, kDec | kInt | kBase, /*width = */ 6, "_____0"},
+ {0, kDec | kInt | kBase | kPos, /*width = */ 0, "0"},
+ {0, kDec | kInt | kBase | kPos, /*width = */ 6, "_____0"},
+ {0, kDec | kInt | kUpper, /*width = */ 0, "0"},
+ {0, kDec | kInt | kUpper, /*width = */ 6, "_____0"},
+ {0, kDec | kInt | kUpper | kPos, /*width = */ 0, "0"},
+ {0, kDec | kInt | kUpper | kPos, /*width = */ 6, "_____0"},
+ {0, kDec | kInt | kUpper | kBase, /*width = */ 0, "0"},
+ {0, kDec | kInt | kUpper | kBase, /*width = */ 6, "_____0"},
+ {0, kDec | kInt | kUpper | kBase | kPos, /*width = */ 0, "0"},
+ {0, kDec | kInt | kUpper | kBase | kPos, /*width = */ 6, "_____0"},
+ {0, kDec | kRight, /*width = */ 0, "0"},
+ {0, kDec | kRight, /*width = */ 6, "_____0"},
+ {0, kDec | kRight | kPos, /*width = */ 0, "0"},
+ {0, kDec | kRight | kPos, /*width = */ 6, "_____0"},
+ {0, kDec | kRight | kBase, /*width = */ 0, "0"},
+ {0, kDec | kRight | kBase, /*width = */ 6, "_____0"},
+ {0, kDec | kRight | kBase | kPos, /*width = */ 0, "0"},
+ {0, kDec | kRight | kBase | kPos, /*width = */ 6, "_____0"},
+ {0, kDec | kRight | kUpper, /*width = */ 0, "0"},
+ {0, kDec | kRight | kUpper, /*width = */ 6, "_____0"},
+ {0, kDec | kRight | kUpper | kPos, /*width = */ 0, "0"},
+ {0, kDec | kRight | kUpper | kPos, /*width = */ 6, "_____0"},
+ {0, kDec | kRight | kUpper | kBase, /*width = */ 0, "0"},
+ {0, kDec | kRight | kUpper | kBase, /*width = */ 6, "_____0"},
+ {0, kDec | kRight | kUpper | kBase | kPos, /*width = */ 0, "0"},
+ {0, kDec | kRight | kUpper | kBase | kPos, /*width = */ 6, "_____0"},
+ {0, kOct, /*width = */ 0, "0"},
+ {0, kOct, /*width = */ 6, "_____0"},
+ {0, kOct | kPos, /*width = */ 0, "0"},
+ {0, kOct | kPos, /*width = */ 6, "_____0"},
+ {0, kOct | kBase, /*width = */ 0, "0"},
+ {0, kOct | kBase, /*width = */ 6, "_____0"},
+ {0, kOct | kBase | kPos, /*width = */ 0, "0"},
+ {0, kOct | kBase | kPos, /*width = */ 6, "_____0"},
+ {0, kOct | kUpper, /*width = */ 0, "0"},
+ {0, kOct | kUpper, /*width = */ 6, "_____0"},
+ {0, kOct | kUpper | kPos, /*width = */ 0, "0"},
+ {0, kOct | kUpper | kPos, /*width = */ 6, "_____0"},
+ {0, kOct | kUpper | kBase, /*width = */ 0, "0"},
+ {0, kOct | kUpper | kBase, /*width = */ 6, "_____0"},
+ {0, kOct | kUpper | kBase | kPos, /*width = */ 0, "0"},
+ {0, kOct | kUpper | kBase | kPos, /*width = */ 6, "_____0"},
+ {0, kOct | kLeft, /*width = */ 0, "0"},
+ {0, kOct | kLeft, /*width = */ 6, "0_____"},
+ {0, kOct | kLeft | kPos, /*width = */ 0, "0"},
+ {0, kOct | kLeft | kPos, /*width = */ 6, "0_____"},
+ {0, kOct | kLeft | kBase, /*width = */ 0, "0"},
+ {0, kOct | kLeft | kBase, /*width = */ 6, "0_____"},
+ {0, kOct | kLeft | kBase | kPos, /*width = */ 0, "0"},
+ {0, kOct | kLeft | kBase | kPos, /*width = */ 6, "0_____"},
+ {0, kOct | kLeft | kUpper, /*width = */ 0, "0"},
+ {0, kOct | kLeft | kUpper, /*width = */ 6, "0_____"},
+ {0, kOct | kLeft | kUpper | kPos, /*width = */ 0, "0"},
+ {0, kOct | kLeft | kUpper | kPos, /*width = */ 6, "0_____"},
+ {0, kOct | kLeft | kUpper | kBase, /*width = */ 0, "0"},
+ {0, kOct | kLeft | kUpper | kBase, /*width = */ 6, "0_____"},
+ {0, kOct | kLeft | kUpper | kBase | kPos, /*width = */ 0, "0"},
+ {0, kOct | kLeft | kUpper | kBase | kPos, /*width = */ 6, "0_____"},
+ {0, kOct | kInt, /*width = */ 0, "0"},
+ {0, kOct | kInt, /*width = */ 6, "_____0"},
+ {0, kOct | kInt | kPos, /*width = */ 0, "0"},
+ {0, kOct | kInt | kPos, /*width = */ 6, "_____0"},
+ {0, kOct | kInt | kBase, /*width = */ 0, "0"},
+ {0, kOct | kInt | kBase, /*width = */ 6, "_____0"},
+ {0, kOct | kInt | kBase | kPos, /*width = */ 0, "0"},
+ {0, kOct | kInt | kBase | kPos, /*width = */ 6, "_____0"},
+ {0, kOct | kInt | kUpper, /*width = */ 0, "0"},
+ {0, kOct | kInt | kUpper, /*width = */ 6, "_____0"},
+ {0, kOct | kInt | kUpper | kPos, /*width = */ 0, "0"},
+ {0, kOct | kInt | kUpper | kPos, /*width = */ 6, "_____0"},
+ {0, kOct | kInt | kUpper | kBase, /*width = */ 0, "0"},
+ {0, kOct | kInt | kUpper | kBase, /*width = */ 6, "_____0"},
+ {0, kOct | kInt | kUpper | kBase | kPos, /*width = */ 0, "0"},
+ {0, kOct | kInt | kUpper | kBase | kPos, /*width = */ 6, "_____0"},
+ {0, kOct | kRight, /*width = */ 0, "0"},
+ {0, kOct | kRight, /*width = */ 6, "_____0"},
+ {0, kOct | kRight | kPos, /*width = */ 0, "0"},
+ {0, kOct | kRight | kPos, /*width = */ 6, "_____0"},
+ {0, kOct | kRight | kBase, /*width = */ 0, "0"},
+ {0, kOct | kRight | kBase, /*width = */ 6, "_____0"},
+ {0, kOct | kRight | kBase | kPos, /*width = */ 0, "0"},
+ {0, kOct | kRight | kBase | kPos, /*width = */ 6, "_____0"},
+ {0, kOct | kRight | kUpper, /*width = */ 0, "0"},
+ {0, kOct | kRight | kUpper, /*width = */ 6, "_____0"},
+ {0, kOct | kRight | kUpper | kPos, /*width = */ 0, "0"},
+ {0, kOct | kRight | kUpper | kPos, /*width = */ 6, "_____0"},
+ {0, kOct | kRight | kUpper | kBase, /*width = */ 0, "0"},
+ {0, kOct | kRight | kUpper | kBase, /*width = */ 6, "_____0"},
+ {0, kOct | kRight | kUpper | kBase | kPos, /*width = */ 0, "0"},
+ {0, kOct | kRight | kUpper | kBase | kPos, /*width = */ 6, "_____0"},
+ {0, kHex, /*width = */ 0, "0"},
+ {0, kHex, /*width = */ 6, "_____0"},
+ {0, kHex | kPos, /*width = */ 0, "0"},
+ {0, kHex | kPos, /*width = */ 6, "_____0"},
+ {0, kHex | kBase, /*width = */ 0, "0"},
+ {0, kHex | kBase, /*width = */ 6, "_____0"},
+ {0, kHex | kBase | kPos, /*width = */ 0, "0"},
+ {0, kHex | kBase | kPos, /*width = */ 6, "_____0"},
+ {0, kHex | kUpper, /*width = */ 0, "0"},
+ {0, kHex | kUpper, /*width = */ 6, "_____0"},
+ {0, kHex | kUpper | kPos, /*width = */ 0, "0"},
+ {0, kHex | kUpper | kPos, /*width = */ 6, "_____0"},
+ {0, kHex | kUpper | kBase, /*width = */ 0, "0"},
+ {0, kHex | kUpper | kBase, /*width = */ 6, "_____0"},
+ {0, kHex | kUpper | kBase | kPos, /*width = */ 0, "0"},
+ {0, kHex | kUpper | kBase | kPos, /*width = */ 6, "_____0"},
+ {0, kHex | kLeft, /*width = */ 0, "0"},
+ {0, kHex | kLeft, /*width = */ 6, "0_____"},
+ {0, kHex | kLeft | kPos, /*width = */ 0, "0"},
+ {0, kHex | kLeft | kPos, /*width = */ 6, "0_____"},
+ {0, kHex | kLeft | kBase, /*width = */ 0, "0"},
+ {0, kHex | kLeft | kBase, /*width = */ 6, "0_____"},
+ {0, kHex | kLeft | kBase | kPos, /*width = */ 0, "0"},
+ {0, kHex | kLeft | kBase | kPos, /*width = */ 6, "0_____"},
+ {0, kHex | kLeft | kUpper, /*width = */ 0, "0"},
+ {0, kHex | kLeft | kUpper, /*width = */ 6, "0_____"},
+ {0, kHex | kLeft | kUpper | kPos, /*width = */ 0, "0"},
+ {0, kHex | kLeft | kUpper | kPos, /*width = */ 6, "0_____"},
+ {0, kHex | kLeft | kUpper | kBase, /*width = */ 0, "0"},
+ {0, kHex | kLeft | kUpper | kBase, /*width = */ 6, "0_____"},
+ {0, kHex | kLeft | kUpper | kBase | kPos, /*width = */ 0, "0"},
+ {0, kHex | kLeft | kUpper | kBase | kPos, /*width = */ 6, "0_____"},
+ {0, kHex | kInt, /*width = */ 0, "0"},
+ {0, kHex | kInt, /*width = */ 6, "_____0"},
+ {0, kHex | kInt | kPos, /*width = */ 0, "0"},
+ {0, kHex | kInt | kPos, /*width = */ 6, "_____0"},
+ {0, kHex | kInt | kBase, /*width = */ 0, "0"},
+ {0, kHex | kInt | kBase, /*width = */ 6, "_____0"},
+ {0, kHex | kInt | kBase | kPos, /*width = */ 0, "0"},
+ {0, kHex | kInt | kBase | kPos, /*width = */ 6, "_____0"},
+ {0, kHex | kInt | kUpper, /*width = */ 0, "0"},
+ {0, kHex | kInt | kUpper, /*width = */ 6, "_____0"},
+ {0, kHex | kInt | kUpper | kPos, /*width = */ 0, "0"},
+ {0, kHex | kInt | kUpper | kPos, /*width = */ 6, "_____0"},
+ {0, kHex | kInt | kUpper | kBase, /*width = */ 0, "0"},
+ {0, kHex | kInt | kUpper | kBase, /*width = */ 6, "_____0"},
+ {0, kHex | kInt | kUpper | kBase | kPos, /*width = */ 0, "0"},
+ {0, kHex | kInt | kUpper | kBase | kPos, /*width = */ 6, "_____0"},
+ {0, kHex | kRight, /*width = */ 0, "0"},
+ {0, kHex | kRight, /*width = */ 6, "_____0"},
+ {0, kHex | kRight | kPos, /*width = */ 0, "0"},
+ {0, kHex | kRight | kPos, /*width = */ 6, "_____0"},
+ {0, kHex | kRight | kBase, /*width = */ 0, "0"},
+ {0, kHex | kRight | kBase, /*width = */ 6, "_____0"},
+ {0, kHex | kRight | kBase | kPos, /*width = */ 0, "0"},
+ {0, kHex | kRight | kBase | kPos, /*width = */ 6, "_____0"},
+ {0, kHex | kRight | kUpper, /*width = */ 0, "0"},
+ {0, kHex | kRight | kUpper, /*width = */ 6, "_____0"},
+ {0, kHex | kRight | kUpper | kPos, /*width = */ 0, "0"},
+ {0, kHex | kRight | kUpper | kPos, /*width = */ 6, "_____0"},
+ {0, kHex | kRight | kUpper | kBase, /*width = */ 0, "0"},
+ {0, kHex | kRight | kUpper | kBase, /*width = */ 6, "_____0"},
+ {0, kHex | kRight | kUpper | kBase | kPos, /*width = */ 0, "0"},
+ {0, kHex | kRight | kUpper | kBase | kPos, /*width = */ 6, "_____0"},
+ {37, std::ios_base::fmtflags(), /*width = */ 0, "37"},
+ {37, std::ios_base::fmtflags(), /*width = */ 6, "____37"},
+ {37, kPos, /*width = */ 0, "37"},
+ {37, kPos, /*width = */ 6, "____37"},
+ {37, kBase, /*width = */ 0, "37"},
+ {37, kBase, /*width = */ 6, "____37"},
+ {37, kBase | kPos, /*width = */ 0, "37"},
+ {37, kBase | kPos, /*width = */ 6, "____37"},
+ {37, kUpper, /*width = */ 0, "37"},
+ {37, kUpper, /*width = */ 6, "____37"},
+ {37, kUpper | kPos, /*width = */ 0, "37"},
+ {37, kUpper | kPos, /*width = */ 6, "____37"},
+ {37, kUpper | kBase, /*width = */ 0, "37"},
+ {37, kUpper | kBase, /*width = */ 6, "____37"},
+ {37, kUpper | kBase | kPos, /*width = */ 0, "37"},
+ {37, kUpper | kBase | kPos, /*width = */ 6, "____37"},
+ {37, kLeft, /*width = */ 0, "37"},
+ {37, kLeft, /*width = */ 6, "37____"},
+ {37, kLeft | kPos, /*width = */ 0, "37"},
+ {37, kLeft | kPos, /*width = */ 6, "37____"},
+ {37, kLeft | kBase, /*width = */ 0, "37"},
+ {37, kLeft | kBase, /*width = */ 6, "37____"},
+ {37, kLeft | kBase | kPos, /*width = */ 0, "37"},
+ {37, kLeft | kBase | kPos, /*width = */ 6, "37____"},
+ {37, kLeft | kUpper, /*width = */ 0, "37"},
+ {37, kLeft | kUpper, /*width = */ 6, "37____"},
+ {37, kLeft | kUpper | kPos, /*width = */ 0, "37"},
+ {37, kLeft | kUpper | kPos, /*width = */ 6, "37____"},
+ {37, kLeft | kUpper | kBase, /*width = */ 0, "37"},
+ {37, kLeft | kUpper | kBase, /*width = */ 6, "37____"},
+ {37, kLeft | kUpper | kBase | kPos, /*width = */ 0, "37"},
+ {37, kLeft | kUpper | kBase | kPos, /*width = */ 6, "37____"},
+ {37, kInt, /*width = */ 0, "37"},
+ {37, kInt, /*width = */ 6, "____37"},
+ {37, kInt | kPos, /*width = */ 0, "37"},
+ {37, kInt | kPos, /*width = */ 6, "____37"},
+ {37, kInt | kBase, /*width = */ 0, "37"},
+ {37, kInt | kBase, /*width = */ 6, "____37"},
+ {37, kInt | kBase | kPos, /*width = */ 0, "37"},
+ {37, kInt | kBase | kPos, /*width = */ 6, "____37"},
+ {37, kInt | kUpper, /*width = */ 0, "37"},
+ {37, kInt | kUpper, /*width = */ 6, "____37"},
+ {37, kInt | kUpper | kPos, /*width = */ 0, "37"},
+ {37, kInt | kUpper | kPos, /*width = */ 6, "____37"},
+ {37, kInt | kUpper | kBase, /*width = */ 0, "37"},
+ {37, kInt | kUpper | kBase, /*width = */ 6, "____37"},
+ {37, kInt | kUpper | kBase | kPos, /*width = */ 0, "37"},
+ {37, kInt | kUpper | kBase | kPos, /*width = */ 6, "____37"},
+ {37, kRight, /*width = */ 0, "37"},
+ {37, kRight, /*width = */ 6, "____37"},
+ {37, kRight | kPos, /*width = */ 0, "37"},
+ {37, kRight | kPos, /*width = */ 6, "____37"},
+ {37, kRight | kBase, /*width = */ 0, "37"},
+ {37, kRight | kBase, /*width = */ 6, "____37"},
+ {37, kRight | kBase | kPos, /*width = */ 0, "37"},
+ {37, kRight | kBase | kPos, /*width = */ 6, "____37"},
+ {37, kRight | kUpper, /*width = */ 0, "37"},
+ {37, kRight | kUpper, /*width = */ 6, "____37"},
+ {37, kRight | kUpper | kPos, /*width = */ 0, "37"},
+ {37, kRight | kUpper | kPos, /*width = */ 6, "____37"},
+ {37, kRight | kUpper | kBase, /*width = */ 0, "37"},
+ {37, kRight | kUpper | kBase, /*width = */ 6, "____37"},
+ {37, kRight | kUpper | kBase | kPos, /*width = */ 0, "37"},
+ {37, kRight | kUpper | kBase | kPos, /*width = */ 6, "____37"},
+ {37, kDec, /*width = */ 0, "37"},
+ {37, kDec, /*width = */ 6, "____37"},
+ {37, kDec | kPos, /*width = */ 0, "37"},
+ {37, kDec | kPos, /*width = */ 6, "____37"},
+ {37, kDec | kBase, /*width = */ 0, "37"},
+ {37, kDec | kBase, /*width = */ 6, "____37"},
+ {37, kDec | kBase | kPos, /*width = */ 0, "37"},
+ {37, kDec | kBase | kPos, /*width = */ 6, "____37"},
+ {37, kDec | kUpper, /*width = */ 0, "37"},
+ {37, kDec | kUpper, /*width = */ 6, "____37"},
+ {37, kDec | kUpper | kPos, /*width = */ 0, "37"},
+ {37, kDec | kUpper | kPos, /*width = */ 6, "____37"},
+ {37, kDec | kUpper | kBase, /*width = */ 0, "37"},
+ {37, kDec | kUpper | kBase, /*width = */ 6, "____37"},
+ {37, kDec | kUpper | kBase | kPos, /*width = */ 0, "37"},
+ {37, kDec | kUpper | kBase | kPos, /*width = */ 6, "____37"},
+ {37, kDec | kLeft, /*width = */ 0, "37"},
+ {37, kDec | kLeft, /*width = */ 6, "37____"},
+ {37, kDec | kLeft | kPos, /*width = */ 0, "37"},
+ {37, kDec | kLeft | kPos, /*width = */ 6, "37____"},
+ {37, kDec | kLeft | kBase, /*width = */ 0, "37"},
+ {37, kDec | kLeft | kBase, /*width = */ 6, "37____"},
+ {37, kDec | kLeft | kBase | kPos, /*width = */ 0, "37"},
+ {37, kDec | kLeft | kBase | kPos, /*width = */ 6, "37____"},
+ {37, kDec | kLeft | kUpper, /*width = */ 0, "37"},
+ {37, kDec | kLeft | kUpper, /*width = */ 6, "37____"},
+ {37, kDec | kLeft | kUpper | kPos, /*width = */ 0, "37"},
+ {37, kDec | kLeft | kUpper | kPos, /*width = */ 6, "37____"},
+ {37, kDec | kLeft | kUpper | kBase, /*width = */ 0, "37"},
+ {37, kDec | kLeft | kUpper | kBase, /*width = */ 6, "37____"},
+ {37, kDec | kLeft | kUpper | kBase | kPos, /*width = */ 0, "37"},
+ {37, kDec | kLeft | kUpper | kBase | kPos, /*width = */ 6, "37____"},
+ {37, kDec | kInt, /*width = */ 0, "37"},
+ {37, kDec | kInt, /*width = */ 6, "____37"},
+ {37, kDec | kInt | kPos, /*width = */ 0, "37"},
+ {37, kDec | kInt | kPos, /*width = */ 6, "____37"},
+ {37, kDec | kInt | kBase, /*width = */ 0, "37"},
+ {37, kDec | kInt | kBase, /*width = */ 6, "____37"},
+ {37, kDec | kInt | kBase | kPos, /*width = */ 0, "37"},
+ {37, kDec | kInt | kBase | kPos, /*width = */ 6, "____37"},
+ {37, kDec | kInt | kUpper, /*width = */ 0, "37"},
+ {37, kDec | kInt | kUpper, /*width = */ 6, "____37"},
+ {37, kDec | kInt | kUpper | kPos, /*width = */ 0, "37"},
+ {37, kDec | kInt | kUpper | kPos, /*width = */ 6, "____37"},
+ {37, kDec | kInt | kUpper | kBase, /*width = */ 0, "37"},
+ {37, kDec | kInt | kUpper | kBase, /*width = */ 6, "____37"},
+ {37, kDec | kInt | kUpper | kBase | kPos, /*width = */ 0, "37"},
+ {37, kDec | kInt | kUpper | kBase | kPos, /*width = */ 6, "____37"},
+ {37, kDec | kRight, /*width = */ 0, "37"},
+ {37, kDec | kRight, /*width = */ 6, "____37"},
+ {37, kDec | kRight | kPos, /*width = */ 0, "37"},
+ {37, kDec | kRight | kPos, /*width = */ 6, "____37"},
+ {37, kDec | kRight | kBase, /*width = */ 0, "37"},
+ {37, kDec | kRight | kBase, /*width = */ 6, "____37"},
+ {37, kDec | kRight | kBase | kPos, /*width = */ 0, "37"},
+ {37, kDec | kRight | kBase | kPos, /*width = */ 6, "____37"},
+ {37, kDec | kRight | kUpper, /*width = */ 0, "37"},
+ {37, kDec | kRight | kUpper, /*width = */ 6, "____37"},
+ {37, kDec | kRight | kUpper | kPos, /*width = */ 0, "37"},
+ {37, kDec | kRight | kUpper | kPos, /*width = */ 6, "____37"},
+ {37, kDec | kRight | kUpper | kBase, /*width = */ 0, "37"},
+ {37, kDec | kRight | kUpper | kBase, /*width = */ 6, "____37"},
+ {37, kDec | kRight | kUpper | kBase | kPos, /*width = */ 0, "37"},
+ {37, kDec | kRight | kUpper | kBase | kPos, /*width = */ 6, "____37"},
+ {37, kOct, /*width = */ 0, "45"},
+ {37, kOct, /*width = */ 6, "____45"},
+ {37, kOct | kPos, /*width = */ 0, "45"},
+ {37, kOct | kPos, /*width = */ 6, "____45"},
+ {37, kOct | kBase, /*width = */ 0, "045"},
+ {37, kOct | kBase, /*width = */ 6, "___045"},
+ {37, kOct | kBase | kPos, /*width = */ 0, "045"},
+ {37, kOct | kBase | kPos, /*width = */ 6, "___045"},
+ {37, kOct | kUpper, /*width = */ 0, "45"},
+ {37, kOct | kUpper, /*width = */ 6, "____45"},
+ {37, kOct | kUpper | kPos, /*width = */ 0, "45"},
+ {37, kOct | kUpper | kPos, /*width = */ 6, "____45"},
+ {37, kOct | kUpper | kBase, /*width = */ 0, "045"},
+ {37, kOct | kUpper | kBase, /*width = */ 6, "___045"},
+ {37, kOct | kUpper | kBase | kPos, /*width = */ 0, "045"},
+ {37, kOct | kUpper | kBase | kPos, /*width = */ 6, "___045"},
+ {37, kOct | kLeft, /*width = */ 0, "45"},
+ {37, kOct | kLeft, /*width = */ 6, "45____"},
+ {37, kOct | kLeft | kPos, /*width = */ 0, "45"},
+ {37, kOct | kLeft | kPos, /*width = */ 6, "45____"},
+ {37, kOct | kLeft | kBase, /*width = */ 0, "045"},
+ {37, kOct | kLeft | kBase, /*width = */ 6, "045___"},
+ {37, kOct | kLeft | kBase | kPos, /*width = */ 0, "045"},
+ {37, kOct | kLeft | kBase | kPos, /*width = */ 6, "045___"},
+ {37, kOct | kLeft | kUpper, /*width = */ 0, "45"},
+ {37, kOct | kLeft | kUpper, /*width = */ 6, "45____"},
+ {37, kOct | kLeft | kUpper | kPos, /*width = */ 0, "45"},
+ {37, kOct | kLeft | kUpper | kPos, /*width = */ 6, "45____"},
+ {37, kOct | kLeft | kUpper | kBase, /*width = */ 0, "045"},
+ {37, kOct | kLeft | kUpper | kBase, /*width = */ 6, "045___"},
+ {37, kOct | kLeft | kUpper | kBase | kPos, /*width = */ 0, "045"},
+ {37, kOct | kLeft | kUpper | kBase | kPos, /*width = */ 6, "045___"},
+ {37, kOct | kInt, /*width = */ 0, "45"},
+ {37, kOct | kInt, /*width = */ 6, "____45"},
+ {37, kOct | kInt | kPos, /*width = */ 0, "45"},
+ {37, kOct | kInt | kPos, /*width = */ 6, "____45"},
+ {37, kOct | kInt | kBase, /*width = */ 0, "045"},
+ {37, kOct | kInt | kBase, /*width = */ 6, "___045"},
+ {37, kOct | kInt | kBase | kPos, /*width = */ 0, "045"},
+ {37, kOct | kInt | kBase | kPos, /*width = */ 6, "___045"},
+ {37, kOct | kInt | kUpper, /*width = */ 0, "45"},
+ {37, kOct | kInt | kUpper, /*width = */ 6, "____45"},
+ {37, kOct | kInt | kUpper | kPos, /*width = */ 0, "45"},
+ {37, kOct | kInt | kUpper | kPos, /*width = */ 6, "____45"},
+ {37, kOct | kInt | kUpper | kBase, /*width = */ 0, "045"},
+ {37, kOct | kInt | kUpper | kBase, /*width = */ 6, "___045"},
+ {37, kOct | kInt | kUpper | kBase | kPos, /*width = */ 0, "045"},
+ {37, kOct | kInt | kUpper | kBase | kPos, /*width = */ 6, "___045"},
+ {37, kOct | kRight, /*width = */ 0, "45"},
+ {37, kOct | kRight, /*width = */ 6, "____45"},
+ {37, kOct | kRight | kPos, /*width = */ 0, "45"},
+ {37, kOct | kRight | kPos, /*width = */ 6, "____45"},
+ {37, kOct | kRight | kBase, /*width = */ 0, "045"},
+ {37, kOct | kRight | kBase, /*width = */ 6, "___045"},
+ {37, kOct | kRight | kBase | kPos, /*width = */ 0, "045"},
+ {37, kOct | kRight | kBase | kPos, /*width = */ 6, "___045"},
+ {37, kOct | kRight | kUpper, /*width = */ 0, "45"},
+ {37, kOct | kRight | kUpper, /*width = */ 6, "____45"},
+ {37, kOct | kRight | kUpper | kPos, /*width = */ 0, "45"},
+ {37, kOct | kRight | kUpper | kPos, /*width = */ 6, "____45"},
+ {37, kOct | kRight | kUpper | kBase, /*width = */ 0, "045"},
+ {37, kOct | kRight | kUpper | kBase, /*width = */ 6, "___045"},
+ {37, kOct | kRight | kUpper | kBase | kPos, /*width = */ 0, "045"},
+ {37, kOct | kRight | kUpper | kBase | kPos, /*width = */ 6, "___045"},
+ {37, kHex, /*width = */ 0, "25"},
+ {37, kHex, /*width = */ 6, "____25"},
+ {37, kHex | kPos, /*width = */ 0, "25"},
+ {37, kHex | kPos, /*width = */ 6, "____25"},
+ {37, kHex | kBase, /*width = */ 0, "0x25"},
+ {37, kHex | kBase, /*width = */ 6, "__0x25"},
+ {37, kHex | kBase | kPos, /*width = */ 0, "0x25"},
+ {37, kHex | kBase | kPos, /*width = */ 6, "__0x25"},
+ {37, kHex | kUpper, /*width = */ 0, "25"},
+ {37, kHex | kUpper, /*width = */ 6, "____25"},
+ {37, kHex | kUpper | kPos, /*width = */ 0, "25"},
+ {37, kHex | kUpper | kPos, /*width = */ 6, "____25"},
+ {37, kHex | kUpper | kBase, /*width = */ 0, "0X25"},
+ {37, kHex | kUpper | kBase, /*width = */ 6, "__0X25"},
+ {37, kHex | kUpper | kBase | kPos, /*width = */ 0, "0X25"},
+ {37, kHex | kUpper | kBase | kPos, /*width = */ 6, "__0X25"},
+ {37, kHex | kLeft, /*width = */ 0, "25"},
+ {37, kHex | kLeft, /*width = */ 6, "25____"},
+ {37, kHex | kLeft | kPos, /*width = */ 0, "25"},
+ {37, kHex | kLeft | kPos, /*width = */ 6, "25____"},
+ {37, kHex | kLeft | kBase, /*width = */ 0, "0x25"},
+ {37, kHex | kLeft | kBase, /*width = */ 6, "0x25__"},
+ {37, kHex | kLeft | kBase | kPos, /*width = */ 0, "0x25"},
+ {37, kHex | kLeft | kBase | kPos, /*width = */ 6, "0x25__"},
+ {37, kHex | kLeft | kUpper, /*width = */ 0, "25"},
+ {37, kHex | kLeft | kUpper, /*width = */ 6, "25____"},
+ {37, kHex | kLeft | kUpper | kPos, /*width = */ 0, "25"},
+ {37, kHex | kLeft | kUpper | kPos, /*width = */ 6, "25____"},
+ {37, kHex | kLeft | kUpper | kBase, /*width = */ 0, "0X25"},
+ {37, kHex | kLeft | kUpper | kBase, /*width = */ 6, "0X25__"},
+ {37, kHex | kLeft | kUpper | kBase | kPos, /*width = */ 0, "0X25"},
+ {37, kHex | kLeft | kUpper | kBase | kPos, /*width = */ 6, "0X25__"},
+ {37, kHex | kInt, /*width = */ 0, "25"},
+ {37, kHex | kInt, /*width = */ 6, "____25"},
+ {37, kHex | kInt | kPos, /*width = */ 0, "25"},
+ {37, kHex | kInt | kPos, /*width = */ 6, "____25"},
+ {37, kHex | kInt | kBase, /*width = */ 0, "0x25"},
+ {37, kHex | kInt | kBase, /*width = */ 6, "0x__25"},
+ {37, kHex | kInt | kBase | kPos, /*width = */ 0, "0x25"},
+ {37, kHex | kInt | kBase | kPos, /*width = */ 6, "0x__25"},
+ {37, kHex | kInt | kUpper, /*width = */ 0, "25"},
+ {37, kHex | kInt | kUpper, /*width = */ 6, "____25"},
+ {37, kHex | kInt | kUpper | kPos, /*width = */ 0, "25"},
+ {37, kHex | kInt | kUpper | kPos, /*width = */ 6, "____25"},
+ {37, kHex | kInt | kUpper | kBase, /*width = */ 0, "0X25"},
+ {37, kHex | kInt | kUpper | kBase, /*width = */ 6, "0X__25"},
+ {37, kHex | kInt | kUpper | kBase | kPos, /*width = */ 0, "0X25"},
+ {37, kHex | kInt | kUpper | kBase | kPos, /*width = */ 6, "0X__25"},
+ {37, kHex | kRight, /*width = */ 0, "25"},
+ {37, kHex | kRight, /*width = */ 6, "____25"},
+ {37, kHex | kRight | kPos, /*width = */ 0, "25"},
+ {37, kHex | kRight | kPos, /*width = */ 6, "____25"},
+ {37, kHex | kRight | kBase, /*width = */ 0, "0x25"},
+ {37, kHex | kRight | kBase, /*width = */ 6, "__0x25"},
+ {37, kHex | kRight | kBase | kPos, /*width = */ 0, "0x25"},
+ {37, kHex | kRight | kBase | kPos, /*width = */ 6, "__0x25"},
+ {37, kHex | kRight | kUpper, /*width = */ 0, "25"},
+ {37, kHex | kRight | kUpper, /*width = */ 6, "____25"},
+ {37, kHex | kRight | kUpper | kPos, /*width = */ 0, "25"},
+ {37, kHex | kRight | kUpper | kPos, /*width = */ 6, "____25"},
+ {37, kHex | kRight | kUpper | kBase, /*width = */ 0, "0X25"},
+ {37, kHex | kRight | kUpper | kBase, /*width = */ 6, "__0X25"},
+ {37, kHex | kRight | kUpper | kBase | kPos, /*width = */ 0, "0X25"},
+ {37, kHex | kRight | kUpper | kBase | kPos, /*width = */ 6, "__0X25"}};
+}
+
+} // namespace
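The CheckUint128Case() helper above leans on SCOPED_TRACE so that a failing EXPECT_EQ reports which flag/width combination produced it; the same table-driven pattern in isolation looks roughly like this (hypothetical CheckFormatted helper, not part of the test file):

#include <sstream>
#include <string>

#include "gtest/gtest.h"

// Runs one formatting check and tags any failure with a description of the
// stream state, so table-driven loops stay debuggable.
void CheckFormatted(int value, std::ios_base::fmtflags flags,
                    const std::string& expected) {
  std::ostringstream os;
  os.flags(flags);
  os << value;
  SCOPED_TRACE("flags=" + std::to_string(static_cast<long>(flags)));
  EXPECT_EQ(expected, os.str());
}

TEST(FormatSketch, Hex) { CheckFormatted(37, std::ios::hex, "25"); }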
#include <algorithm>
#include <limits>
#include <random>
-#include <sstream>
#include <type_traits>
#include <utility>
#include <vector>
}
#endif // ABSL_HAVE_INTRINSIC_INT128
+TEST(Uint128, TrivialTraitsTest) {
+ static_assert(absl::is_trivially_default_constructible<absl::uint128>::value,
+ "");
+ static_assert(absl::is_trivially_copy_constructible<absl::uint128>::value,
+ "");
+ static_assert(absl::is_trivially_copy_assignable<absl::uint128>::value, "");
+ static_assert(std::is_trivially_destructible<absl::uint128>::value, "");
+}
+
TEST(Uint128, AllTests) {
absl::uint128 zero = 0;
absl::uint128 one = 1;
EXPECT_EQ(minus_two, absl::MakeUint128(-1, -2));
}
-TEST(Uint128, Traits) {
- EXPECT_TRUE(absl::is_trivially_copy_constructible<absl::uint128>::value);
- EXPECT_TRUE(absl::is_trivially_copy_assignable<absl::uint128>::value);
- EXPECT_TRUE(std::is_trivially_destructible<absl::uint128>::value);
-}
-
-TEST(Uint128, OStream) {
- struct {
- absl::uint128 val;
- std::ios_base::fmtflags flags;
- std::streamsize width;
- char fill;
- const char* rep;
- } cases[] = {
- // zero with different bases
- {absl::uint128(0), std::ios::dec, 0, '_', "0"},
- {absl::uint128(0), std::ios::oct, 0, '_', "0"},
- {absl::uint128(0), std::ios::hex, 0, '_', "0"},
- // crossover between lo_ and hi_
- {absl::MakeUint128(0, -1), std::ios::dec, 0, '_', "18446744073709551615"},
- {absl::MakeUint128(0, -1), std::ios::oct, 0, '_',
- "1777777777777777777777"},
- {absl::MakeUint128(0, -1), std::ios::hex, 0, '_', "ffffffffffffffff"},
- {absl::MakeUint128(1, 0), std::ios::dec, 0, '_', "18446744073709551616"},
- {absl::MakeUint128(1, 0), std::ios::oct, 0, '_',
- "2000000000000000000000"},
- {absl::MakeUint128(1, 0), std::ios::hex, 0, '_', "10000000000000000"},
- // just the top bit
- {absl::MakeUint128(0x8000000000000000, 0), std::ios::dec, 0, '_',
- "170141183460469231731687303715884105728"},
- {absl::MakeUint128(0x8000000000000000, 0), std::ios::oct, 0, '_',
- "2000000000000000000000000000000000000000000"},
- {absl::MakeUint128(0x8000000000000000, 0), std::ios::hex, 0, '_',
- "80000000000000000000000000000000"},
- // maximum absl::uint128 value
- {absl::MakeUint128(-1, -1), std::ios::dec, 0, '_',
- "340282366920938463463374607431768211455"},
- {absl::MakeUint128(-1, -1), std::ios::oct, 0, '_',
- "3777777777777777777777777777777777777777777"},
- {absl::MakeUint128(-1, -1), std::ios::hex, 0, '_',
- "ffffffffffffffffffffffffffffffff"},
- // uppercase
- {absl::MakeUint128(-1, -1), std::ios::hex | std::ios::uppercase, 0, '_',
- "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"},
- // showbase
- {absl::uint128(1), std::ios::dec | std::ios::showbase, 0, '_', "1"},
- {absl::uint128(1), std::ios::oct | std::ios::showbase, 0, '_', "01"},
- {absl::uint128(1), std::ios::hex | std::ios::showbase, 0, '_', "0x1"},
- // showbase does nothing on zero
- {absl::uint128(0), std::ios::dec | std::ios::showbase, 0, '_', "0"},
- {absl::uint128(0), std::ios::oct | std::ios::showbase, 0, '_', "0"},
- {absl::uint128(0), std::ios::hex | std::ios::showbase, 0, '_', "0"},
- // showpos does nothing on unsigned types
- {absl::uint128(1), std::ios::dec | std::ios::showpos, 0, '_', "1"},
- // padding
- {absl::uint128(9), std::ios::dec, 6, '_', "_____9"},
- {absl::uint128(12345), std::ios::dec, 6, '_', "_12345"},
- // left adjustment
- {absl::uint128(9), std::ios::dec | std::ios::left, 6, '_', "9_____"},
- {absl::uint128(12345), std::ios::dec | std::ios::left, 6, '_', "12345_"},
- };
- for (const auto& test_case : cases) {
- std::ostringstream os;
- os.flags(test_case.flags);
- os.width(test_case.width);
- os.fill(test_case.fill);
- os << test_case.val;
- EXPECT_EQ(test_case.rep, os.str());
- }
-}
-
} // namespace
package(
default_visibility = ["//visibility:public"],
- features = [
- "parse_headers",
- "header_modules",
- ],
+ features = ["parse_headers"],
)
licenses(["notice"]) # Apache 2.0
cc_library(
name = "internal",
srcs = [
+ "internal/ostringstream.cc",
"internal/utf8.cc",
],
hdrs = [
size = "small",
srcs = ["match_test.cc"],
copts = ABSL_TEST_COPTS,
+ visibility = ["//visibility:private"],
deps = [
":strings",
"@com_google_googletest//:gtest_main",
"internal/escaping_test_common.inc",
],
copts = ABSL_TEST_COPTS,
+ visibility = ["//visibility:private"],
deps = [
":strings",
"//absl/base:core_headers",
size = "small",
srcs = ["ascii_test.cc"],
copts = ABSL_TEST_COPTS,
+ visibility = ["//visibility:private"],
deps = [
":strings",
"//absl/base:core_headers",
"internal/memutil_test.cc",
],
copts = ABSL_TEST_COPTS,
+ visibility = ["//visibility:private"],
deps = [
":strings",
"//absl/base:core_headers",
"internal/utf8_test.cc",
],
copts = ABSL_TEST_COPTS,
+ visibility = ["//visibility:private"],
deps = [
":internal",
":strings",
size = "small",
srcs = ["string_view_test.cc"],
copts = ABSL_TEST_COPTS + ABSL_EXCEPTIONS_FLAG,
+ visibility = ["//visibility:private"],
deps = [
":strings",
"//absl/base:config",
size = "small",
srcs = ["substitute_test.cc"],
copts = ABSL_TEST_COPTS,
+ visibility = ["//visibility:private"],
deps = [
":strings",
"//absl/base:core_headers",
size = "small",
srcs = ["str_replace_test.cc"],
copts = ABSL_TEST_COPTS,
+ visibility = ["//visibility:private"],
deps = [
":strings",
"@com_google_googletest//:gtest_main",
name = "str_split_test",
srcs = ["str_split_test.cc"],
copts = ABSL_TEST_COPTS,
+ visibility = ["//visibility:private"],
deps = [
":strings",
"//absl/base:core_headers",
size = "small",
srcs = ["internal/ostringstream_test.cc"],
copts = ABSL_TEST_COPTS,
+ visibility = ["//visibility:private"],
deps = [
":internal",
"@com_google_googletest//:gtest_main",
"internal/resize_uninitialized_test.cc",
],
copts = ABSL_TEST_COPTS,
+ visibility = ["//visibility:private"],
deps = [
"//absl/base:core_headers",
"//absl/meta:type_traits",
size = "small",
srcs = ["str_join_test.cc"],
copts = ABSL_TEST_COPTS,
+ visibility = ["//visibility:private"],
deps = [
":strings",
"//absl/base:core_headers",
size = "small",
srcs = ["str_cat_test.cc"],
copts = ABSL_TEST_COPTS,
+ visibility = ["//visibility:private"],
deps = [
":strings",
"//absl/base:core_headers",
tags = [
"no_test_loonix",
],
+ visibility = ["//visibility:private"],
deps = [
":strings",
"//absl/base",
size = "small",
srcs = ["strip_test.cc"],
copts = ABSL_TEST_COPTS,
+ visibility = ["//visibility:private"],
deps = [
":strings",
"@com_google_googletest//:gtest_main",
--- /dev/null
+#
+# Copyright 2017 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+list(APPEND STRINGS_PUBLIC_HEADERS
+ "ascii.h"
+ "escaping.h"
+ "match.h"
+ "numbers.h"
+ "str_cat.h"
+ "string_view.h"
+ "strip.h"
+ "str_join.h"
+ "str_replace.h"
+ "str_split.h"
+ "substitute.h"
+)
+
+
+list(APPEND STRINGS_INTERNAL_HEADERS
+ "internal/char_map.h"
+ "internal/memutil.h"
+ "internal/ostringstream.h"
+ "internal/resize_uninitialized.h"
+ "internal/str_join_internal.h"
+ "internal/str_split_internal.h"
+ "internal/utf8.h"
+)
+
+
+
+# add string library
+list(APPEND STRINGS_SRC
+ "ascii.cc"
+ "escaping.cc"
+ "internal/memutil.cc"
+ "internal/memutil.h"
+ "internal/utf8.cc"
+ "internal/ostringstream.cc"
+ "match.cc"
+ "numbers.cc"
+ "str_cat.cc"
+ "str_replace.cc"
+ "str_split.cc"
+ "string_view.cc"
+ "substitute.cc"
+ ${STRINGS_PUBLIC_HEADERS}
+ ${STRINGS_INTERNAL_HEADERS}
+)
+set(STRINGS_PUBLIC_LIBRARIES absl::base absl_throw_delegate)
+
+absl_library(
+ TARGET
+ absl_strings
+ SOURCES
+ ${STRINGS_SRC}
+ PUBLIC_LIBRARIES
+ ${STRINGS_PUBLIC_LIBRARIES}
+ EXPORT_NAME
+ strings
+)
+
+
+#
+## TESTS
+#
+
+# test match_test
+set(MATCH_TEST_SRC "match_test.cc")
+set(MATCH_TEST_PUBLIC_LIBRARIES absl::strings)
+
+absl_test(
+ TARGET
+ match_test
+ SOURCES
+ ${MATCH_TEST_SRC}
+ PUBLIC_LIBRARIES
+ ${MATCH_TEST_PUBLIC_LIBRARIES}
+)
+
+
+# test escaping_test
+set(ESCAPING_TEST_SRC "escaping_test.cc")
+set(ESCAPING_TEST_PUBLIC_LIBRARIES absl::strings absl::base)
+
+absl_test(
+ TARGET
+ escaping_test
+ SOURCES
+ ${ESCAPING_TEST_SRC}
+ PUBLIC_LIBRARIES
+ ${ESCAPING_TEST_PUBLIC_LIBRARIES}
+)
+
+
+# test ascii_test
+set(ASCII_TEST_SRC "ascii_test.cc")
+set(ASCII_TEST_PUBLIC_LIBRARIES absl::strings)
+
+absl_test(
+ TARGET
+ ascii_test
+ SOURCES
+ ${ASCII_TEST_SRC}
+ PUBLIC_LIBRARIES
+ ${ASCII_TEST_PUBLIC_LIBRARIES}
+)
+
+
+# test memutil_test
+set(MEMUTIL_TEST_SRC "internal/memutil_test.cc")
+set(MEMUTIL_TEST_PUBLIC_LIBRARIES absl::strings)
+
+absl_test(
+ TARGET
+ memutil_test
+ SOURCES
+ ${MEMUTIL_TEST_SRC}
+ PUBLIC_LIBRARIES
+ ${MEMUTIL_TEST_PUBLIC_LIBRARIES}
+)
+
+
+# test utf8_test
+set(UTF8_TEST_SRC "internal/utf8_test.cc")
+set(UTF8_TEST_PUBLIC_LIBRARIES absl::strings absl::base)
+
+absl_test(
+ TARGET
+ utf8_test
+ SOURCES
+ ${UTF8_TEST_SRC}
+ PUBLIC_LIBRARIES
+ ${UTF8_TEST_PUBLIC_LIBRARIES}
+)
+
+
+# test string_view_test
+set(STRING_VIEW_TEST_SRC "string_view_test.cc")
+set(STRING_VIEW_TEST_PUBLIC_LIBRARIES absl::strings absl_throw_delegate absl::base)
+
+absl_test(
+ TARGET
+ string_view_test
+ SOURCES
+ ${STRING_VIEW_TEST_SRC}
+ PUBLIC_LIBRARIES
+ ${STRING_VIEW_TEST_PUBLIC_LIBRARIES}
+)
+
+
+# test substitute_test
+set(SUBSTITUTE_TEST_SRC "substitute_test.cc")
+set(SUBSTITUTE_TEST_PUBLIC_LIBRARIES absl::strings absl::base)
+
+absl_test(
+ TARGET
+ substitute_test
+ SOURCES
+ ${SUBSTITUTE_TEST_SRC}
+ PUBLIC_LIBRARIES
+ ${SUBSTITUTE_TEST_PUBLIC_LIBRARIES}
+)
+
+
+# test str_replace_test
+set(STR_REPLACE_TEST_SRC "str_replace_test.cc")
+set(STR_REPLACE_TEST_PUBLIC_LIBRARIES absl::strings absl::base absl_throw_delegate)
+
+absl_test(
+ TARGET
+ str_replace_test
+ SOURCES
+ ${STR_REPLACE_TEST_SRC}
+ PUBLIC_LIBRARIES
+ ${STR_REPLACE_TEST_PUBLIC_LIBRARIES}
+)
+
+
+# test str_split_test
+set(STR_SPLIT_TEST_SRC "str_split_test.cc")
+set(STR_SPLIT_TEST_PUBLIC_LIBRARIES absl::strings absl::base absl_throw_delegate)
+
+absl_test(
+ TARGET
+ str_split_test
+ SOURCES
+ ${STR_SPLIT_TEST_SRC}
+ PUBLIC_LIBRARIES
+ ${STR_SPLIT_TEST_PUBLIC_LIBRARIES}
+)
+
+
+# test ostringstream_test
+set(OSTRINGSTREAM_TEST_SRC "internal/ostringstream_test.cc")
+set(OSTRINGSTREAM_TEST_PUBLIC_LIBRARIES absl::strings)
+
+absl_test(
+ TARGET
+ ostringstream_test
+ SOURCES
+ ${OSTRINGSTREAM_TEST_SRC}
+ PUBLIC_LIBRARIES
+ ${OSTRINGSTREAM_TEST_PUBLIC_LIBRARIES}
+)
+
+
+# test resize_uninitialized_test
+set(RESIZE_UNINITIALIZED_TEST_SRC "internal/resize_uninitialized_test.cc")
+
+absl_test(
+ TARGET
+ resize_uninitialized_test
+ SOURCES
+ ${RESIZE_UNINITIALIZED_TEST_SRC}
+)
+
+
+# test str_join_test
+set(STR_JOIN_TEST_SRC "str_join_test.cc")
+set(STR_JOIN_TEST_PUBLIC_LIBRARIES absl::strings)
+
+absl_test(
+ TARGET
+ str_join_test
+ SOURCES
+ ${STR_JOIN_TEST_SRC}
+ PUBLIC_LIBRARIES
+ ${STR_JOIN_TEST_PUBLIC_LIBRARIES}
+)
+
+
+# test str_cat_test
+set(STR_CAT_TEST_SRC "str_cat_test.cc")
+set(STR_CAT_TEST_PUBLIC_LIBRARIES absl::strings)
+
+absl_test(
+ TARGET
+ str_cat_test
+ SOURCES
+ ${STR_CAT_TEST_SRC}
+ PUBLIC_LIBRARIES
+ ${STR_CAT_TEST_PUBLIC_LIBRARIES}
+)
+
+
+# test numbers_test
+set(NUMBERS_TEST_SRC "numbers_test.cc")
+set(NUMBERS_TEST_PUBLIC_LIBRARIES absl::strings)
+
+absl_test(
+ TARGET
+ numbers_test
+ SOURCES
+ ${NUMBERS_TEST_SRC}
+ PUBLIC_LIBRARIES
+ ${NUMBERS_TEST_PUBLIC_LIBRARIES}
+)
+
+
+# test strip_test
+set(STRIP_TEST_SRC "strip_test.cc")
+set(STRIP_TEST_PUBLIC_LIBRARIES absl::strings)
+
+absl_test(
+ TARGET
+ strip_test
+ SOURCES
+ ${STRIP_TEST_SRC}
+ PUBLIC_LIBRARIES
+ ${STRIP_TEST_PUBLIC_LIBRARIES}
+)
+
+
+# test char_map_test
+set(CHAR_MAP_TEST_SRC "internal/char_map_test.cc")
+set(CHAR_MAP_TEST_PUBLIC_LIBRARIES absl::strings)
+
+absl_test(
+ TARGET
+ char_map_test
+ SOURCES
+ ${CHAR_MAP_TEST_SRC}
+ PUBLIC_LIBRARIES
+ ${CHAR_MAP_TEST_PUBLIC_LIBRARIES}
+)
+
+
+
+
return x & 0xf;
}
+inline bool IsSurrogate(char32_t c, absl::string_view src, std::string* error) {
+ if (c >= 0xD800 && c <= 0xDFFF) {
+ if (error) {
+ *error = absl::StrCat("invalid surrogate character (0xD800-DFFF): \\",
+ src);
+ }
+ return true;
+ }
+ return false;
+}
+
// ----------------------------------------------------------------------
// CUnescapeInternal()
// Implements both CUnescape() and CUnescapeForNullTerminatedString().
d += 5;
break;
}
+ if (IsSurrogate(rune, absl::string_view(hex_start, 5), error)) {
+ return false;
+ }
d += strings_internal::EncodeUTF8Char(d, rune);
break;
}
d += 9;
break;
}
+ if (IsSurrogate(rune, absl::string_view(hex_start, 9), error)) {
+ return false;
+ }
d += strings_internal::EncodeUTF8Char(d, rune);
break;
}
// Example:
//
// std::string s = "foo\\rbar\\nbaz\\t";
-// std::string unescaped_s = absl::CUnescape(s);
+// std::string unescaped_s;
+// if (!absl::CUnescape(s, &unescaped_s) {
+// ...
+// }
// EXPECT_EQ(unescaped_s, "foo\rbar\nbaz\t");
bool CUnescape(absl::string_view source, std::string* dest, std::string* error);
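A small sketch of how the three-argument overload above surfaces the new surrogate rejection; the function name Demo() and the chosen literal are only illustrative:

    #include <string>
    #include "absl/strings/escaping.h"

    void Demo() {
      std::string out, err;
      if (!absl::CUnescape("\\uD835", &out, &err)) {
        // err describes the rejected escape (see IsSurrogate() earlier).
      }
    }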
EXPECT_EQ(out, val.unescaped);
}
std::string bad[] =
- {"\\u1", // too short
- "\\U1", // too short
- "\\Uffffff",
- "\\777", // exceeds 0xff
- "\\xABCD"}; // exceeds 0xff
+ {"\\u1", // too short
+ "\\U1", // too short
+ "\\Uffffff", // exceeds 0x10ffff (largest Unicode)
+ "\\U00110000", // exceeds 0x10ffff (largest Unicode)
+ "\\uD835", // surrogate character (D800-DFFF)
+ "\\U0000DD04", // surrogate character (D800-DFFF)
+ "\\777", // exceeds 0xff
+ "\\xABCD"}; // exceeds 0xff
for (const std::string& e : bad) {
std::string error;
std::string out;
namespace {
-// Previously documented minimum buffer sizes for Fast*ToBuffer functions.
-// NOTE(edk): These should be deleted and uses replaced with kFastToBufferSize
-// once existing code has been fixed to use kFastToBufferSize.
-enum {
- kFastInt32ToBufferSize = 12,
- kFastInt64ToBufferSize = 22,
- kFastUInt32ToBufferSize = 12,
- kFastUInt64ToBufferSize = 22
-};
-
template <typename IntType>
bool Itoa(IntType value, int base, std::string* destination) {
destination->clear();
--- /dev/null
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/strings/internal/ostringstream.h"
+
+namespace absl {
+namespace strings_internal {
+
+OStringStream::Buf::int_type OStringStream::overflow(int c) {
+ assert(s_);
+ if (!Buf::traits_type::eq_int_type(c, Buf::traits_type::eof()))
+ s_->push_back(static_cast<char>(c));
+ return 1;
+}
+
+std::streamsize OStringStream::xsputn(const char* s, std::streamsize n) {
+ assert(s_);
+ s_->append(s, n);
+ return n;
+}
+
+} // namespace strings_internal
+} // namespace absl
private:
using Buf = std::basic_streambuf<char>;
- Buf::int_type overflow(int c = Buf::traits_type::eof()) override {
- assert(s_);
- if (!Buf::traits_type::eq_int_type(c, Buf::traits_type::eof()))
- s_->push_back(static_cast<char>(c));
- return 1;
- }
-
- std::streamsize xsputn(const char* s, std::streamsize n) override {
- assert(s_);
- s_->append(s, n);
- return n;
- }
+ Buf::int_type overflow(int c) override;
+ std::streamsize xsputn(const char* s, std::streamsize n) override;
std::string* s_;
};
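A minimal usage sketch for the class above, assuming the usual constructor taking the destination std::string*; this is an internal API, so the snippet is illustrative rather than a supported interface:

    #include <string>
    #include "absl/strings/internal/ostringstream.h"

    void Demo() {
      std::string sink;
      absl::strings_internal::OStringStream strm(&sink);
      strm << "count=" << 42;  // overflow()/xsputn() append directly into sink
      // sink == "count=42"
    }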
#include <cstddef>
#include <cstdint>
-
namespace absl {
namespace strings_internal {
// Examples:
// std::string s = "foo";
// absl::string_view sv = "f";
-// EXPECT_TRUE(absl::StrContains(s, sv));
+// assert(absl::StrContains(s, sv));
//
// Note: The order of parameters in these functions is designed to mimic the
// order an equivalent member function would exhibit;
inline bool StartsWith(absl::string_view text, absl::string_view prefix) {
return prefix.empty() ||
(text.size() >= prefix.size() &&
- memcmp(text.data(), prefix.data(), prefix.size()) == 0);
+ memcmp(text.data(), prefix.data(), prefix.size()) == 0);
}
// EndsWith()
return suffix.empty() ||
(text.size() >= suffix.size() &&
memcmp(text.data() + (text.size() - suffix.size()), suffix.data(),
- suffix.size()) == 0);
+ suffix.size()) == 0
+ );
}
// StartsWithIgnoreCase()
// SimpleAtob()
//
-// Converts the given std::string into into a boolean, returning `true` if
-// successful. The following case-insensitive strings are interpreted as boolean
-// `true`: "true", "t", "yes", "y", "1". The following case-insensitive strings
+// Converts the given std::string into a boolean, returning `true` if successful.
+// The following case-insensitive strings are interpreted as boolean `true`:
+// "true", "t", "yes", "y", "1". The following case-insensitive strings
// are interpreted as boolean `false`: "false", "f", "no", "n", "0".
ABSL_MUST_USE_RESULT bool SimpleAtob(absl::string_view str, bool* value);
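A short caller-side sketch of the declaration above (ParseFlagValue is a made-up name; the return value must be checked because of ABSL_MUST_USE_RESULT):

    #include "absl/strings/numbers.h"
    #include "absl/strings/string_view.h"

    bool ParseFlagValue(absl::string_view text) {
      bool b = false;
      if (!absl::SimpleAtob(text, &b)) {
        return false;  // not one of the recognized true/false spellings
      }
      return b;
    }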
namespace {
-using absl::numbers_internal::FastInt32ToBuffer;
-using absl::numbers_internal::FastInt64ToBuffer;
-using absl::numbers_internal::FastUInt32ToBuffer;
-using absl::numbers_internal::FastUInt64ToBuffer;
-using absl::numbers_internal::kFastToBufferSize;
using absl::numbers_internal::kSixDigitsToBufferSize;
using absl::numbers_internal::safe_strto32_base;
using absl::numbers_internal::safe_strto64_base;
}
void CheckInt32(int32_t x) {
- char buffer[kFastInt32ToBufferSize];
- char* actual = FastInt32ToBuffer(x, buffer);
+ char buffer[absl::numbers_internal::kFastToBufferSize];
+ char* actual = absl::numbers_internal::FastInt32ToBuffer(x, buffer);
std::string expected = std::to_string(x);
- ASSERT_TRUE(expected == actual)
+ ASSERT_TRUE(expected == std::string(buffer, actual))
<< "Expected \"" << expected << "\", Actual \"" << actual << "\", Input "
<< x;
}
void CheckInt64(int64_t x) {
- char buffer[kFastInt64ToBufferSize + 3];
+ char buffer[absl::numbers_internal::kFastToBufferSize + 3];
buffer[0] = '*';
buffer[23] = '*';
buffer[24] = '*';
- char* actual = FastInt64ToBuffer(x, &buffer[1]);
+ char* actual = absl::numbers_internal::FastInt64ToBuffer(x, &buffer[1]);
std::string expected = std::to_string(x);
- ASSERT_TRUE(expected == actual)
+ ASSERT_TRUE(expected == std::string(&buffer[1], actual))
<< "Expected \"" << expected << "\", Actual \"" << actual << "\", Input "
<< x;
ASSERT_EQ(buffer[0], '*');
}
void CheckUInt32(uint32_t x) {
- char buffer[kFastUInt64ToBufferSize];
- char* actual = FastUInt32ToBuffer(x, buffer);
+ char buffer[absl::numbers_internal::kFastToBufferSize];
+ char* actual = absl::numbers_internal::FastUInt32ToBuffer(x, buffer);
std::string expected = std::to_string(x);
- ASSERT_TRUE(expected == actual)
+ ASSERT_TRUE(expected == std::string(buffer, actual))
<< "Expected \"" << expected << "\", Actual \"" << actual << "\", Input "
<< x;
}
void CheckUInt64(uint64_t x) {
- char buffer[kFastUInt64ToBufferSize + 1];
- char* actual = FastUInt64ToBuffer(x, &buffer[1]);
+ char buffer[absl::numbers_internal::kFastToBufferSize + 1];
+ char* actual = absl::numbers_internal::FastUInt64ToBuffer(x, &buffer[1]);
std::string expected = std::to_string(x);
- ASSERT_TRUE(expected == actual)
+ ASSERT_TRUE(expected == std::string(&buffer[1], actual))
<< "Expected \"" << expected << "\", Actual \"" << actual << "\", Input "
<< x;
}
void CheckHex64(uint64_t v) {
- char expected[kFastUInt64ToBufferSize];
+ char expected[16 + 1];
std::string actual = absl::StrCat(absl::Hex(v, absl::kZeroPad16));
snprintf(expected, sizeof(expected), "%016" PRIx64, static_cast<uint64_t>(v));
ASSERT_TRUE(expected == actual)
<< "Expected \"" << expected << "\", Actual \"" << actual << "\"";
}
-void TestFastPrints() {
+TEST(Numbers, TestFastPrints) {
for (int i = -100; i <= 100; i++) {
CheckInt32(i);
CheckInt64(i);
}
std::string ToNineDigits(double value) {
- char buffer[kFastToBufferSize]; // more than enough for %.9g
+ char buffer[16]; // more than enough for %.9g
snprintf(buffer, sizeof(buffer), "%.9g", value);
return buffer;
}
"World"
};
+ std::string stdstrs[] = {
+ "std::Hello",
+ "std::Cruel",
+ "std::World"
+ };
+
absl::string_view pieces[] = {"Hello", "Cruel", "World"};
const char* c_strs[] = {
EXPECT_EQ(result.substr(old_size), "CruelWorld");
old_size = result.size();
- absl::StrAppend(&result, strs[0], ", ", pieces[2]);
- EXPECT_EQ(result.substr(old_size), "Hello, World");
+ absl::StrAppend(&result, stdstrs[0], ", ", pieces[2]);
+ EXPECT_EQ(result.substr(old_size), "std::Hello, World");
old_size = result.size();
- absl::StrAppend(&result, strs[0], ", ", strs[1], " ", strs[2], "!");
- EXPECT_EQ(result.substr(old_size), "Hello, Cruel World!");
+ absl::StrAppend(&result, strs[0], ", ", stdstrs[1], " ", strs[2], "!");
+ EXPECT_EQ(result.substr(old_size), "Hello, std::Cruel World!");
old_size = result.size();
absl::StrAppend(&result, pieces[0], ", ", pieces[1], " ", pieces[2]);
// // Joins a `std::map`, with each key-value pair separated by an equals
// // sign. This pattern would also work with, say, a
// // `std::vector<std::pair<>>`.
-// std::map<std::string, int> m = {
-// std::make_pair("a", 1),
-// std::make_pair("b", 2),
-// std::make_pair("c", 3)};
-// std::string s = absl::StrJoin(m, ",", strings::PairFormatter("="));
+// std::map<std::string, int> m = {
+// std::make_pair("a", 1),
+// std::make_pair("b", 2),
+// std::make_pair("c", 3)};
+// std::string s = absl::StrJoin(m, ",", absl::PairFormatter("="));
// EXPECT_EQ("a=1,b=2,c=3", s);
//
// Example 7:
EXPECT_FALSE(IsFoundAt("abcd", four_char_delim, 0));
}
-// Allocates too much memory for TSan and MSan.
-#if !defined(THREAD_SANITIZER) && !defined(MEMORY_SANITIZER)
TEST(Split, WorksWithLargeStrings) {
- if (sizeof(size_t) > 4 && !RunningOnValgrind()) {
- std::string s(1ULL << 31, 'x');
- s.push_back('-'); // 2G + 1 byte
+ if (sizeof(size_t) > 4) {
+ std::string s((uint32_t{1} << 31) + 1, 'x'); // 2G + 1 byte
+ s.back() = '-';
std::vector<absl::string_view> v = absl::StrSplit(s, '-');
EXPECT_EQ(2, v.size());
// The first element will contain 2G of 'x's.
EXPECT_EQ("", v[1]);
}
}
-#endif // THREAD_SANITIZER
TEST(SplitInternalTest, TypeTraits) {
EXPECT_FALSE(absl::strings_internal::HasMappedType<int>::value);
// string_view::remove_prefix()
//
- // Removes the first `n` characters from the `string_view`, returning a
- // pointer to the new first character. Note that the underlying std::string is not
- // changed, only the view.
+ // Removes the first `n` characters from the `string_view`. Note that the
+ // underlying std::string is not changed, only the view.
void remove_prefix(size_type n) {
assert(n <= length_);
ptr_ += n;
constexpr absl::string_view::iterator const_begin_empty = sp.begin();
constexpr absl::string_view::iterator const_end_empty = sp.end();
EXPECT_EQ(const_begin_empty, const_end_empty);
+
+ constexpr absl::string_view::iterator const_begin_nullptr = cstr.begin();
+ constexpr absl::string_view::iterator const_end_nullptr = cstr.end();
+ EXPECT_EQ(const_begin_nullptr, const_end_nullptr);
#endif
constexpr absl::string_view::iterator const_begin = cstr_len.begin();
EXPECT_EQ(absl::StripSuffix("", ""), "");
}
+TEST(Strip, RemoveExtraAsciiWhitespace) {
+ const char* inputs[] = {
+ "No extra space",
+ " Leading whitespace",
+ "Trailing whitespace ",
+ " Leading and trailing ",
+ " Whitespace \t in\v middle ",
+ "'Eeeeep! \n Newlines!\n",
+ "nospaces",
+ };
+ const char* outputs[] = {
+ "No extra space",
+ "Leading whitespace",
+ "Trailing whitespace",
+ "Leading and trailing",
+ "Whitespace in middle",
+ "'Eeeeep! Newlines!",
+ "nospaces",
+ };
+ int NUM_TESTS = 7;
+
+ for (int i = 0; i < NUM_TESTS; i++) {
+ std::string s(inputs[i]);
+ absl::RemoveExtraAsciiWhitespace(&s);
+ EXPECT_STREQ(outputs[i], s.c_str());
+ }
+
+ // Test that absl::RemoveExtraAsciiWhitespace returns immediately for empty
+ // strings (It was adding the \0 character to the C++ std::string, which broke
+ // tests involving empty())
+ std::string zero_string = "";
+ assert(zero_string.empty());
+ absl::RemoveExtraAsciiWhitespace(&zero_string);
+ EXPECT_EQ(zero_string.size(), 0);
+ EXPECT_TRUE(zero_string.empty());
+}
+
+TEST(Strip, StripTrailingAsciiWhitespace) {
+ std::string test = "foo ";
+ absl::StripTrailingAsciiWhitespace(&test);
+ EXPECT_EQ(test, "foo");
+
+ test = " ";
+ absl::StripTrailingAsciiWhitespace(&test);
+ EXPECT_EQ(test, "");
+
+ test = "";
+ absl::StripTrailingAsciiWhitespace(&test);
+ EXPECT_EQ(test, "");
+
+ test = " abc\t";
+ absl::StripTrailingAsciiWhitespace(&test);
+ EXPECT_EQ(test, " abc");
+}
+
+TEST(String, StripLeadingAsciiWhitespace) {
+ absl::string_view orig = "\t \n\f\r\n\vfoo";
+ EXPECT_EQ("foo", absl::StripLeadingAsciiWhitespace(orig));
+ orig = "\t \n\f\r\v\n\t \n\f\r\v\n";
+ EXPECT_EQ(absl::string_view(), absl::StripLeadingAsciiWhitespace(orig));
+}
+
} // namespace
"internal/graphcycles.h",
],
copts = ABSL_DEFAULT_COPTS,
+ visibility = [
+ "//absl:__subpackages__",
+ ],
deps = [
"//absl/base",
"//absl/base:core_headers",
"@com_google_googletest//:gtest_main",
],
)
+
+cc_test(
+ name = "lifetime_test",
+ srcs = [
+ "lifetime_test.cc",
+ ],
+ copts = ABSL_TEST_COPTS,
+ linkopts = select({
+ "//absl:windows": [],
+ "//conditions:default": ["-pthread"],
+ }),
+ deps = [
+ ":synchronization",
+ "//absl/base",
+ "//absl/base:core_headers",
+ ],
+)
--- /dev/null
+#
+# Copyright 2017 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+list(APPEND SYNCHRONIZATION_PUBLIC_HEADERS
+ "barrier.h"
+ "blocking_counter.h"
+ "mutex.h"
+ "notification.h"
+)
+
+
+list(APPEND SYNCHRONIZATION_INTERNAL_HEADERS
+ "internal/create_thread_identity.h"
+ "internal/graphcycles.h"
+ "internal/kernel_timeout.h"
+ "internal/per_thread_sem.h"
+ "internal/thread_pool.h"
+ "internal/waiter.h"
+)
+
+
+
+# synchronization library
+list(APPEND SYNCHRONIZATION_SRC
+ "barrier.cc"
+ "blocking_counter.cc"
+ "internal/create_thread_identity.cc"
+ "internal/per_thread_sem.cc"
+ "internal/waiter.cc"
+ "internal/graphcycles.cc"
+ "notification.cc"
+ "mutex.cc"
+)
+set(SYNCHRONIZATION_PUBLIC_LIBRARIES absl::base absl_malloc_extension absl::time)
+
+absl_library(
+ TARGET
+ absl_synchronization
+ SOURCES
+ ${SYNCHRONIZATION_SRC}
+ PUBLIC_LIBRARIES
+ ${SYNCHRONIZATION_PUBLIC_LIBRARIES}
+ EXPORT_NAME
+ synchronization
+)
+
+
+#
+## TESTS
+#
+
+
+# test barrier_test
+set(BARRIER_TEST_SRC "barrier_test.cc")
+set(BARRIER_TEST_PUBLIC_LIBRARIES absl::synchronization)
+
+absl_test(
+ TARGET
+ barrier_test
+ SOURCES
+ ${BARRIER_TEST_SRC}
+ PUBLIC_LIBRARIES
+ ${BARRIER_TEST_PUBLIC_LIBRARIES}
+)
+
+
+# test blocking_counter_test
+set(BLOCKING_COUNTER_TEST_SRC "blocking_counter_test.cc")
+set(BLOCKING_COUNTER_TEST_PUBLIC_LIBRARIES absl::synchronization)
+
+absl_test(
+ TARGET
+ blocking_counter_test
+ SOURCES
+ ${BLOCKING_COUNTER_TEST_SRC}
+ PUBLIC_LIBRARIES
+ ${BLOCKING_COUNTER_TEST_PUBLIC_LIBRARIES}
+)
+
+
+# test graphcycles_test
+set(GRAPHCYCLES_TEST_SRC "internal/graphcycles_test.cc")
+set(GRAPHCYCLES_TEST_PUBLIC_LIBRARIES absl::synchronization)
+
+absl_test(
+ TARGET
+ graphcycles_test
+ SOURCES
+ ${GRAPHCYCLES_TEST_SRC}
+ PUBLIC_LIBRARIES
+ ${GRAPHCYCLES_TEST_PUBLIC_LIBRARIES}
+)
+
+
+# test mutex_test
+set(MUTEX_TEST_SRC "mutex_test.cc")
+set(MUTEX_TEST_PUBLIC_LIBRARIES absl::synchronization)
+
+absl_test(
+ TARGET
+ mutex_test
+ SOURCES
+ ${MUTEX_TEST_SRC}
+ PUBLIC_LIBRARIES
+ ${MUTEX_TEST_PUBLIC_LIBRARIES}
+)
+
+
+# test notification_test
+set(NOTIFICATION_TEST_SRC "notification_test.cc")
+set(NOTIFICATION_TEST_PUBLIC_LIBRARIES absl::synchronization)
+
+absl_test(
+ TARGET
+ notification_test
+ SOURCES
+ ${NOTIFICATION_TEST_SRC}
+ PUBLIC_LIBRARIES
+ ${NOTIFICATION_TEST_PUBLIC_LIBRARIES}
+)
+
+
+# test per_thread_sem_test_common
+set(PER_THREAD_SEM_TEST_COMMON_SRC "internal/per_thread_sem_test.cc")
+set(PER_THREAD_SEM_TEST_COMMON_PUBLIC_LIBRARIES absl::synchronization absl::strings)
+
+absl_test(
+ TARGET
+ per_thread_sem_test_common
+ SOURCES
+ ${PER_THREAD_SEM_TEST_COMMON_SRC}
+ PUBLIC_LIBRARIES
+ ${PER_THREAD_SEM_TEST_COMMON_PUBLIC_LIBRARIES}
+)
+
+
+
+
+
+
+
namespace absl {
namespace synchronization_internal {
+class Futex;
class Waiter;
class KernelTimeout {
}
#endif
+ friend class Futex;
friend class Waiter;
};
cond, synchronization_internal::DeadlineFromTimeout(timeout));
}
+void Mutex::ReaderLockWhen(const Condition& cond) {
+ ReaderLock();
+ Await(cond);
+}
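A small caller-side sketch of the new ReaderLockWhen() entry point; mu, ready, and WaitAndRead() are illustrative names only:

    #include "absl/synchronization/mutex.h"

    static absl::Mutex mu;
    static bool ready = false;  // guarded by mu

    void WaitAndRead() {
      mu.ReaderLockWhen(absl::Condition(&ready));  // blocks until ready is true
      // ... read state protected by mu ...
      mu.ReaderUnlock();
    }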
+
bool Mutex::ReaderLockWhenWithTimeout(const Condition& cond,
absl::Duration timeout) {
return LockWhenWithTimeout(cond, timeout);
};
// Do not use this implementation detail of CondVar and Mutex. A storage
-// space for T that supports a base::LinkerInitialized constructor. T must
+// space for T that supports a LinkerInitialized constructor. T must
// have a default constructor, which is called by the first call to
-// get(). T's destructor is never called if the base::LinkerInitialized
+// get(). T's destructor is never called if the LinkerInitialized
// constructor is called.
//
// Objects constructed with the default constructor are constructed and
// destructed like any other object, and should never be allocated in
// static storage.
//
-// Objects constructed with the base::LinkerInitialized constructor should
+// Objects constructed with the LinkerInitialized constructor should
// always be in static storage. For such objects, calls to get() are always
// valid, except from signal handlers.
//
//
// [basic.life] says an object has non-trivial initialization if it is of
// class type and it is initialized by a constructor other than a trivial
-// default constructor. (the base::LinkerInitialized constructor is
+// default constructor. (the LinkerInitialized constructor is
// non-trivial)
//
// [basic.life] says the lifetime of an object with a non-trivial
// members of an instance outside its
// lifetime. (SynchronizationStorage::get() access non-static members)
//
-// So, base::LinkerInitialized object of SynchronizationStorage uses a
+// So, LinkerInitialized object of SynchronizationStorage uses a
// non-trivial constructor, which is called at some point during dynamic
// initialization, and is therefore subject to order of dynamic
// initialization bugs, where get() is called before the object's
// constructor is, resulting in undefined behavior.
//
-// Similarly, a base::LinkerInitialized SynchronizationStorage object has a
+// Similarly, a LinkerInitialized SynchronizationStorage object has a
// non-trivial destructor, and so its lifetime ends at some point during
// destruction of objects with static storage duration [basic.start.term]
// p4. There is a window where other exit code could call get() after this
// occurs, resulting in undefined behavior.
//
-// Combined, these statements imply that base::LinkerInitialized instances
+// Combined, these statements imply that LinkerInitialized instances
// of SynchronizationStorage<T> rely on undefined behavior.
//
// However, in practice, the implementation works on all supported
// compilers. Specifically, we rely on:
//
// a) zero-initialization being sufficient to initialize
-// base::LinkerInitialized instances for the purposes of calling
+// LinkerInitialized instances for the purposes of calling
// get(), regardless of when the constructor is called. This is
// because the is_dynamic_ boolean is correctly zero-initialized to
// false.
//
-// b) the base::LinkerInitialized constructor is a NOP, and immaterial to
+// b) the LinkerInitialized constructor is a NOP, and immaterial to
// even to concurrent calls to get().
//
-// c) the destructor being a NOP for base::LinkerInitialized objects
+// c) the destructor being a NOP for LinkerInitialized objects
// (guaranteed by a check for !is_dynamic_), and so any concurrent and
// subsequent calls to get() functioning as if the destructor were not
// called, by virtue of the instances' storage remaining valid after the
// Instances allocated in static storage (not on the heap, not on the
// stack) should use this constructor.
- explicit SynchronizationStorage(base::LinkerInitialized) {}
+ explicit SynchronizationStorage(base_internal::LinkerInitialized) {}
SynchronizationStorage(SynchronizationStorage&) = delete;
SynchronizationStorage& operator=(SynchronizationStorage&) = delete;
// incur the cost of absl::call_once().
//
// For instances in static storage constructed with the
- // base::LinkerInitialized constructor, may be called at any time without
+ // LinkerInitialized constructor, may be called at any time without
// regard for order of dynamic initialization or destruction of objects
// in static storage. See the class comment for caveats.
T* get() {
// When true, T's destructor is run when this is destructed.
//
- // The base::LinkerInitialized constructor assumes this value will be set
+ // The LinkerInitialized constructor assumes this value will be set
// false by static initialization.
bool is_dynamic_;
#include <atomic>
#include <cassert>
+#include <cstdint>
#include "absl/base/internal/malloc_extension.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/internal/thread_identity.h"
+#include "absl/base/optimization.h"
#include "absl/synchronization/internal/kernel_timeout.h"
namespace absl {
#define FUTEX_BITSET_MATCH_ANY 0xFFFFFFFF
#endif
#endif
+class Futex {
+ public:
+ static int WaitUntil(std::atomic<int32_t> *v, int32_t val,
+ KernelTimeout t) {
+ int err = 0;
+ if (t.has_timeout()) {
+ // https://locklessinc.com/articles/futex_cheat_sheet/
+ // Unlike FUTEX_WAIT, FUTEX_WAIT_BITSET uses absolute time.
+ struct timespec abs_timeout = t.MakeAbsTimespec();
+ // Atomically check that the futex value is still 0, and if it
+ // is, sleep until abs_timeout or until woken by FUTEX_WAKE.
+ err = syscall(
+ SYS_futex, reinterpret_cast<int32_t *>(v),
+ FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME, val,
+ &abs_timeout, nullptr, FUTEX_BITSET_MATCH_ANY);
+ } else {
+ // Atomically check that the futex value is still 0, and if it
+ // is, sleep until woken by FUTEX_WAKE.
+ err = syscall(SYS_futex, reinterpret_cast<int32_t *>(v),
+ FUTEX_WAIT | FUTEX_PRIVATE_FLAG, val, nullptr);
+ }
+ if (err != 0) {
+ err = -errno;
+ }
+ return err;
+ }
+
+ static int Wake(std::atomic<int32_t> *v, int32_t count) {
+ int err = syscall(SYS_futex, reinterpret_cast<int32_t *>(v),
+ FUTEX_WAKE | FUTEX_PRIVATE_FLAG, count);
+ if (ABSL_PREDICT_FALSE(err < 0)) {
+ err = -errno;
+ }
+ return err;
+ }
+};
void Waiter::Init() {
futex_.store(0, std::memory_order_relaxed);
// Loop until we can atomically decrement futex from a positive
// value, waiting on a futex while we believe it is zero.
while (true) {
- int x = futex_.load(std::memory_order_relaxed);
+ int32_t x = futex_.load(std::memory_order_relaxed);
if (x != 0) {
if (!futex_.compare_exchange_weak(x, x - 1,
std::memory_order_acquire,
return true; // Consumed a wakeup, we are done.
}
- int err = 0;
- if (t.has_timeout()) {
- // https://locklessinc.com/articles/futex_cheat_sheet/
- // Unlike FUTEX_WAIT, FUTEX_WAIT_BITSET uses absolute time.
- struct timespec abs_timeout = t.MakeAbsTimespec();
- // Atomically check that the futex value is still 0, and if it
- // is, sleep until abs_timeout or until woken by FUTEX_WAKE.
- err = syscall(
- SYS_futex, reinterpret_cast<int *>(&futex_),
- FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME, 0,
- &abs_timeout, nullptr, FUTEX_BITSET_MATCH_ANY);
- } else {
- // Atomically check that the futex value is still 0, and if it
- // is, sleep until woken by FUTEX_WAKE.
- err = syscall(SYS_futex, reinterpret_cast<int *>(&futex_),
- FUTEX_WAIT | FUTEX_PRIVATE_FLAG, 0, nullptr);
- }
+ const int err = Futex::WaitUntil(&futex_, 0, t);
if (err != 0) {
- if (errno == EINTR || errno == EWOULDBLOCK) {
+ if (err == -EINTR || err == -EWOULDBLOCK) {
// Do nothing, the loop will retry.
- } else if (errno == ETIMEDOUT) {
- return false; // Timeout.
+ } else if (err == -ETIMEDOUT) {
+ return false;
} else {
- ABSL_RAW_LOG(FATAL, "Futex operation failed with errno %d\n", errno);
+ ABSL_RAW_LOG(FATAL, "Futex operation failed with error %d\n", err);
}
}
void Waiter::Poke() {
// Wake one thread waiting on the futex.
- int err = syscall(SYS_futex, reinterpret_cast<int *>(&futex_),
- FUTEX_WAKE | FUTEX_PRIVATE_FLAG, 1);
- if (err < 0) {
- ABSL_RAW_LOG(FATAL, "FUTEX_WAKE failed with errno %d\n", errno);
+ const int err = Futex::Wake(&futex_, 1);
+ if (ABSL_PREDICT_FALSE(err < 0)) {
+ ABSL_RAW_LOG(FATAL, "Futex operation failed with error %d\n", err);
}
}
#endif
#include <atomic>
+#include <cstdint>
#include "absl/base/internal/thread_identity.h"
#include "absl/synchronization/internal/kernel_timeout.h"
private:
#if ABSL_WAITER_MODE == ABSL_WAITER_MODE_FUTEX
- // Futexes are defined by specification to be ints.
- // Thus std::atomic<int> must be just an int with lockfree methods.
- std::atomic<int> futex_;
- static_assert(sizeof(int) == sizeof(futex_), "Wrong size for futex");
+ // Futexes are defined by specification to be 32-bits.
+ // Thus std::atomic<int32_t> must be just an int32_t with lockfree methods.
+ std::atomic<int32_t> futex_;
+ static_assert(sizeof(int32_t) == sizeof(futex_), "Wrong size for futex");
#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_CONDVAR
pthread_mutex_t mu_;
--- /dev/null
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <cstdlib>
+#include <thread> // NOLINT(build/c++11), Abseil test
+#include <type_traits>
+
+#include "absl/base/attributes.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/synchronization/mutex.h"
+#include "absl/synchronization/notification.h"
+
+namespace {
+
+// A two-threaded test which checks that Mutex, CondVar, and Notification have
+// correct basic functionality. The intent is to establish that they
+// function correctly in various phases of construction and destruction.
+//
+// Thread one acquires a lock on 'mutex', wakes thread two via 'notification',
+// then waits for 'state' to be set, as signalled by 'condvar'.
+//
+// Thread two waits on 'notification', then sets 'state' inside the 'mutex',
+// signalling the change via 'condvar'.
+//
+// These tests use ABSL_RAW_CHECK to validate invariants, rather than EXPECT or
+// ASSERT from gUnit, because we need to invoke them during global destructors,
+// when gUnit teardown would have already begun.
+void ThreadOne(absl::Mutex* mutex, absl::CondVar* condvar,
+ absl::Notification* notification, bool* state) {
+ // Test that the notification is in a valid initial state.
+ ABSL_RAW_CHECK(!notification->HasBeenNotified(), "invalid Notification");
+ ABSL_RAW_CHECK(*state == false, "*state not initialized");
+
+ {
+ absl::MutexLock lock(mutex);
+
+ notification->Notify();
+ ABSL_RAW_CHECK(notification->HasBeenNotified(), "invalid Notification");
+
+ while (*state == false) {
+ condvar->Wait(mutex);
+ }
+ }
+}
+
+void ThreadTwo(absl::Mutex* mutex, absl::CondVar* condvar,
+ absl::Notification* notification, bool* state) {
+ ABSL_RAW_CHECK(*state == false, "*state not initialized");
+
+ // Wake thread one
+ notification->WaitForNotification();
+ ABSL_RAW_CHECK(notification->HasBeenNotified(), "invalid Notification");
+ {
+ absl::MutexLock lock(mutex);
+ *state = true;
+ condvar->Signal();
+ }
+}
+
+// Launch thread 1 and thread 2, and block on their completion.
+void RunTests(absl::Mutex* mutex, absl::CondVar* condvar,
+ absl::Notification* notification) {
+ bool state = false;
+ std::thread thread_one(ThreadOne, mutex, condvar, notification, &state);
+ std::thread thread_two(ThreadTwo, mutex, condvar, notification, &state);
+ thread_one.join();
+ thread_two.join();
+}
+
+void TestLocals() {
+ absl::Mutex mutex;
+ absl::CondVar condvar;
+ absl::Notification notification;
+ RunTests(&mutex, &condvar, &notification);
+}
+
+} // namespace
+
+int main() {
+ TestLocals();
+ // Explicitly call exit(0) here, to make it clear that we intend for the
+ // above global object destructors to run.
+ std::exit(0);
+}
#endif
Mutex::Mutex() : mu_(0) {
- ABSL_TSAN_MUTEX_CREATE(this, 0);
+ ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static);
}
static bool DebugOnlyIsExiting() {
if (kDebugMode) {
this->ForgetDeadlockInfo();
}
- ABSL_TSAN_MUTEX_DESTROY(this, 0);
+ ABSL_TSAN_MUTEX_DESTROY(this, __tsan_mutex_not_static);
}
void Mutex::EnableDebugLog(const char *name) {
// // assume count_ is not internal reference count
// int count_ GUARDED_BY(mu_);
//
-// mu_.LockWhen(Condition(+[](const int* count) { return *count == 0; },
+// mu_.LockWhen(Condition(+[](int* count) { return *count == 0; },
// &count_));
//
// When multiple threads are waiting on exactly the same condition, make sure
}
static void TestMu(TestContext *cxt, int c) {
- SetInvariantChecked(false);
- cxt->mu.EnableInvariantDebugging(CheckSumG0G1, cxt);
for (int i = 0; i != cxt->iterations; i++) {
absl::MutexLock l(&cxt->mu);
int a = cxt->g0 + 1;
}
static void TestTry(TestContext *cxt, int c) {
- SetInvariantChecked(false);
- cxt->mu.EnableInvariantDebugging(CheckSumG0G1, cxt);
for (int i = 0; i != cxt->iterations; i++) {
do {
std::this_thread::yield();
}
static void TestRW(TestContext *cxt, int c) {
- SetInvariantChecked(false);
- cxt->mu.EnableInvariantDebugging(CheckSumG0G1, cxt);
if ((c & 1) == 0) {
for (int i = 0; i != cxt->iterations; i++) {
absl::WriterMutexLock l(&cxt->mu);
cv->Signal();
}
-// Basis for the parameterized tests configured below.
-static int RunTest(void (*test)(TestContext *cxt, int), int threads,
- int iterations, int operations) {
- TestContext cxt;
+// Code common to RunTest() and RunTestWithInvariantDebugging().
+static int RunTestCommon(TestContext *cxt, void (*test)(TestContext *cxt, int),
+ int threads, int iterations, int operations) {
absl::Mutex mu2;
absl::CondVar cv2;
- int c0;
- int c1;
-
- // run with large thread count for full test and to get timing
-
-#if !defined(ABSL_MUTEX_ENABLE_INVARIANT_DEBUGGING_NOT_IMPLEMENTED)
- absl::EnableMutexInvariantDebugging(false);
-#endif
- c0 = 0;
- c1 = 0;
- cxt.g0 = 0;
- cxt.g1 = 0;
- cxt.iterations = iterations;
- cxt.threads = threads;
+ int c0 = 0;
+ int c1 = 0;
+ cxt->g0 = 0;
+ cxt->g1 = 0;
+ cxt->iterations = iterations;
+ cxt->threads = threads;
absl::synchronization_internal::ThreadPool tp(threads);
for (int i = 0; i != threads; i++) {
tp.Schedule(std::bind(&EndTest, &c0, &c1, &mu2, &cv2,
std::function<void(int)>(
- std::bind(test, &cxt, std::placeholders::_1))));
+ std::bind(test, cxt, std::placeholders::_1))));
}
mu2.Lock();
while (c1 != threads) {
cv2.Wait(&mu2);
}
mu2.Unlock();
- int saved_g0 = cxt.g0;
+ return cxt->g0;
+}
- // run again with small number of iterations to test invariant checking
+// Basis for the parameterized tests configured below.
+static int RunTest(void (*test)(TestContext *cxt, int), int threads,
+ int iterations, int operations) {
+ TestContext cxt;
+ return RunTestCommon(&cxt, test, threads, iterations, operations);
+}
+// Like RunTest(), but sets an invariant on the tested Mutex and
+// verifies that the invariant check happened. The invariant function
+// will be passed the TestContext* as its arg and must call
+// SetInvariantChecked(true);
#if !defined(ABSL_MUTEX_ENABLE_INVARIANT_DEBUGGING_NOT_IMPLEMENTED)
+static int RunTestWithInvariantDebugging(void (*test)(TestContext *cxt, int),
+ int threads, int iterations,
+ int operations,
+ void (*invariant)(void *)) {
absl::EnableMutexInvariantDebugging(true);
-#endif
- SetInvariantChecked(true);
- c0 = 0;
- c1 = 0;
- cxt.g0 = 0;
- cxt.g1 = 0;
- cxt.iterations = (iterations > 10 ? 10 : iterations);
- cxt.threads = threads;
- for (int i = 0; i != threads; i++) {
- tp.Schedule(std::bind(&EndTest, &c0, &c1, &mu2, &cv2,
- std::function<void(int)>(
- std::bind(test, &cxt, std::placeholders::_1))));
- }
- mu2.Lock();
- while (c1 != threads) {
- cv2.Wait(&mu2);
- }
- mu2.Unlock();
-#if !defined(ABSL_MUTEX_ENABLE_INVARIANT_DEBUGGING_NOT_IMPLEMENTED)
+ SetInvariantChecked(false);
+ TestContext cxt;
+ cxt.mu.EnableInvariantDebugging(invariant, &cxt);
+ int ret = RunTestCommon(&cxt, test, threads, iterations, operations);
ABSL_RAW_CHECK(GetInvariantChecked(), "Invariant not checked");
-#endif
-
- return saved_g0;
+ absl::EnableMutexInvariantDebugging(false); // Restore.
+ return ret;
}
+#endif
// --------------------------------------------------------
// Test for fix of bug in TryRemove()
int iterations = ScaleIterations(10000000) / threads;
int operations = threads * iterations;
EXPECT_EQ(RunTest(&TestMu, threads, iterations, operations), operations);
+#if !defined(ABSL_MUTEX_ENABLE_INVARIANT_DEBUGGING_NOT_IMPLEMENTED)
+ iterations = std::min(iterations, 10);
+ operations = threads * iterations;
+ EXPECT_EQ(RunTestWithInvariantDebugging(&TestMu, threads, iterations,
+ operations, CheckSumG0G1),
+ operations);
+#endif
}
TEST_P(MutexVariableThreadCountTest, Try) {
int iterations = 1000000 / threads;
int operations = iterations * threads;
EXPECT_EQ(RunTest(&TestTry, threads, iterations, operations), operations);
+#if !defined(ABSL_MUTEX_ENABLE_INVARIANT_DEBUGGING_NOT_IMPLEMENTED)
+ iterations = std::min(iterations, 10);
+ operations = threads * iterations;
+ EXPECT_EQ(RunTestWithInvariantDebugging(&TestTry, threads, iterations,
+ operations, CheckSumG0G1),
+ operations);
+#endif
}
TEST_P(MutexVariableThreadCountTest, R20ms) {
int iterations = ScaleIterations(20000000) / threads;
int operations = iterations * threads;
EXPECT_EQ(RunTest(&TestRW, threads, iterations, operations), operations / 2);
+#if !defined(ABSL_MUTEX_ENABLE_INVARIANT_DEBUGGING_NOT_IMPLEMENTED)
+ iterations = std::min(iterations, 10);
+ operations = threads * iterations;
+ EXPECT_EQ(RunTestWithInvariantDebugging(&TestRW, threads, iterations,
+ operations, CheckSumG0G1),
+ operations / 2);
+#endif
}
TEST_P(MutexVariableThreadCountTest, Await) {
],
hdrs = ["internal/test_util.h"],
copts = ABSL_DEFAULT_COPTS,
+ visibility = [
+ "//absl/time:__pkg__",
+ ],
deps = [
":time",
"//absl/base",
--- /dev/null
+#
+# Copyright 2017 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+list(APPEND TIME_PUBLIC_HEADERS
+ "clock.h"
+ "time.h"
+)
+
+
+list(APPEND TIME_INTERNAL_HEADERS
+ "internal/test_util.h"
+)
+
+list(APPEND TIME_SRC
+ "time.cc"
+ "clock.cc"
+ "duration.cc"
+ "format.cc"
+ ${TIME_PUBLIC_HEADERS}
+ ${TIME_INTERNAL_HEADERS}
+)
+set(TIME_PUBLIC_LIBRARIES absl::base absl::stacktrace absl::int128 cctz)
+
+absl_library(
+ TARGET
+ absl_time
+ SOURCES
+ ${TIME_SRC}
+ PUBLIC_LIBRARIES
+ ${TIME_PUBLIC_LIBRARIES}
+ PUBLIC_INCLUDE_DIRS
+ ${CCTZ_INCLUDE_DIRS}
+ EXPORT_NAME
+ time
+)
+
+
+
+#
+## TESTS
+#
+
+# test time_test
+list(APPEND TIME_TEST_SRC
+ "time_test.cc"
+ "clock_test.cc"
+ "duration_test.cc"
+ "format_test.cc"
+ "time_norm_test.cc"
+ "time_test.cc"
+ "time_zone_test.cc"
+ "internal/test_util.cc"
+)
+set(TIME_TEST_PUBLIC_LIBRARIES absl::time)
+
+absl_test(
+ TARGET
+ time_test
+ SOURCES
+ ${TIME_TEST_SRC}
+ PUBLIC_LIBRARIES
+ ${TIME_TEST_PUBLIC_LIBRARIES}
+)
+
+
// Returns the maximum duration that SleepOnce() can sleep for.
constexpr absl::Duration MaxSleep() {
#ifdef _WIN32
- // Windows _sleep() takes unsigned long argument in milliseconds.
+ // Windows Sleep() takes unsigned long argument in milliseconds.
return absl::Milliseconds(
std::numeric_limits<unsigned long>::max()); // NOLINT(runtime/int)
#else
// REQUIRES: to_sleep <= MaxSleep().
void SleepOnce(absl::Duration to_sleep) {
#ifdef _WIN32
- _sleep(to_sleep / absl::Milliseconds(1));
+ Sleep(to_sleep / absl::Milliseconds(1));
#else
struct timespec sleep_time = absl::ToTimespec(to_sleep);
while (nanosleep(&sleep_time, &sleep_time) != 0 && errno == EINTR) {
absl::SleepFor(sleep_time);
absl::Time end = absl::Now();
EXPECT_LE(sleep_time - absl::Milliseconds(100), end - start);
- EXPECT_GE(sleep_time + absl::Milliseconds(100), end - start);
+ EXPECT_GE(sleep_time + absl::Milliseconds(200), end - start);
}
#ifdef ABSL_HAVE_ALARM
absl::Time end = absl::Now();
EXPECT_TRUE(alarm_handler_invoked);
EXPECT_LE(sleep_time - absl::Milliseconds(100), end - start);
- EXPECT_GE(sleep_time + absl::Milliseconds(100), end - start);
+ EXPECT_GE(sleep_time + absl::Milliseconds(200), end - start);
signal(SIGALRM, old_alarm);
}
#endif // ABSL_HAVE_ALARM
char* Format64(char* ep, int width, int64_t v) {
do {
--width;
- *--ep = "0123456789"[v % 10];
+ *--ep = '0' + (v % 10); // contiguous digits
} while (v /= 10);
while (--width >= 0) *--ep = '0'; // zero pad
return ep;
namespace {
// A helper for ParseDuration() that parses a leading number from the given
-// std::string and stores the result in *n. The given std::string pointer is modified
-// to point to the first unconsumed char.
-bool ConsumeDurationNumber(const char** start, double* n) {
- const char* s = *start;
- char* end = nullptr;
- errno = 0;
- *n = strtod(s, &end);
- *start = end;
- return !std::isspace(*s) && errno == 0 && end != s && *n >= 0;
+// std::string and stores the result in *int_part/*frac_part/*frac_scale. The
+// given std::string pointer is modified to point to the first unconsumed char.
+bool ConsumeDurationNumber(const char** dpp, int64_t* int_part,
+ int64_t* frac_part, int64_t* frac_scale) {
+ *int_part = 0;
+ *frac_part = 0;
+ *frac_scale = 1; // invariant: *frac_part < *frac_scale
+ const char* start = *dpp;
+ for (; std::isdigit(**dpp); *dpp += 1) {
+ const int d = **dpp - '0'; // contiguous digits
+ if (*int_part > kint64max / 10) return false;
+ *int_part *= 10;
+ if (*int_part > kint64max - d) return false;
+ *int_part += d;
+ }
+ const bool int_part_empty = (*dpp == start);
+ if (**dpp != '.') return !int_part_empty;
+ for (*dpp += 1; std::isdigit(**dpp); *dpp += 1) {
+ const int d = **dpp - '0'; // contiguous digits
+ if (*frac_scale <= kint64max / 10) {
+ *frac_part *= 10;
+ *frac_part += d;
+ *frac_scale *= 10;
+ }
+ }
+ return !int_part_empty || *frac_scale != 1;
}
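As a worked trace of the helper above (an illustration of the loop logic, not additional code):

    // Input "1.5h":
    //   int_part   == 1
    //   frac_part  == 5
    //   frac_scale == 10   (invariant: frac_part < frac_scale)
    //   *dpp is left pointing at 'h' for ConsumeDurationUnit().
    // ParseDuration() below then accumulates
    //   dur += sign * 1 * unit;        // whole part
    //   dur += sign * 5 * unit / 10;   // fractional part, i.e. 1.5 * unit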
// A helper for ParseDuration() that parses a leading unit designator (e.g.,
Duration dur;
while (*start != '\0') {
- double n = 0;
+ int64_t int_part;
+ int64_t frac_part;
+ int64_t frac_scale;
Duration unit;
- if (!ConsumeDurationNumber(&start, &n) ||
+ if (!ConsumeDurationNumber(&start, &int_part, &frac_part, &frac_scale) ||
!ConsumeDurationUnit(&start, &unit)) {
return false;
}
- dur += sign * n * unit;
+ if (int_part != 0) dur += sign * int_part * unit;
+ if (frac_part != 0) dur += sign * frac_part * unit / frac_scale;
}
*d = dur;
return true;
EXPECT_FALSE(absl::ParseDuration("2s ", &d));
EXPECT_FALSE(absl::ParseDuration(" 2s ", &d));
EXPECT_FALSE(absl::ParseDuration("2mt", &d));
+ EXPECT_FALSE(absl::ParseDuration("1e3s", &d));
// One unit type.
EXPECT_TRUE(absl::ParseDuration("1ns", &d));
EXPECT_TRUE(absl::ParseDuration("2h", &d));
EXPECT_EQ(absl::Hours(2), d);
+ // Huge counts of a unit.
+ EXPECT_TRUE(absl::ParseDuration("9223372036854775807us", &d));
+ EXPECT_EQ(absl::Microseconds(9223372036854775807), d);
+ EXPECT_TRUE(absl::ParseDuration("-9223372036854775807us", &d));
+ EXPECT_EQ(absl::Microseconds(-9223372036854775807), d);
+
// Multiple units.
EXPECT_TRUE(absl::ParseDuration("2h3m4s", &d));
EXPECT_EQ(absl::Hours(2) + absl::Minutes(3) + absl::Seconds(4), d);
EXPECT_TRUE(absl::ParseDuration("1.5h", &d));
EXPECT_EQ(1.5 * absl::Hours(1), d);
+ // Huge fractional counts of a unit.
+ EXPECT_TRUE(absl::ParseDuration("0.4294967295s", &d));
+ EXPECT_EQ(absl::Nanoseconds(429496729) + absl::Nanoseconds(1) / 2, d);
+ EXPECT_TRUE(absl::ParseDuration("0.429496729501234567890123456789s", &d));
+ EXPECT_EQ(absl::Nanoseconds(429496729) + absl::Nanoseconds(1) / 2, d);
+
// Negative durations.
EXPECT_TRUE(absl::ParseDuration("-1s", &d));
EXPECT_EQ(absl::Seconds(-1), d);
{"US/Pacific", //
reinterpret_cast<char*>(America_Los_Angeles), America_Los_Angeles_len},
- // Allows use of the local time zone from a common system-specific location.
+ // Allows use of the local time zone from a system-specific location.
#ifdef _MSC_VER
{"localtime", //
reinterpret_cast<char*>(America_Los_Angeles), America_Los_Angeles_len},
// Microseconds()
// Milliseconds()
// Seconds()
-// Minutes
+// Minutes()
// Hours()
//
// Factory functions for constructing `Duration` values from an integral number
// Example:
//
// absl::Duration d = absl::Milliseconds(1500);
-// int64_t isec = ToInt64Seconds(d); // isec == 1
+// int64_t isec = absl::ToInt64Seconds(d); // isec == 1
int64_t ToInt64Nanoseconds(Duration d);
int64_t ToInt64Microseconds(Duration d);
int64_t ToInt64Milliseconds(Duration d);
// ToDoubleMilliseconds()
// ToDoubleSeconds()
// ToDoubleMinutes()
-// ToDoubleHours
+// ToDoubleHours()
//
// Helper functions that convert a Duration to a floating point count of the
// indicated unit. These functions are shorthand for the `FDivDuration()`
// Example:
//
// absl::Duration d = absl::Milliseconds(1500);
-// double dsec = ToDoubleSeconds(d); // dsec == 1.5
+// double dsec = absl::ToDoubleSeconds(d); // dsec == 1.5
double ToDoubleNanoseconds(Duration d);
double ToDoubleMicroseconds(Duration d);
double ToDoubleMilliseconds(Duration d);
//
// `absl::Time` assumes there are 60 seconds in a minute, which means the
// underlying time scales must be "smeared" to eliminate leap seconds.
-// POSIX, for example, legislates that a `time_t` value of `536457599` shall
-// correspond to "1986-12-31 23:59:59 +0000".
-//
+// See https://developers.google.com/time/smear.
//
// Even though `absl::Time` supports a wide range of timestamps, exercise
// caution when using values in the distant past. `absl::Time` uses the
: MakeDuration(std::numeric_limits<int64_t>::min(), ~0U);
}
-// Returns (-n)-1 (equivalently -(n+1)) without overflowing on any input value.
+// Returns (-n)-1 (equivalently -(n+1)) without avoidable overflow.
constexpr int64_t NegateAndSubtractOne(int64_t n) {
+ // Note: Good compilers will optimize this expression to ~n when using
+ // a two's-complement representation (which is required for int64_t).
return (n < 0) ? -(n + 1) : (-n) - 1;
}
constexpr Duration operator-(Duration d) {
// This is a little interesting because of the special cases.
//
- // Infinities stay infinite, and just change direction.
+ // If rep_lo_ is zero, we have it easy; it's safe to negate rep_hi_, we're
+ // dealing with an integral number of seconds, and the only special case is
+ // the maximum negative finite duration, which can't be negated.
//
- // The maximum negative finite duration can't be negated (at least, not
- // on a two's complement machine), so we return infinity for that case.
- // Next we dispatch the case where rep_lo_ is zero, observing that it's
- // safe to negate rep_hi_ in this case because it's not int64_t-min (or
- // else we'd have handled it above, returning InfiniteDuration()).
+ // Infinities stay infinite, and just change direction.
//
// Finally we're in the case where rep_lo_ is non-zero, and we can borrow
// a second's worth of ticks and avoid overflow (as negating int64_t-min + 1
// is safe).
- return time_internal::IsInfiniteDuration(d)
- ? time_internal::OppositeInfinity(d)
- : (time_internal::GetRepHi(d) ==
- std::numeric_limits<int64_t>::min() &&
- time_internal::GetRepLo(d) == 0)
+ return time_internal::GetRepLo(d) == 0
+ ? time_internal::GetRepHi(d) == std::numeric_limits<int64_t>::min()
? InfiniteDuration()
- : (time_internal::GetRepLo(d) == 0)
- ? time_internal::MakeDuration(
- -time_internal::GetRepHi(d))
- : time_internal::MakeDuration(
- time_internal::NegateAndSubtractOne(
- time_internal::GetRepHi(d)),
- time_internal::kTicksPerSecond -
- time_internal::GetRepLo(d));
+ : time_internal::MakeDuration(-time_internal::GetRepHi(d))
+ : time_internal::IsInfiniteDuration(d)
+ ? time_internal::OppositeInfinity(d)
+ : time_internal::MakeDuration(
+ time_internal::NegateAndSubtractOne(
+ time_internal::GetRepHi(d)),
+ time_internal::kTicksPerSecond -
+ time_internal::GetRepLo(d));
}
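A worked example of the borrow case above, assuming a representation with kTicksPerSecond sub-second ticks as used by MakeDuration():

    // d == 1.25s  ->  rep_hi_ == 1, rep_lo_ == kTicksPerSecond / 4
    // -d          ->  MakeDuration(NegateAndSubtractOne(1),                // -2 seconds
    //                              kTicksPerSecond - kTicksPerSecond / 4)  // +0.75 seconds
    //             ->  -2s + 0.75s == -1.25s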
constexpr Duration Nanoseconds(int64_t n) {
srcs = ["bad_any_cast.cc"],
hdrs = ["bad_any_cast.h"],
copts = ABSL_EXCEPTIONS_FLAG + ABSL_DEFAULT_COPTS,
- features = [
- "-use_header_modules",
- ],
deps = [
"//absl/base",
"//absl/base:config",
"//absl/base:core_headers",
"//absl/base:throw_delegate",
"//absl/meta:type_traits",
- "//absl/strings",
],
)
srcs = ["bad_optional_access.cc"],
hdrs = ["bad_optional_access.h"],
copts = ABSL_DEFAULT_COPTS + ABSL_EXCEPTIONS_FLAG,
- features = [
- "-use_header_modules",
- ],
deps = [
"//absl/base",
"//absl/base:config",
--- /dev/null
+#
+# Copyright 2017 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+list(APPEND TYPES_PUBLIC_HEADERS
+ "any.h"
+ "bad_any_cast.h"
+ "bad_optional_access.h"
+ "optional.h"
+ "span.h"
+)
+
+
+# any library
+absl_header_library(
+ TARGET
+ absl_any
+ PUBLIC_LIBRARIES
+ absl::utility
+ EXPORT_NAME
+ any
+)
+
+# span library
+absl_header_library(
+ TARGET
+ absl_span
+ PUBLIC_LIBRARIES
+ absl::utility
+ EXPORT_NAME
+ span
+)
+
+
+# bad_any_cast library
+list(APPEND BAD_ANY_CAST_SRC
+ "bad_any_cast.cc"
+ ${TYPES_PUBLIC_HEADERS}
+)
+
+absl_library(
+ TARGET
+ absl_bad_any_cast
+ SOURCES
+ ${BAD_ANY_CAST_SRC}
+ PUBLIC_LIBRARIES
+ absl::base absl::any
+ EXPORT_NAME
+ bad_any_cast
+)
+
+
+# optional library
+list(APPEND OPTIONAL_SRC
+ "optional.cc"
+)
+
+absl_library(
+ TARGET
+ absl_optional
+ SOURCES
+ ${OPTIONAL_SRC}
+ PUBLIC_LIBRARIES
+ absl::base
+ EXPORT_NAME
+ optional
+)
+
+
+set(BAD_OPTIONAL_ACCESS_SRC "bad_optional_access.cc")
+set(BAD_OPTIONAL_ACCESS_PUBLIC_LIBRARIES absl::base)
+
+absl_library(
+ TARGET
+ absl_bad_optional_access
+ SOURCES
+ ${BAD_OPTIONAL_ACCESS_SRC}
+ PUBLIC_LIBRARIES
+ ${BAD_OPTIONAL_ACCESS_PUBLIC_LIBRARIES}
+ EXPORT_NAME
+ bad_optional_access
+)
+
+
+
+#
+## TESTS
+#
+
+
+# test any_test
+set(ANY_TEST_SRC "any_test.cc")
+set(ANY_TEST_PUBLIC_LIBRARIES absl::base absl::throw_delegate absl::any absl::bad_any_cast test_instance_tracker_lib)
+
+absl_test(
+ TARGET
+ any_test
+ SOURCES
+ ${ANY_TEST_SRC}
+ PUBLIC_LIBRARIES
+ ${ANY_TEST_PUBLIC_LIBRARIES}
+ PRIVATE_COMPILE_FLAGS
+ ${ABSL_EXCEPTIONS_FLAG}
+)
+
+
+# test any_test_noexceptions
+absl_test(
+ TARGET
+ any_test_noexceptions
+ SOURCES
+ ${ANY_TEST_SRC}
+ PUBLIC_LIBRARIES
+ ${ANY_TEST_PUBLIC_LIBRARIES}
+)
+
+
+# test span_test
+set(SPAN_TEST_SRC "span_test.cc")
+set(SPAN_TEST_PUBLIC_LIBRARIES absl::base absl::strings absl::throw_delegate absl::span test_instance_tracker_lib)
+
+absl_test(
+ TARGET
+ span_test
+ SOURCES
+ ${SPAN_TEST_SRC}
+ PUBLIC_LIBRARIES
+ ${SPAN_TEST_PUBLIC_LIBRARIES}
+ PRIVATE_COMPILE_FLAGS
+ ${ABSL_EXCEPTIONS_FLAG}
+)
+
+
+# test span_test_noexceptions
+absl_test(
+ TARGET
+ span_test_noexceptions
+ SOURCES
+ ${SPAN_TEST_SRC}
+ PUBLIC_LIBRARIES
+ ${SPAN_TEST_PUBLIC_LIBRARIES}
+)
+
+
+
+# test optional_test
+set(OPTIONAL_TEST_SRC "optional_test.cc")
+set(OPTIONAL_TEST_PUBLIC_LIBRARIES absl::base absl::throw_delegate absl::optional absl_bad_optional_access)
+
+absl_test(
+ TARGET
+ optional_test
+ SOURCES
+ ${OPTIONAL_TEST_SRC}
+ PUBLIC_LIBRARIES
+ ${OPTIONAL_TEST_PUBLIC_LIBRARIES}
+)
+
+
namespace any_internal {
-// FastTypeId<Type>() evaluates at compile/link-time to a unique integer for the
-// passed in type. Their values are neither contiguous nor small, making them
-// unfit for using as an index into a vector, but a good match for keys into
-// maps or straight up comparisons.
-// Note that on 64-bit (unix) systems size_t is 64-bit while int is 32-bit and
-// the compiler will happily and quietly assign such a 64-bit value to a
-// 32-bit integer. While a client should never do that it SHOULD still be safe,
-// assuming the BSS segment doesn't span more than 4GiB.
+template <typename Type>
+struct TypeTag {
+ constexpr static char dummy_var = 0;
+};
+
+template <typename Type>
+constexpr char TypeTag<Type>::dummy_var;
+
+// FastTypeId<Type>() evaluates at compile/link-time to a unique pointer for the
+// passed in type. These are meant to be good match for keys into maps or
+// straight up comparisons.
template<typename Type>
-inline size_t FastTypeId() {
- static_assert(sizeof(char*) <= sizeof(size_t),
- "ptr size too large for size_t");
-
- // This static variable isn't actually used, only its address, so there are
- // no concurrency issues.
- static char dummy_var;
- return reinterpret_cast<size_t>(&dummy_var);
+constexpr inline const void* FastTypeId() {
+ return &TypeTag<Type>::dummy_var;
}
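+// Illustrative use (a sketch, not part of the original header): distinct types
+// instantiate distinct TypeTag<Type>::dummy_var objects, so the returned
+// pointers compare unequal and can be used directly as map keys, e.g.
+//   assert(FastTypeId<int>() != FastTypeId<double>());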
} // namespace any_internal
public:
virtual ~ObjInterface() = default;
virtual std::unique_ptr<ObjInterface> Clone() const = 0;
- virtual size_t type_id() const noexcept = 0;
+ virtual const void* ObjTypeId() const noexcept = 0;
#if ABSL_ANY_DETAIL_HAS_RTTI
virtual const std::type_info& Type() const noexcept = 0;
#endif // ABSL_ANY_DETAIL_HAS_RTTI
return std::unique_ptr<ObjInterface>(new Obj(in_place, value));
}
- size_t type_id() const noexcept final { return IdForType<T>(); }
+ const void* ObjTypeId() const noexcept final { return IdForType<T>(); }
#if ABSL_ANY_DETAIL_HAS_RTTI
const std::type_info& Type() const noexcept final { return typeid(T); }
}
template <typename T>
- static size_t IdForType() {
+ constexpr static const void* IdForType() {
// Note: This type dance is to make the behavior consistent with typeid.
using NormalizedType =
typename std::remove_cv<typename std::remove_reference<T>::type>::type;
return any_internal::FastTypeId<NormalizedType>();
}
- size_t GetObjTypeId() const {
- return obj_ == nullptr ? any_internal::FastTypeId<void>() : obj_->type_id();
+ const void* GetObjTypeId() const {
+ return obj_ ? obj_->ObjTypeId() : any_internal::FastTypeId<void>();
}
// `absl::any` nonmember functions //
// and `is_nothrow_swappable()` is the same as `std::is_trivial()`.
// * `make_optional()` cannot be declared `constexpr` due to the absence of
// guaranteed copy elision.
+// * The move constructor's `noexcept` specification is stronger, i.e. if the
+// default allocator is non-throwing (via setting
+// `ABSL_ALLOCATOR_NOTHROW`), it evaluates to `noexcept(true)`, because
+// we assume
+// a) move constructors should only throw due to allocation failure and
+// b) if T's move constructor allocates, it uses the same allocation
+// function as the default allocator.
template <typename T>
class optional;
// optional::operator*()
//
- // Accesses the underlying `T `value of an `optional`. If the `optional` is
+ // Accesses the underlying `T` value of an `optional`. If the `optional` is
// empty, behavior is undefined.
constexpr const T& operator*() const & { return reference(); }
T& operator*() & {
EXPECT_TRUE(absl::is_trivially_copy_constructible<
absl::optional<const TrivialCopyable>>::value);
#endif
+ // When testing with VS 2017 15.3, there seems to be a bug in MSVC
+ // std::optional when T is volatile-qualified. So skipping this test.
+ // Bug report:
+ // https://connect.microsoft.com/VisualStudio/feedback/details/3142534
+#if defined(ABSL_HAVE_STD_OPTIONAL) && defined(_MSC_VER) && _MSC_VER >= 1911
+#define ABSL_MSVC_OPTIONAL_VOLATILE_COPY_BUG 1
+#endif
+#ifndef ABSL_MSVC_OPTIONAL_VOLATILE_COPY_BUG
EXPECT_FALSE(std::is_copy_constructible<
absl::optional<volatile TrivialCopyable>>::value);
+#endif
}
}
using size_type = size_t;
using difference_type = ptrdiff_t;
- static const size_type npos = -1;
+ static const size_type npos = ~size_type{0};
constexpr Span() noexcept : Span(nullptr, 0) {}
constexpr Span(pointer array, size_type length) noexcept
//
// Returns a reference to the i'th element of this span.
constexpr reference at(size_type i) const {
- return ABSL_PREDICT_FALSE(i < size())
+ return ABSL_PREDICT_TRUE(i < size())
? ptr_[i]
: (base_internal::ThrowStdOutOfRange(
"Span::at failed bounds check"),
--- /dev/null
+#
+# Copyright 2017 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+list(APPEND UTILITY_PUBLIC_HEADERS
+ "utility.h"
+)
+
+
+
+list(APPEND UTILITY_SRC
+ "utility.cc"
+ ${UTILITY_PUBLIC_HEADERS}
+)
+
+absl_library(
+ TARGET
+ absl_utility
+ SOURCES
+ ${UTILITY_SRC}
+ PUBLIC_LIBRARIES
+ ${UTILITY_PUBLIC_LIBRARIES}
+ EXPORT_NAME
+ utility
+)
+
+
+
+#
+## TESTS
+#
+
+# test utility_test
+set(UTILITY_TEST_SRC "utility_test.cc")
+set(UTILITY_TEST_PUBLIC_LIBRARIES absl::utility)
+
+absl_test(
+ TARGET
+ utility_test
+ SOURCES
+ ${UTILITY_TEST_SRC}
+ PUBLIC_LIBRARIES
+ ${UTILITY_TEST_PUBLIC_LIBRARIES}
+)
+
+
+
repo: 8a21fd850624c931e448cbcfb38168cb2717c790
-node: 034b6c3e101792a3cc3ccabd9bfaddcabe85bb58
+node: 2355b229ea4c2876f490e726526cdcd8a63c7f54
branch: default
latesttag: 3.3.0
-latesttagdistance: 498
-changessincelatesttag: 685
+latesttagdistance: 503
+changessincelatesttag: 692
public:
EIGEN_DEVICE_FUNC plainobjectbase_evaluator_data(const Scalar* ptr, Index outerStride) : data(ptr)
{
- EIGEN_ONLY_USED_FOR_DEBUG(outerStride);
+#ifndef EIGEN_INTERNAL_DEBUGGING
+ EIGEN_UNUSED_VARIABLE(outerStride);
+#endif
eigen_internal_assert(outerStride==OuterStride);
}
EIGEN_DEVICE_FUNC Index outerStride() const { return OuterStride; }
*
* \sa Scaling(), class DiagonalMatrix, MatrixBase::asDiagonal(), class Translation, class Transform
*/
+
+namespace internal
+{
+ // This helper helps nvcc+MSVC to properly parse this file.
+ // See bug 1412.
+ template <typename Scalar, int Dim, int Mode>
+ struct uniformscaling_times_affine_returntype
+ {
+ enum
+ {
+ NewMode = int(Mode) == int(Isometry) ? Affine : Mode
+ };
+ typedef Transform <Scalar, Dim, NewMode> type;
+ };
+}
+
template<typename _Scalar>
class UniformScaling
{
/** Concatenates a uniform scaling and an affine transformation */
template<int Dim, int Mode, int Options>
- inline Transform<Scalar,Dim,(int(Mode)==int(Isometry)?Affine:Mode)> operator* (const Transform<Scalar,Dim, Mode, Options>& t) const
+ inline typename
+ internal::uniformscaling_times_affine_returntype <Scalar,Dim,Mode>::type
+ operator* (const Transform<Scalar, Dim, Mode, Options>& t) const
{
- Transform<Scalar,Dim,(int(Mode)==int(Isometry)?Affine:Mode)> res = t;
+    typename internal::uniformscaling_times_affine_returntype <Scalar,Dim,Mode>::type res = t;
res.prescale(factor());
return res;
}
/** Concatenates a uniform scaling and a linear transformation matrix */
// TODO returns an expression
template<typename Derived>
- inline typename internal::plain_matrix_type<Derived>::type operator* (const MatrixBase<Derived>& other) const
+ inline typename Eigen::internal::plain_matrix_type<Derived>::type operator* (const MatrixBase<Derived>& other) const
{ return other * m_factor; }
template<typename Derived,int Dim>
// Method to allocate and initialize matrix and attributes
template<typename MatrixType>
-void BDCSVD<MatrixType>::allocate(Index rows, Index cols, unsigned int computationOptions)
+void BDCSVD<MatrixType>::allocate(Eigen::Index rows, Eigen::Index cols, unsigned int computationOptions)
{
m_isTranspose = (cols > rows);
//@param shift : Each time one takes the left submatrix, one must add 1 to the shift. Why? Because! We actually want the last column of the U submatrix
// to become the first column (*coeff) and to shift all the other columns to the right. There are more details on the reference paper.
template<typename MatrixType>
-void BDCSVD<MatrixType>::divide (Index firstCol, Index lastCol, Index firstRowW, Index firstColW, Index shift)
+void BDCSVD<MatrixType>::divide (Eigen::Index firstCol, Eigen::Index lastCol, Eigen::Index firstRowW, Eigen::Index firstColW, Eigen::Index shift)
{
// requires rows = cols + 1;
using std::pow;
// handling of round-off errors, be consistent in ordering
// For instance, to solve the secular equation using FMM, see http://www.stat.uchicago.edu/~lekheng/courses/302/classics/greengard-rokhlin.pdf
template <typename MatrixType>
-void BDCSVD<MatrixType>::computeSVDofM(Index firstCol, Index n, MatrixXr& U, VectorType& singVals, MatrixXr& V)
+void BDCSVD<MatrixType>::computeSVDofM(Eigen::Index firstCol, Eigen::Index n, MatrixXr& U, VectorType& singVals, MatrixXr& V)
{
const RealScalar considerZero = (std::numeric_limits<RealScalar>::min)();
using std::abs;
// i >= 1, di almost null and zi non null.
// We use a rotation to zero out zi applied to the left of M
template <typename MatrixType>
-void BDCSVD<MatrixType>::deflation43(Index firstCol, Index shift, Index i, Index size)
+void BDCSVD<MatrixType>::deflation43(Eigen::Index firstCol, Eigen::Index shift, Eigen::Index i, Eigen::Index size)
{
using std::abs;
using std::sqrt;
// We apply two rotations to have zj = 0;
// TODO deflation44 is still broken and not properly tested
template <typename MatrixType>
-void BDCSVD<MatrixType>::deflation44(Index firstColu , Index firstColm, Index firstRowW, Index firstColW, Index i, Index j, Index size)
+void BDCSVD<MatrixType>::deflation44(Eigen::Index firstColu , Eigen::Index firstColm, Eigen::Index firstRowW, Eigen::Index firstColW, Eigen::Index i, Eigen::Index j, Eigen::Index size)
{
using std::abs;
using std::sqrt;
// acts on block from (firstCol+shift, firstCol+shift) to (lastCol+shift, lastCol+shift) [inclusive]
template <typename MatrixType>
-void BDCSVD<MatrixType>::deflation(Index firstCol, Index lastCol, Index k, Index firstRowW, Index firstColW, Index shift)
+void BDCSVD<MatrixType>::deflation(Eigen::Index firstCol, Eigen::Index lastCol, Eigen::Index k, Eigen::Index firstRowW, Eigen::Index firstColW, Eigen::Index shift)
{
using std::sqrt;
using std::abs;
};
template<typename MatrixType, int QRPreconditioner>
-void JacobiSVD<MatrixType, QRPreconditioner>::allocate(Index rows, Index cols, unsigned int computationOptions)
+void JacobiSVD<MatrixType, QRPreconditioner>::allocate(Eigen::Index rows, Eigen::Index cols, unsigned int computationOptions)
{
eigen_assert(rows >= 0 && cols >= 0);
u = (LAPACKE_TYPE*)m_matrixU.data(); \
} else { ldu=1; u=&dummy; }\
MatrixType localV; \
- ldvt = (m_computeFullV) ? internal::convert_index<lapack_int>(m_cols) : (m_computeThinV) ? internal::convert_index<lapack_int>(m_diagSize) : 1; \
+ lapack_int vt_rows = (m_computeFullV) ? internal::convert_index<lapack_int>(m_cols) : (m_computeThinV) ? internal::convert_index<lapack_int>(m_diagSize) : 1; \
if (computeV()) { \
- localV.resize(ldvt, m_cols); \
+ localV.resize(vt_rows, m_cols); \
+ ldvt = internal::convert_index<lapack_int>(localV.outerStride()); \
vt = (LAPACKE_TYPE*)localV.data(); \
} else { ldvt=1; vt=&dummy; }\
Matrix<LAPACKE_RTYPE, Dynamic, Dynamic> superb; superb.resize(m_diagSize, 1); \
// t_n = exp(sqrt(-1) * pi * n^2 / line_len)
// for n = 0, 1,..., line_len-1.
// For n > 2 we use the recurrence t_n = t_{n-1}^2 / t_{n-2} * t_1^2
- pos_j_base_powered[0] = ComplexScalar(1, 0);
- if (line_len > 1) {
- const RealScalar pi_over_len(EIGEN_PI / line_len);
- const ComplexScalar pos_j_base = ComplexScalar(
- std::cos(pi_over_len), std::sin(pi_over_len));
- pos_j_base_powered[1] = pos_j_base;
- if (line_len > 2) {
- const ComplexScalar pos_j_base_sq = pos_j_base * pos_j_base;
- for (int j = 2; j < line_len + 1; ++j) {
- pos_j_base_powered[j] = pos_j_base_powered[j - 1] *
- pos_j_base_powered[j - 1] /
- pos_j_base_powered[j - 2] * pos_j_base_sq;
- }
- }
+
+ // The recurrence is correct in exact arithmetic, but causes
+ // numerical issues for large transforms, especially in
+ // single-precision floating point.
+ //
+ // pos_j_base_powered[0] = ComplexScalar(1, 0);
+ // if (line_len > 1) {
+ // const ComplexScalar pos_j_base = ComplexScalar(
+ // numext::cos(M_PI / line_len), numext::sin(M_PI / line_len));
+ // pos_j_base_powered[1] = pos_j_base;
+ // if (line_len > 2) {
+ // const ComplexScalar pos_j_base_sq = pos_j_base * pos_j_base;
+ // for (int i = 2; i < line_len + 1; ++i) {
+ // pos_j_base_powered[i] = pos_j_base_powered[i - 1] *
+ // pos_j_base_powered[i - 1] /
+ // pos_j_base_powered[i - 2] *
+ // pos_j_base_sq;
+ // }
+ // }
+ // }
+ // TODO(rmlarsen): Find a way to use Eigen's vectorized sin
+ // and cosine functions here.
+ for (int j = 0; j < line_len + 1; ++j) {
+ double arg = ((EIGEN_PI * j) * j) / line_len;
+ std::complex<double> tmp(numext::cos(arg), numext::sin(arg));
+ pos_j_base_powered[j] = static_cast<ComplexScalar>(tmp);
}
}
// Calculate the padding
m_rowPaddingTop = ((m_outputRows - 1) * m_row_strides + m_patch_rows_eff - m_input_rows_eff) / 2;
m_colPaddingLeft = ((m_outputCols - 1) * m_col_strides + m_patch_cols_eff - m_input_cols_eff) / 2;
+ // The padding size calculation for PADDING_SAME has been updated to
+ // be consistent with how TensorFlow extracts its paddings.
+ m_rowPaddingTop = numext::maxi<Index>(0, m_rowPaddingTop);
+ m_colPaddingLeft = numext::maxi<Index>(0, m_colPaddingLeft);
break;
default:
eigen_assert(false && "unexpected padding");
}
}
+template <typename RealScalar>
+static void test_fft_non_power_of_2_round_trip(int exponent) {
+ int n = (1 << exponent) + 1;
+
+ Eigen::DSizes<long, 1> dimensions;
+ dimensions[0] = n;
+ const DSizes<long, 1> arr = dimensions;
+ Tensor<RealScalar, 1, ColMajor, long> input;
+
+ input.resize(arr);
+ input.setRandom();
+
+ array<int, 1> fft;
+ fft[0] = 0;
+
+ Tensor<std::complex<RealScalar>, 1, ColMajor> forward =
+ input.template fft<BothParts, FFT_FORWARD>(fft);
+
+ Tensor<RealScalar, 1, ColMajor, long> output =
+ forward.template fft<RealPart, FFT_REVERSE>(fft);
+
+ for (int i = 0; i < n; ++i) {
+ VERIFY_IS_APPROX(input[i], output[i]);
+ }
+}
+
void test_cxx11_tensor_fft() {
test_fft_complex_input_golden();
test_fft_real_input_golden();
test_fft_real_input_energy<RowMajor, double, true, Eigen::BothParts, FFT_FORWARD, 4>();
test_fft_real_input_energy<RowMajor, float, false, Eigen::BothParts, FFT_FORWARD, 4>();
test_fft_real_input_energy<RowMajor, double, false, Eigen::BothParts, FFT_FORWARD, 4>();
+
+ test_fft_non_power_of_2_round_trip<float>(7);
}
}
}
+// Verifies that SAME padding, when computed as negative values, will be clipped
+// to zero.
+void test_patch_padding_same_negative_padding_clip_to_zero() {
+ int input_depth = 1;
+ int input_rows = 15;
+ int input_cols = 1;
+ int input_batches = 1;
+ int ksize = 1; // Corresponds to the Rows and Cols for
+ // tensor.extract_image_patches<>.
+ int row_stride = 5;
+ int col_stride = 1;
+ // ColMajor
+ Tensor<float, 4> tensor(input_depth, input_rows, input_cols, input_batches);
+ // Initializes tensor with incrementing numbers.
+ for (int i = 0; i < tensor.size(); ++i) {
+ tensor.data()[i] = i + 1;
+ }
+ Tensor<float, 5> result = tensor.extract_image_patches(
+ ksize, ksize, row_stride, col_stride, 1, 1, PADDING_SAME);
+ // row padding will be computed as -2 originally and then be clipped to 0.
+ VERIFY_IS_EQUAL(result.coeff(0), 1.0f);
+ VERIFY_IS_EQUAL(result.coeff(1), 6.0f);
+ VERIFY_IS_EQUAL(result.coeff(2), 11.0f);
+
+ VERIFY_IS_EQUAL(result.dimension(0), input_depth); // depth
+ VERIFY_IS_EQUAL(result.dimension(1), ksize); // kernel rows
+ VERIFY_IS_EQUAL(result.dimension(2), ksize); // kernel cols
+ VERIFY_IS_EQUAL(result.dimension(3), 3); // number of patches
+ VERIFY_IS_EQUAL(result.dimension(4), input_batches); // number of batches
+
+ // RowMajor
+ Tensor<float, 4, RowMajor> tensor_row_major = tensor.swap_layout();
+ VERIFY_IS_EQUAL(tensor.dimension(0), tensor_row_major.dimension(3));
+ VERIFY_IS_EQUAL(tensor.dimension(1), tensor_row_major.dimension(2));
+ VERIFY_IS_EQUAL(tensor.dimension(2), tensor_row_major.dimension(1));
+ VERIFY_IS_EQUAL(tensor.dimension(3), tensor_row_major.dimension(0));
+
+ Tensor<float, 5, RowMajor> result_row_major =
+ tensor_row_major.extract_image_patches(ksize, ksize, row_stride,
+ col_stride, 1, 1, PADDING_SAME);
+ VERIFY_IS_EQUAL(result_row_major.coeff(0), 1.0f);
+ VERIFY_IS_EQUAL(result_row_major.coeff(1), 6.0f);
+ VERIFY_IS_EQUAL(result_row_major.coeff(2), 11.0f);
+
+ VERIFY_IS_EQUAL(result.dimension(0), result_row_major.dimension(4));
+ VERIFY_IS_EQUAL(result.dimension(1), result_row_major.dimension(3));
+ VERIFY_IS_EQUAL(result.dimension(2), result_row_major.dimension(2));
+ VERIFY_IS_EQUAL(result.dimension(3), result_row_major.dimension(1));
+ VERIFY_IS_EQUAL(result.dimension(4), result_row_major.dimension(0));
+}
+
void test_patch_no_extra_dim()
{
Tensor<float, 3> tensor(2,3,5);
CALL_SUBTEST_4(test_patch_padding_valid_same_value());
CALL_SUBTEST_5(test_patch_padding_same());
CALL_SUBTEST_6(test_imagenet_patches());
+ CALL_SUBTEST_7(test_patch_padding_same_negative_padding_clip_to_zero());
}
--- /dev/null
+set(PACKAGE_VERSION "@VERSION_MAJOR@.@VERSION_MINOR@.@VERSION_PATCH@")
+
+# Check whether the requested PACKAGE_FIND_VERSION is compatible
+if("${PACKAGE_VERSION}" VERSION_LESS "${PACKAGE_FIND_VERSION}")
+ set(PACKAGE_VERSION_COMPATIBLE FALSE)
+else()
+ set(PACKAGE_VERSION_COMPATIBLE TRUE)
+ if ("${PACKAGE_VERSION}" VERSION_EQUAL "${PACKAGE_FIND_VERSION}")
+ set(PACKAGE_VERSION_EXACT TRUE)
+ endif()
+endif()
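+
+# For illustration: if the installed version is 1.9.0, a call such as
+# `find_package(Flatbuffers 1.8)` is reported compatible, `find_package(Flatbuffers 2.0)`
+# is not, and requesting exactly 1.9.0 additionally sets PACKAGE_VERSION_EXACT.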
SET(CPACK_DEBIAN_PACKAGE_HOMEPAGE "https://github.com/google/flatbuffers")
SET(CPACK_DEBIAN_PACKAGE_MAINTAINER "Vitaly Isaev <vitalyisaev2@gmail.com>")
- # Derive package version from git
- EXECUTE_PROCESS(
- COMMAND date +%Y%m%d
- OUTPUT_VARIABLE DATE
- OUTPUT_STRIP_TRAILING_WHITESPACE
- )
- EXECUTE_PROCESS(
- COMMAND git describe
- WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
- OUTPUT_VARIABLE GIT_DESCRIBE_DIRTY
- OUTPUT_STRIP_TRAILING_WHITESPACE
- )
- string(REGEX REPLACE "^v([0-9]+)\\..*" "\\1" VERSION_MAJOR "${GIT_DESCRIBE_DIRTY}")
- string(REGEX REPLACE "^v[0-9]+\\.([0-9]+).*" "\\1" VERSION_MINOR "${GIT_DESCRIBE_DIRTY}")
- string(REGEX REPLACE "^v[0-9]+\\.[0-9]+\\.([0-9]+).*" "\\1" VERSION_PATCH "${GIT_DESCRIBE_DIRTY}")
- string(REGEX REPLACE "^v[0-9]+\\.[0-9]+\\.[0-9]+\\-([0-9]+).*" "\\1" VERSION_COMMIT "${GIT_DESCRIBE_DIRTY}")
SET(CPACK_PACKAGE_VERSION_MAJOR ${VERSION_MAJOR})
SET(CPACK_PACKAGE_VERSION_MINOR ${VERSION_MINOR})
SET(CPACK_PACKAGE_VERSION_PATCH ${VERSION_PATCH})
# Package name
SET(CPACK_DEBIAN_PACKAGE_NAME "flatbuffers")
SET(CPACK_RESOURCE_FILE_LICENSE ${CMAKE_SOURCE_DIR}/LICENSE.txt)
- SET(CPACK_PACKAGE_FILE_NAME
+ SET(CPACK_PACKAGE_FILE_NAME
"${CPACK_DEBIAN_PACKAGE_NAME}_${CPACK_DEBIAN_PACKAGE_VERSION}_${CPACK_DEBIAN_PACKAGE_ARCHITECTURE}")
endif(UNIX)
--- /dev/null
+find_program(GIT git)
+execute_process(
+ COMMAND ${GIT} describe
+ WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
+ OUTPUT_VARIABLE GIT_DESCRIBE_DIRTY
+ OUTPUT_STRIP_TRAILING_WHITESPACE
+)
+string(REGEX REPLACE "^v([0-9]+)\\..*" "\\1" VERSION_MAJOR "${GIT_DESCRIBE_DIRTY}")
+string(REGEX REPLACE "^v[0-9]+\\.([0-9]+).*" "\\1" VERSION_MINOR "${GIT_DESCRIBE_DIRTY}")
+string(REGEX REPLACE "^v[0-9]+\\.[0-9]+\\.([0-9]+).*" "\\1" VERSION_PATCH "${GIT_DESCRIBE_DIRTY}")
+string(REGEX REPLACE "^v[0-9]+\\.[0-9]+\\.[0-9]+\\-([0-9]+).*" "\\1" VERSION_COMMIT "${GIT_DESCRIBE_DIRTY}")
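+
+# For example, a `git describe` output of "v1.9.0-12-gabc1234" yields
+# VERSION_MAJOR=1, VERSION_MINOR=9, VERSION_PATCH=0 and VERSION_COMMIT=12.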
endif(CYGWIN)
set(CMAKE_CXX_FLAGS
"${CMAKE_CXX_FLAGS} -Wall -pedantic -Werror -Wextra -Werror=shadow")
- if (GCC_VERSION VERSION_GREATER 4.4)
+ if (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 4.4)
+ if (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 7.0)
+ set(CMAKE_CXX_FLAGS
+ "${CMAKE_CXX_FLAGS} -faligned-new")
+ endif()
set(CMAKE_CXX_FLAGS
- "${CMAKE_CXX_FLAGS} -Wunused-result -Werror=unused-result \
- -Wunused-parameter -Werror=unused-parameter")
+ "${CMAKE_CXX_FLAGS} -Wunused-result -Werror=unused-result -Wunused-parameter -Werror=unused-parameter")
endif()
# Certain platforms such as ARM do not use signed chars by default
# - minor updated when there are additions in API/ABI
# - major (ABI number) updated when there are changes in ABI (or removals)
set(FlatBuffers_Library_SONAME_MAJOR "1")
- set(FlatBuffers_Library_SONAME_FULL "${FlatBuffers_Library_SONAME_MAJOR}.8.0")
+ set(FlatBuffers_Library_SONAME_FULL "${FlatBuffers_Library_SONAME_MAJOR}.9.0")
set_target_properties(flatbuffers_shared PROPERTIES OUTPUT_NAME flatbuffers
SOVERSION "${FlatBuffers_Library_SONAME_MAJOR}"
VERSION "${FlatBuffers_Library_SONAME_FULL}")
target_link_libraries(grpctest grpc++_unsecure grpc_unsecure gpr pthread dl)
endif()
+include(CMake/Version.cmake)
+
if(FLATBUFFERS_INSTALL)
include(GNUInstallDirs)
set(FB_CMAKE_DIR "${CMAKE_INSTALL_LIBDIR}/cmake/flatbuffers")
+ configure_file(CMake/FlatbuffersConfigVersion.cmake.in FlatbuffersConfigVersion.cmake @ONLY)
install(
- FILES "CMake/FlatbuffersConfig.cmake"
+ FILES "CMake/FlatbuffersConfig.cmake" "${CMAKE_CURRENT_BINARY_DIR}/FlatbuffersConfigVersion.cmake"
DESTINATION ${FB_CMAKE_DIR}
)
class FlatbuffersConan(ConanFile):
name = "flatbuffers"
- version = "1.8.0"
+ version = "1.9.0"
license = "https://github.com/google/flatbuffers/blob/master/LICENSE.txt"
url = "https://github.com/google/flatbuffers"
description = "Memory Efficient Serialization Library"
# Overview {#flatbuffers_overview}
[FlatBuffers](@ref flatbuffers_overview) is an efficient cross platform
-serialization library for C++, C#, C, Go, Java, JavaScript, PHP, and Python.
+serialization library for C++, C#, C, Go, Java, JavaScript, TypeScript, PHP, and Python.
It was originally created at Google for game development and other
performance-critical applications.
in your own programs.
- How to [use the generated Go code](@ref flatbuffers_guide_use_go) in your
own programs.
+- How to [use the generated JavaScript code](@ref flatbuffers_guide_use_javascript) in your
+ own programs.
+- How to [use the generated TypeScript code](@ref flatbuffers_guide_use_typescript) in your
+ own programs.
- How to [use FlatBuffers in C with `flatcc`](@ref flatbuffers_guide_use_c) in your
own programs.
- [Support matrix](@ref flatbuffers_support) for platforms/languages/features.
namespace\_decl = `namespace` ident ( `.` ident )* `;`
-attribute\_decl = `attribute` string\_constant `;`
+attribute\_decl = `attribute` ident | `"`ident`"` `;`
type\_decl = ( `table` | `struct` ) ident metadata `{` field\_decl+ `}`
Predeclare all data types since circular references between types are allowed
(circular references between object are not, though).
- MANUALLY_ALIGNED_STRUCT(4) Vec3 {
+ FLATBUFFERS_MANUALLY_ALIGNED_STRUCT(4) Vec3 {
private:
float x_;
float y_;
float y() const { return flatbuffers::EndianScalar(y_); }
float z() const { return flatbuffers::EndianScalar(z_); }
};
- STRUCT_END(Vec3, 12);
+ FLATBUFFERS_STRUCT_END(Vec3, 12);
These ugly macros do a couple of things: they turn off any padding the compiler
might normally do, since we add padding manually (though none in this example),
NOTE: this table is a start, it needs to be extended.
-Feature | C++ | Java | C# | Go | Python | JS | C | PHP | Ruby
------------------------------- | ------ | ------ | ------ | ------ | ------ | --------- | ------ | --- | ----
-Codegen for all basic features | Yes | Yes | Yes | Yes | Yes | Yes | Yes | WiP | WiP
-JSON parsing | Yes | No | No | No | No | No | Yes | No | No
-Simple mutation | Yes | Yes | Yes | Yes | No | No | No | No | No
-Reflection | Yes | No | No | No | No | No | Basic | No | No
-Buffer verifier | Yes | No | No | No | No | No | Yes | No | No
-Testing: basic | Yes | Yes | Yes | Yes | Yes | Yes | Yes | ? | ?
-Testing: fuzz | Yes | No | No | Yes | Yes | No | No | ? | ?
-Performance: | Superb | Great | Great | Great | Ok | ? | Superb | ? | ?
-Platform: Windows | VS2010 | Yes | Yes | ? | ? | ? | VS2010 | ? | ?
-Platform: Linux | GCC282 | Yes | ? | Yes | Yes | ? | Yes | ? | ?
-Platform: OS X | Xcode4 | ? | ? | ? | Yes | ? | Yes | ? | ?
-Platform: Android | NDK10d | Yes | ? | ? | ? | ? | ? | ? | ?
-Platform: iOS | ? | ? | ? | ? | ? | ? | ? | ? | ?
-Engine: Unity | ? | ? | Yes | ? | ? | ? | ? | ? | ?
-Primary authors (github) | gwvo | gwvo | ev*/js*| rw | rw | evanw/ev* | mik* | ch* | rw
+Feature | C++ | Java | C# | Go | Python | JS | TS | C | PHP | Ruby
+------------------------------ | ------ | ------ | ------ | ------ | ------ | --------- | --------- | ------ | --- | ----
+Codegen for all basic features | Yes | Yes | Yes | Yes | Yes | Yes | Yes | Yes | WiP | WiP
+JSON parsing | Yes | No | No | No | No | No | No | Yes | No | No
+Simple mutation | Yes | Yes | Yes | Yes | No | No | No | No | No | No
+Reflection | Yes | No | No | No | No | No | No | Basic | No | No
+Buffer verifier | Yes | No | No | No | No | No | No | Yes | No | No
+Testing: basic | Yes | Yes | Yes | Yes | Yes | Yes | Yes | Yes | ? | ?
+Testing: fuzz | Yes | No | No | Yes | Yes | No | No | No | ? | ?
+Performance: | Superb | Great | Great | Great | Ok | ? | ? | Superb | ? | ?
+Platform: Windows | VS2010 | Yes | Yes | ? | ? | ? | Yes | VS2010 | ? | ?
+Platform: Linux | GCC282 | Yes | ? | Yes | Yes | ? | Yes | Yes | ? | ?
+Platform: OS X | Xcode4 | ? | ? | ? | Yes | ? | Yes | Yes | ? | ?
+Platform: Android | NDK10d | Yes | ? | ? | ? | ? | ? | ? | ? | ?
+Platform: iOS | ? | ? | ? | ? | ? | ? | ? | ? | ? | ?
+Engine: Unity | ? | ? | Yes | ? | ? | ? | ? | ? | ? | ?
+Primary authors (github) | gwvo | gwvo | ev*/js*| rw | rw | evanw/ev* | kr | mik* | ch* | rw
* ev = evolutional
* js = jonsimantov
* mik = mikkelfj
* ch = chobie
+ * kr = krojew
<br>
<input type="radio" name="language" value="go">Go</input>
<input type="radio" name="language" value="python">Python</input>
<input type="radio" name="language" value="javascript">JavaScript</input>
+ <input type="radio" name="language" value="typescript">TypeScript</input>
<input type="radio" name="language" value="php">PHP</input>
<input type="radio" name="language" value="c">C</input>
</form>
<div class="language-javascript">
[samplebinary.js](https://github.com/google/flatbuffers/blob/master/samples/samplebinary.js)
</div>
+<div class="language-typescript">
+<em>none yet</em>
+</div>
<div class="language-php">
[SampleBinary.php](https://github.com/google/flatbuffers/blob/master/samples/SampleBinary.php)
</div>
<div class="language-javascript">
~~~{.sh}
cd flatbuffers/sample
- ./../flatc --javascript samples/monster.fbs
+ ./../flatc --js samples/monster.fbs
+~~~
+</div>
+<div class="language-typescript">
+~~~{.sh}
+ cd flatbuffers/sample
+ ./../flatc --ts samples/monster.fbs
~~~
</div>
<div class="language-php">
<script src="monster_generated.js"></script> // Generated by `flatc`.
~~~
</div>
+<div class="language-typescript">
+~~~{.ts}
+  // note: import flatbuffers with your desired import method
+
+  import { MyGame } from './monster_generated';
+~~~
+</div>
<div class="language-php">
~~~{.php}
// It is recommended that your use PSR autoload when using FlatBuffers in PHP.
var builder = new flatbuffers.Builder(1024);
~~~
</div>
+<div class="language-typescript">
+~~~{.ts}
+ // Create a `flatbuffer.Builder`, which will be used to create our
+ // monsters' FlatBuffers.
+ let builder = new flatbuffers.Builder(1024);
+~~~
+</div>
<div class="language-php">
~~~{.php}
// Create a `FlatBufferBuilder`, which will be used to create our
var axe = MyGame.Sample.Weapon.endWeapon(builder);
~~~
</div>
+<div class="language-typescript">
+~~~{.ts}
+ let weaponOne = builder.createString('Sword');
+ let weaponTwo = builder.createString('Axe');
+
+ // Create the first `Weapon` ('Sword').
+ MyGame.Sample.Weapon.startWeapon(builder);
+ MyGame.Sample.Weapon.addName(builder, weaponOne);
+ MyGame.Sample.Weapon.addDamage(builder, 3);
+ let sword = MyGame.Sample.Weapon.endWeapon(builder);
+
+ // Create the second `Weapon` ('Axe').
+ MyGame.Sample.Weapon.startWeapon(builder);
+ MyGame.Sample.Weapon.addName(builder, weaponTwo);
+ MyGame.Sample.Weapon.addDamage(builder, 5);
+ let axe = MyGame.Sample.Weapon.endWeapon(builder);
+~~~
+</div>
<div class="language-php">
~~~{.php}
// Create the `Weapon`s using the `createWeapon()` helper function.
var inv = MyGame.Sample.Monster.createInventoryVector(builder, treasure);
~~~
</div>
+<div class="language-typescript">
+~~~{.ts}
+ // Serialize a name for our monster, called 'Orc'.
+ let name = builder.createString('Orc');
+
+ // Create a `vector` representing the inventory of the Orc. Each number
+ // could correspond to an item that can be claimed after he is slain.
+ let treasure = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
+ let inv = MyGame.Sample.Monster.createInventoryVector(builder, treasure);
+~~~
+</div>
<div class="language-php">
~~~{.php}
// Serialize a name for our monster, called "Orc".
var weapons = MyGame.Sample.Monster.createWeaponsVector(builder, weaps);
~~~
</div>
+<div class="language-typescript">
+~~~{.ts}
+ // Create an array from the two `Weapon`s and pass it to the
+ // `createWeaponsVector()` method to create a FlatBuffer vector.
+ let weaps = [sword, axe];
+ let weapons = MyGame.Sample.Monster.createWeaponsVector(builder, weaps);
+~~~
+</div>
<div class="language-php">
~~~{.php}
// Create an array from the two `Weapon`s and pass it to the
var path = builder.endVector();
~~~
</div>
+<div class="language-typescript">
+~~~{.ts}
+ MyGame.Sample.Monster.startPathVector(builder, 2);
+ MyGame.Sample.Vec3.createVec3(builder, 1.0, 2.0, 3.0);
+ MyGame.Sample.Vec3.createVec3(builder, 4.0, 5.0, 6.0);
+ let path = builder.endVector();
+~~~
+</div>
<div class="language-php">
~~~{.php}
\MyGame\Example\Monster::StartPathVector($builder, 2);
<div class="language-cpp">
~~~{.cpp}
+ // Create the position struct
+ auto position = Vec3(1.0f, 2.0f, 3.0f);
+
// Set his hit points to 300 and his mana to 150.
int hp = 300;
int mana = 150;
// Finally, create the monster using the `CreateMonster` helper function
// to set all fields.
- auto orc = CreateMonster(builder, Vec3(1.0f, 2.0f, 3.0f), mana, hp, name,
- inventory, Color_Red, weapons, Equipment_Weapon,
- axe.Union(), path);
+ auto orc = CreateMonster(builder, &position, mana, hp, name, inventory,
+ Color_Red, weapons, Equipment_Weapon, axe.Union(),
+ path);
~~~
</div>
<div class="language-java">
var orc = MyGame.Sample.Monster.endMonster(builder);
~~~
</div>
+<div class="language-typescript">
+~~~{.ts}
+ // Create our monster by using `startMonster()` and `endMonster()`.
+ MyGame.Sample.Monster.startMonster(builder);
+ MyGame.Sample.Monster.addPos(builder,
+ MyGame.Sample.Vec3.createVec3(builder, 1.0, 2.0, 3.0));
+ MyGame.Sample.Monster.addHp(builder, 300);
+ MyGame.Sample.Monster.addColor(builder, MyGame.Sample.Color.Red)
+ MyGame.Sample.Monster.addName(builder, name);
+ MyGame.Sample.Monster.addInventory(builder, inv);
+ MyGame.Sample.Monster.addWeapons(builder, weapons);
+ MyGame.Sample.Monster.addEquippedType(builder, MyGame.Sample.Equipment.Weapon);
+ MyGame.Sample.Monster.addEquipped(builder, axe);
+ MyGame.Sample.Monster.addPath(builder, path);
+ let orc = MyGame.Sample.Monster.endMonster(builder);
+~~~
+</div>
<div class="language-php">
~~~{.php}
// Create our monster by using `StartMonster()` and `EndMonster()`.
// You can use this code instead of `CreateMonster()`, to create our orc
// manually.
MonsterBuilder monster_builder(builder);
- monster_builder.add_pos(&pos);
- auto pos = Vec3(1.0f, 2.0f, 3.0f);
+ monster_builder.add_pos(&position);
monster_builder.add_hp(hp);
monster_builder.add_name(name);
monster_builder.add_inventory(inventory);
monster_builder.add_color(Color_Red);
monster_builder.add_weapons(weapons);
monster_builder.add_equipped_type(Equipment_Weapon);
- monster_builder.add_equpped(axe.Union());
+ monster_builder.add_equipped(axe.Union());
auto orc = monster_builder.Finish();
~~~
</div>
MyGame.Sample.Monster.addEquipped(builder, axe); // Union data
~~~
</div>
+<div class="language-typescript">
+ ~~~{.ts}
+ MyGame.Sample.Monster.addEquippedType(builder, MyGame.Sample.Equipment.Weapon); // Union type
+ MyGame.Sample.Monster.addEquipped(builder, axe); // Union data
+ ~~~
+</div>
<div class="language-php">
~~~{.php}
\MyGame\Sample\Monster::AddEquippedType($builder, \MyGame\Sample\Equipment::Weapon); // Union type
// orc);`.
~~~
</div>
+<div class="language-typescript">
+~~~{.ts}
+ // Call `finish()` to instruct the builder that this monster is complete.
+ builder.finish(orc); // You could also call `MyGame.Sample.Monster.finishMonsterBuffer(builder,
+ // orc);`.
+~~~
+</div>
<div class="language-php">
~~~{.php}
// Call `finish()` to instruct the builder that this monster is complete.
var buf = builder.asUint8Array(); // Of type `Uint8Array`.
~~~
</div>
+<div class="language-typescript">
+~~~{.ts}
+ // This must be called after `finish()`.
+ let buf = builder.asUint8Array(); // Of type `Uint8Array`.
+~~~
+</div>
<div class="language-php">
~~~{.php}
// This must be called after `finish()`.
<script src="monster_generated.js"></script> // Generated by `flatc`.
~~~
</div>
+<div class="language-typescript">
+~~~{.ts}
+  // note: import flatbuffers with your desired import method
+
+ import { MyGame } from './monster_generated';
+~~~
+</div>
<div class="language-php">
~~~{.php}
// It is recommended that your use PSR autoload when using FlatBuffers in PHP.
var monster = MyGame.Sample.Monster.getRootAsMonster(buf);
~~~
</div>
+<div class="language-typescript">
+~~~{.ts}
+ let bytes = /* the data you just read, in an object of type "Uint8Array" */
+ let buf = new flatbuffers.ByteBuffer(bytes);
+
+ // Get an accessor to the root object inside the buffer.
+ let monster = MyGame.Sample.Monster.getRootAsMonster(buf);
+~~~
+</div>
<div class="language-php">
~~~{.php}
$bytes = /* the data you just read, in a string */
var name = $monster.name();
~~~
</div>
+<div class="language-typescript">
+~~~{.ts}
+  let hp = monster.hp();
+  let mana = monster.mana();
+  let name = monster.name();
+~~~
+</div>
<div class="language-php">
~~~{.php}
$hp = $monster->getHp();
var z = pos.z();
~~~
</div>
+<div class="language-typescript">
+~~~{.ts}
+ let pos = monster.pos();
+ let x = pos.x();
+ let y = pos.y();
+ let z = pos.z();
+~~~
+</div>
<div class="language-php">
~~~{.php}
$pos = $monster->getPos();
var thirdItem = monster.inventory(2);
~~~
</div>
+<div class="language-typescript">
+~~~{.ts}
+ let invLength = monster.inventoryLength();
+ let thirdItem = monster.inventory(2);
+~~~
+</div>
<div class="language-php">
~~~{.php}
$inv_len = $monster->getInventoryLength();
var secondWeaponDamage = monster.weapons(1).damage();
~~~
</div>
+<div class="language-typescript">
+~~~{.ts}
+ let weaponsLength = monster.weaponsLength();
+ let secondWeaponName = monster.weapons(1).name();
+ let secondWeaponDamage = monster.weapons(1).damage();
+~~~
+</div>
<div class="language-php">
~~~{.php}
$weapons_len = $monster->getWeaponsLength();
}
~~~
</div>
+<div class="language-typescript">
+~~~{.ts}
+ let unionType = monster.equippedType();
+
+ if (unionType == MyGame.Sample.Equipment.Weapon) {
+ let weapon_name = monster.equipped(new MyGame.Sample.Weapon()).name(); // 'Axe'
+ let weapon_damage = monster.equipped(new MyGame.Sample.Weapon()).damage(); // 5
+ }
+~~~
+</div>
<div class="language-php">
~~~{.php}
$union_type = $monster->getEquippedType();
</div>
<div class="language-javascript">
~~~{.js}
- <API for mutating FlatBuffers is not yet support in JavaScript.>
+ <API for mutating FlatBuffers is not yet supported in JavaScript.>
+~~~
+</div>
+<div class="language-typescript">
+~~~{.ts}
+ <API for mutating FlatBuffers is not yet supported in TypeScript.>
~~~
</div>
<div class="language-php">
<div class="language-javascript">
[Use in JavaScript](@ref flatbuffers_guide_use_javascript)
</div>
+<div class="language-typescript">
+[Use in TypeScript](@ref flatbuffers_guide_use_typescript)
+</div>
<div class="language-php">
[Use in PHP](@ref flatbuffers_guide_use_php)
</div>
--- /dev/null
+Use in TypeScript {#flatbuffers_guide_use_typescript}
+=================
+
+## Before you get started
+
+Before diving into the FlatBuffers usage in TypeScript, it should be noted that
+the [Tutorial](@ref flatbuffers_guide_tutorial) page has a complete guide to
+general FlatBuffers usage in all of the supported languages
+(including TypeScript). This page is specifically designed to cover the nuances
+of FlatBuffers usage in TypeScript.
+
+You should also have read the [Building](@ref flatbuffers_guide_building)
+documentation to build `flatc` and should be familiar with
+[Using the schema compiler](@ref flatbuffers_guide_using_schema_compiler) and
+[Writing a schema](@ref flatbuffers_guide_writing_schema).
+
+## FlatBuffers TypeScript library code location
+
+The code for the FlatBuffers TypeScript library can be found at
+`flatbuffers/js`, with typings available at `@types/flatbuffers`.
+
+## Testing the FlatBuffers TypeScript library
+
+To run the tests, use the [TypeScriptTest.sh](https://github.com/google/
+flatbuffers/blob/master/tests/TypeScriptTest.sh) shell script.
+
+*Note: The TypeScript test file requires [Node.js](https://nodejs.org/en/).*
+
+## Using the FlatBuffers TypeScript library
+
+*Note: See [Tutorial](@ref flatbuffers_guide_tutorial) for a more in-depth
+example of how to use FlatBuffers in TypeScript.*
+
+FlatBuffers supports both reading and writing FlatBuffers in TypeScript.
+
+To use FlatBuffers in your own code, first generate TypeScript classes from your
+schema with the `--ts` option to `flatc`. Then you can include both FlatBuffers
+and the generated code to read or write a FlatBuffer.
+
+For example, here is how you would read a FlatBuffer binary file in TypeScript:
+First, include the library and generated code. Then read the file into a
+`Uint8Array`. Make a `flatbuffers.ByteBuffer` out of the `Uint8Array`, and pass
+the ByteBuffer to the `getRootAsMonster` function.
+
+~~~{.ts}
+  // note: import flatbuffers with your desired import method
+
+ import { MyGame } from './monster_generated';
+
+ let data = new Uint8Array(fs.readFileSync('monster.dat'));
+ let buf = new flatbuffers.ByteBuffer(data);
+
+ let monster = MyGame.Example.Monster.getRootAsMonster(buf);
+~~~
+
+Now you can access values like this:
+
+~~~{.ts}
+ let hp = monster.hp();
+ let pos = monster.pos();
+~~~
+
+## Text parsing FlatBuffers in TypeScript
+
+There is currently no support for parsing text (schemas and JSON) directly
+from TypeScript.
title="Use in Java/C#"/>
<tab type="user" url="@ref flatbuffers_guide_use_javascript"
title="Use in JavaScript"/>
+ <tab type="user" url="@ref flatbuffers_guide_use_typescript"
+ title="Use in TypeScript"/>
<tab type="user" url="@ref flatbuffers_guide_use_php"
title="Use in PHP"/>
<tab type="user" url="@ref flatbuffers_guide_use_python"
/// @defgroup flatbuffers_javascript_api JavaScript API
/// @brief FlatBuffers API for JavaScript
+/// @defgroup flatbuffers_typescript_api TypeScript API
+/// @brief FlatBuffers API for TypeScript
+
/// @defgroup flatbuffers_php_api PHP API
/// @brief FlatBuffers API for PHP
--- /dev/null
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+ name = "go",
+ srcs = [
+ "builder.go",
+ "doc.go",
+ "encode.go",
+ "grpc.go",
+ "lib.go",
+ "sizes.go",
+ "struct.go",
+ "table.go",
+ ],
+ importpath = "github.com/google/flatbuffers/go",
+ visibility = ["//visibility:public"],
+)
#include <assert.h>
+#if !defined(FLATBUFFERS_ASSERT)
+#define FLATBUFFERS_ASSERT assert
+#endif
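+// A client can route assertion failures to its own handler by defining this
+// macro before the header is included, e.g. (MyAssertHandler is hypothetical):
+//   #define FLATBUFFERS_ASSERT MyAssertHandler
+//   #include "flatbuffers/flatbuffers.h"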
+
#ifndef ARDUINO
#include <cstdint>
#endif
#endif // !defined(FLATBUFFERS_LITTLEENDIAN)
#define FLATBUFFERS_VERSION_MAJOR 1
-#define FLATBUFFERS_VERSION_MINOR 8
+#define FLATBUFFERS_VERSION_MINOR 9
#define FLATBUFFERS_VERSION_REVISION 0
#define FLATBUFFERS_STRING_EXPAND(X) #X
#define FLATBUFFERS_STRING(X) FLATBUFFERS_STRING_EXPAND(X)
#define FLATBUFFERS_CONSTEXPR
#endif
+#if (defined(__cplusplus) && __cplusplus >= 201402L) || \
+ (defined(__cpp_constexpr) && __cpp_constexpr >= 201304)
+ #define FLATBUFFERS_CONSTEXPR_CPP14 FLATBUFFERS_CONSTEXPR
+#else
+ #define FLATBUFFERS_CONSTEXPR_CPP14
+#endif
+
#if defined(__GXX_EXPERIMENTAL_CXX0X__) && __GNUC__ * 10 + __GNUC_MINOR__ >= 46 || \
defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023026
#define FLATBUFFERS_NOEXCEPT noexcept
u.i = FLATBUFFERS_BYTESWAP64(u.i);
return u.t;
} else {
- assert(0);
+ FLATBUFFERS_ASSERT(0);
}
}
inline void EndianCheck() {
int endiantest = 1;
// If this fails, see FLATBUFFERS_LITTLEENDIAN above.
- assert(*reinterpret_cast<char *>(&endiantest) == FLATBUFFERS_LITTLEENDIAN);
+ FLATBUFFERS_ASSERT(*reinterpret_cast<char *>(&endiantest) ==
+ FLATBUFFERS_LITTLEENDIAN);
(void)endiantest;
}
typedef typename IndirectHelper<T>::mutable_return_type mutable_return_type;
return_type Get(uoffset_t i) const {
- assert(i < size());
+ FLATBUFFERS_ASSERT(i < size());
return IndirectHelper<T>::Read(Data(), i);
}
// Change elements if you have a non-const pointer to this object.
// Scalars only. See reflection.h, and the documentation.
void Mutate(uoffset_t i, const T &val) {
- assert(i < size());
+ FLATBUFFERS_ASSERT(i < size());
WriteScalar(data() + i, val);
}
// "val" points to the new table/string, as you can obtain from
// e.g. reflection::AddFlatBuffer().
void MutateOffset(uoffset_t i, const uint8_t *val) {
- assert(i < size());
- assert(sizeof(T) == sizeof(uoffset_t));
+ FLATBUFFERS_ASSERT(i < size());
+ static_assert(sizeof(T) == sizeof(uoffset_t), "Unrelated types");
WriteScalar(data() + i,
static_cast<uoffset_t>(val - (Data() + i * sizeof(uoffset_t))));
}
// Get a mutable pointer to tables/strings inside this vector.
mutable_return_type GetMutableObject(uoffset_t i) const {
- assert(i < size());
+ FLATBUFFERS_ASSERT(i < size());
return const_cast<mutable_return_type>(IndirectHelper<T>::Read(Data(), i));
}
virtual uint8_t *reallocate_downward(uint8_t *old_p, size_t old_size,
size_t new_size, size_t in_use_back,
size_t in_use_front) {
- assert(new_size > old_size); // vector_downward only grows
+ FLATBUFFERS_ASSERT(new_size > old_size); // vector_downward only grows
uint8_t *new_p = allocate(new_size);
memcpy_downward(old_p, old_size, new_p, new_size, in_use_back,
in_use_front);
reserved_(reserved),
cur_(cur),
size_(sz) {
- assert(allocator_);
+ FLATBUFFERS_ASSERT(allocator_);
}
DetachedBuffer(DetachedBuffer &&other)
inline void destroy() {
if (buf_) {
- assert(allocator_);
+ FLATBUFFERS_ASSERT(allocator_);
allocator_->deallocate(buf_, reserved_);
}
if (own_allocator_ && allocator_) { delete allocator_; }
buf_(nullptr),
cur_(nullptr),
scratch_(nullptr) {
- assert(allocator_);
+ FLATBUFFERS_ASSERT(allocator_);
}
~vector_downward() {
if (buf_) {
- assert(allocator_);
+ FLATBUFFERS_ASSERT(allocator_);
allocator_->deallocate(buf_, reserved_);
}
if (own_allocator_ && allocator_) { delete allocator_; }
void reset() {
if (buf_) {
- assert(allocator_);
+ FLATBUFFERS_ASSERT(allocator_);
allocator_->deallocate(buf_, reserved_);
buf_ = nullptr;
}
}
size_t ensure_space(size_t len) {
- assert(cur_ >= scratch_ && scratch_ >= buf_);
+ FLATBUFFERS_ASSERT(cur_ >= scratch_ && scratch_ >= buf_);
if (len > static_cast<size_t>(cur_ - scratch_)) { reallocate(len); }
// Beyond this, signed offsets may not have enough range:
// (FlatBuffers > 2GB not supported).
- assert(size() < FLATBUFFERS_MAX_BUFFER_SIZE);
+ FLATBUFFERS_ASSERT(size() < FLATBUFFERS_MAX_BUFFER_SIZE);
return len;
}
size_t capacity() const { return reserved_; }
uint8_t *data() const {
- assert(cur_);
+ FLATBUFFERS_ASSERT(cur_);
return cur_;
}
uint8_t *scratch_data() const {
- assert(buf_);
+ FLATBUFFERS_ASSERT(buf_);
return buf_;
}
uint8_t *scratch_end() const {
- assert(scratch_);
+ FLATBUFFERS_ASSERT(scratch_);
return scratch_;
}
uint8_t *scratch_; // Points to the end of the scratchpad in use.
void reallocate(size_t len) {
- assert(allocator_);
+ FLATBUFFERS_ASSERT(allocator_);
auto old_reserved = reserved_;
auto old_size = size();
auto old_scratch_size = scratch_size();
// FlatBufferBuilder::Finish with your root table.
// If you really need to access an unfinished buffer, call
// GetCurrentBufferPointer instead.
- assert(finished);
+ FLATBUFFERS_ASSERT(finished);
}
/// @endcond
// Align to ensure GetSize() below is correct.
Align(sizeof(uoffset_t));
// Offset must refer to something already in buffer.
- assert(off && off <= GetSize());
+ FLATBUFFERS_ASSERT(off && off <= GetSize());
return GetSize() - off + static_cast<uoffset_t>(sizeof(uoffset_t));
}
// Ignoring this assert may appear to work in simple cases, but the reason
// it is here is that storing objects in-line may cause vtable offsets
// to not fit anymore. It also leads to vtable duplication.
- assert(!nested);
+ FLATBUFFERS_ASSERT(!nested);
// If you hit this, fields were added outside the scope of a table.
- assert(!num_field_loc);
+ FLATBUFFERS_ASSERT(!num_field_loc);
}
// From generated code (or from the parser), we call StartTable/EndTable
// resulting vtable offset.
uoffset_t EndTable(uoffset_t start) {
// If you get this assert, a corresponding StartTable wasn't called.
- assert(nested);
+ FLATBUFFERS_ASSERT(nested);
// Write the vtable offset, which is the start of any Table.
// We fill its value later.
auto vtableoffsetloc = PushElement<soffset_t>(0);
FieldIndexToOffset(0));
buf_.fill_big(max_voffset_);
auto table_object_size = vtableoffsetloc - start;
- assert(table_object_size < 0x10000); // Vtable use 16bit offsets.
+    // Vtables use 16-bit offsets.
+ FLATBUFFERS_ASSERT(table_object_size < 0x10000);
WriteScalar<voffset_t>(buf_.data() + sizeof(voffset_t),
static_cast<voffset_t>(table_object_size));
WriteScalar<voffset_t>(buf_.data(), max_voffset_);
auto field_location = reinterpret_cast<FieldLoc *>(it);
auto pos = static_cast<voffset_t>(vtableoffsetloc - field_location->off);
// If this asserts, it means you've set a field twice.
- assert(!ReadScalar<voffset_t>(buf_.data() + field_location->id));
+ FLATBUFFERS_ASSERT(
+ !ReadScalar<voffset_t>(buf_.data() + field_location->id));
WriteScalar<voffset_t>(buf_.data() + field_location->id, pos);
}
ClearOffsets();
auto vtable_ptr = table_ptr - ReadScalar<soffset_t>(table_ptr);
bool ok = ReadScalar<voffset_t>(vtable_ptr + field) != 0;
// If this fails, the caller will show what field needs to be set.
- assert(ok);
+ FLATBUFFERS_ASSERT(ok);
(void)ok;
}
/// @cond FLATBUFFERS_INTERNAL
uoffset_t EndVector(size_t len) {
- assert(nested); // Hit if no corresponding StartVector.
+ FLATBUFFERS_ASSERT(nested); // Hit if no corresponding StartVector.
nested = false;
return PushElement(static_cast<uoffset_t>(len));
}
(file_identifier ? kFileIdentifierLength : 0),
minalign_);
if (file_identifier) {
- assert(strlen(file_identifier) == kFileIdentifierLength);
+ FLATBUFFERS_ASSERT(strlen(file_identifier) == kFileIdentifierLength);
PushBytes(reinterpret_cast<const uint8_t *>(file_identifier),
kFileIdentifierLength);
}
/// This function is UNDEFINED for FlatBuffers whose schema does not include
/// a file_identifier (likely points at padding or the start of the root
/// vtable).
-inline const char *GetBufferIdentifier(const void *buf) {
- return reinterpret_cast<const char *>(buf) + sizeof(uoffset_t);
+inline const char *GetBufferIdentifier(const void *buf, bool size_prefixed = false) {
+ return reinterpret_cast<const char *>(buf) +
+ ((size_prefixed) ? 2 * sizeof(uoffset_t) : sizeof(uoffset_t));
}
// Helper to see if the identifier in a buffer has the expected value.
-inline bool BufferHasIdentifier(const void *buf, const char *identifier) {
- return strncmp(GetBufferIdentifier(buf), identifier,
+inline bool BufferHasIdentifier(const void *buf, const char *identifier, bool size_prefixed = false) {
+ return strncmp(GetBufferIdentifier(buf, size_prefixed), identifier,
FlatBufferBuilder::kFileIdentifierLength) == 0;
}
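+// Illustrative only: for a buffer written with a leading size prefix (e.g. via
+// FlatBufferBuilder::FinishSizePrefixed), pass size_prefixed = true so the
+// identifier is read past the extra uoffset_t:
+//   bool ok = BufferHasIdentifier(buf, "MONS", /*size_prefixed=*/true);
+// Here "MONS" stands in for whatever file_identifier the schema declares.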
bool Check(bool ok) const {
// clang-format off
#ifdef FLATBUFFERS_DEBUG_VERIFICATION_FAILURE
- assert(ok);
+ FLATBUFFERS_ASSERT(ok);
#endif
#ifdef FLATBUFFERS_TRACK_VERIFIER_BUFFER_SIZE
if (!ok)
// or the buffer is corrupt.
// Assert, because calling this function with bad data may cause reads
// outside of buffer boundaries.
- assert(false);
+ FLATBUFFERS_ASSERT(false);
return nullptr;
}
+/// @brief This returns the prefixed size of a FlatBuffer.
+inline uoffset_t GetPrefixedSize(const uint8_t *buf) { return ReadScalar<uoffset_t>(buf); }
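+// (The prefix is the leading uoffset_t of a size-prefixed buffer; its value is
+// the size of the rest of the buffer, excluding the prefix itself.)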
+
// Base class for native objects (FlatBuffer data de-serialized into native
// C++ data structures).
// Contains no functionality, purely documentative.
// clang-format off
#if defined(_MSC_VER)
- #define MANUALLY_ALIGNED_STRUCT(alignment) \
+ #define FLATBUFFERS_MANUALLY_ALIGNED_STRUCT(alignment) \
__pragma(pack(1)); \
struct __declspec(align(alignment))
- #define STRUCT_END(name, size) \
+ #define FLATBUFFERS_STRUCT_END(name, size) \
__pragma(pack()); \
static_assert(sizeof(name) == size, "compiler breaks packing rules")
#elif defined(__GNUC__) || defined(__clang__)
- #define MANUALLY_ALIGNED_STRUCT(alignment) \
+ #define FLATBUFFERS_MANUALLY_ALIGNED_STRUCT(alignment) \
_Pragma("pack(1)") \
struct __attribute__((aligned(alignment)))
- #define STRUCT_END(name, size) \
+ #define FLATBUFFERS_STRUCT_END(name, size) \
_Pragma("pack()") \
static_assert(sizeof(name) == size, "compiler breaks packing rules")
#else
#undef FLATBUFFERS_ET
};
-inline const char **ElementaryTypeNames() {
- static const char *names[] = {
+inline const char * const *ElementaryTypeNames() {
+ static const char * const names[] = {
#define FLATBUFFERS_ET(E) #E,
FLATBUFFERS_GEN_ELEMENTARY_TYPES(FLATBUFFERS_ET)
#undef FLATBUFFERS_ET
struct TypeTable;
// Signature of the static method present in each type.
-typedef TypeTable *(*TypeFunction)();
+typedef const TypeTable *(*TypeFunction)();
struct TypeTable {
SequenceType st;
const TypeCode *type_codes;
const TypeFunction *type_refs;
const int32_t *values; // Only set for non-consecutive enum/union or structs.
- const char **names; // Only set if compiled with --reflect-names.
+ const char * const *names; // Only set if compiled with --reflect-names.
};
// String which identifies the current version of FlatBuffers.
#endif // !defined(_WIN32) && !defined(__CYGWIN__)
-#define DEFINE_BITMASK_OPERATORS(E, T)\
+#define FLATBUFFERS_DEFINE_BITMASK_OPERATORS(E, T)\
inline E operator | (E lhs, E rhs){\
return E(T(lhs) | T(rhs));\
}\
}
inline Type ToTypedVector(Type t, size_t fixed_len = 0) {
- assert(IsTypedVectorElementType(t));
+ FLATBUFFERS_ASSERT(IsTypedVectorElementType(t));
switch (fixed_len) {
case 0: return static_cast<Type>(t - TYPE_INT + TYPE_VECTOR_INT);
case 2: return static_cast<Type>(t - TYPE_INT + TYPE_VECTOR_INT2);
case 3: return static_cast<Type>(t - TYPE_INT + TYPE_VECTOR_INT3);
case 4: return static_cast<Type>(t - TYPE_INT + TYPE_VECTOR_INT4);
- default: assert(0); return TYPE_NULL;
+ default: FLATBUFFERS_ASSERT(0); return TYPE_NULL;
}
}
inline Type ToTypedVectorElementType(Type t) {
- assert(IsTypedVector(t));
+ FLATBUFFERS_ASSERT(IsTypedVector(t));
return static_cast<Type>(t - TYPE_VECTOR_INT + TYPE_INT);
}
inline Type ToFixedTypedVectorElementType(Type t, uint8_t *len) {
- assert(IsFixedTypedVector(t));
+ FLATBUFFERS_ASSERT(IsFixedTypedVector(t));
auto fixed_type = t - TYPE_VECTOR_INT2;
*len = static_cast<uint8_t>(fixed_type / 3 +
2); // 3 types each, starting from length 2.
return Mutate(dest, static_cast<double>(t), byte_width, value_width);
if (byte_width == sizeof(float))
return Mutate(dest, static_cast<float>(t), byte_width, value_width);
- assert(false);
+ FLATBUFFERS_ASSERT(false);
return false;
}
// We should have interleaved keys and values on the stack.
// Make sure it is an even number:
auto len = stack_.size() - start;
- assert(!(len & 1));
+ FLATBUFFERS_ASSERT(!(len & 1));
len /= 2;
// Make sure keys are all strings:
for (auto key = start; key < stack_.size(); key += 2) {
- assert(stack_[key].type_ == TYPE_KEY);
+ FLATBUFFERS_ASSERT(stack_[key].type_ == TYPE_KEY);
}
// Now sort values, so later we can do a binary search lookup.
// We want to sort 2 array elements at a time.
// TODO: Have to check for pointer equality, as some sort
// implementation apparently call this function with the same
// element?? Why?
- assert(comp || &a == &b);
+ FLATBUFFERS_ASSERT(comp || &a == &b);
return comp < 0;
});
// First create a vector out of all keys.
template<typename T> size_t FixedTypedVector(const T *elems, size_t len) {
// We only support a few fixed vector lengths. Anything bigger use a
// regular typed vector.
- assert(len >= 2 && len <= 4);
+ FLATBUFFERS_ASSERT(len >= 2 && len <= 4);
// And only scalar values.
- assert(flatbuffers::is_scalar<T>::value);
+ static_assert(flatbuffers::is_scalar<T>::value, "Unrelated types");
return ScalarVector(elems, len, true);
}
// in a parent. You need to have exactly one root to finish a buffer.
// Check your Start/End calls are matched, and all objects are inside
// some other object.
- assert(stack_.size() == 1);
+ FLATBUFFERS_ASSERT(stack_.size() == 1);
// Write root value.
auto byte_width = Align(stack_[0].ElemWidth(buf_.size(), 0));
// If you get this assert, you're attempting to access a buffer
// which hasn't been finished yet. Be sure to call
// Builder::Finish with your root object.
- assert(finished_);
+ FLATBUFFERS_ASSERT(finished_);
}
// Align to prepare for writing a scalar with a certain size.
}
template<typename T> void Write(T val, size_t byte_width) {
- assert(sizeof(T) >= byte_width);
+ FLATBUFFERS_ASSERT(sizeof(T) >= byte_width);
val = flatbuffers::EndianScalar(val);
WriteBytes(&val, byte_width);
}
case 4: Write(static_cast<float>(f), byte_width); break;
// case 2: Write(static_cast<half>(f), byte_width); break;
// case 1: Write(static_cast<quarter>(f), byte_width); break;
- default: assert(0);
+ default: FLATBUFFERS_ASSERT(0);
}
}
void WriteOffset(uint64_t o, uint8_t byte_width) {
auto reloff = buf_.size() - o;
- assert(reloff < 1ULL << (byte_width * 8) || byte_width == 8);
+ FLATBUFFERS_ASSERT(byte_width == 8 || reloff < 1ULL << (byte_width * 8));
Write(reloff, byte_width);
}
case 2: return BIT_WIDTH_16;
case 4: return BIT_WIDTH_32;
case 8: return BIT_WIDTH_64;
- default: assert(false); return BIT_WIDTH_64;
+ default: FLATBUFFERS_ASSERT(false); return BIT_WIDTH_64;
}
}
template<typename T> static Type GetScalarType() {
- assert(flatbuffers::is_scalar<T>::value);
+ static_assert(flatbuffers::is_scalar<T>::value, "Unrelated types");
return flatbuffers::is_floating_point<T>::value
? TYPE_FLOAT
: flatbuffers::is_same<T, bool>::value
byte_width)
return bit_width;
}
- assert(false); // Must match one of the sizes above.
+ FLATBUFFERS_ASSERT(false); // Must match one of the sizes above.
return BIT_WIDTH_64;
}
}
// byte vector > 255 elements). For such types, write a "blob" instead.
// TODO: instead of asserting, could write vector with larger elements
// instead, though that would be wasteful.
- assert(WidthU(len) <= bit_width);
+ FLATBUFFERS_ASSERT(WidthU(len) <= bit_width);
if (!fixed) Write<uint64_t>(len, byte_width);
auto vloc = buf_.size();
for (size_t i = 0; i < len; i++) Write(elems[i], byte_width);
} else {
// If you get this assert, you are writing a typed vector with
// elements that are not all the same type.
- assert(vector_type == stack_[i].type_);
+ FLATBUFFERS_ASSERT(vector_type == stack_[i].type_);
}
}
}
// If you get this assert, your fixed types are not one of:
// Int / UInt / Float / Key.
- assert(IsTypedVectorElementType(vector_type));
+ FLATBUFFERS_ASSERT(IsTypedVectorElementType(vector_type));
auto byte_width = Align(bit_width);
// Write vector. First the keys width/offset if available, and size.
if (keys) {
virtual ~SliceAllocator() { grpc_slice_unref(slice_); }
virtual uint8_t *allocate(size_t size) override {
- assert(GRPC_SLICE_IS_EMPTY(slice_));
+ FLATBUFFERS_ASSERT(GRPC_SLICE_IS_EMPTY(slice_));
slice_ = grpc_slice_malloc(size);
return GRPC_SLICE_START_PTR(slice_);
}
virtual void deallocate(uint8_t *p, size_t size) override {
- assert(p == GRPC_SLICE_START_PTR(slice_));
- assert(size == GRPC_SLICE_LENGTH(slice_));
+ FLATBUFFERS_ASSERT(p == GRPC_SLICE_START_PTR(slice_));
+ FLATBUFFERS_ASSERT(size == GRPC_SLICE_LENGTH(slice_));
grpc_slice_unref(slice_);
slice_ = grpc_empty_slice();
}
virtual uint8_t *reallocate_downward(uint8_t *old_p, size_t old_size,
size_t new_size, size_t in_use_back,
size_t in_use_front) override {
- assert(old_p == GRPC_SLICE_START_PTR(slice_));
- assert(old_size == GRPC_SLICE_LENGTH(slice_));
- assert(new_size > old_size);
+ FLATBUFFERS_ASSERT(old_p == GRPC_SLICE_START_PTR(slice_));
+ FLATBUFFERS_ASSERT(old_size == GRPC_SLICE_LENGTH(slice_));
+ FLATBUFFERS_ASSERT(new_size > old_size);
grpc_slice old_slice = slice_;
grpc_slice new_slice = grpc_slice_malloc(new_size);
uint8_t *new_p = GRPC_SLICE_START_PTR(new_slice);
private:
grpc_slice &get_slice(uint8_t *p, size_t size) {
- assert(p == GRPC_SLICE_START_PTR(slice_));
- assert(size == GRPC_SLICE_LENGTH(slice_));
+ FLATBUFFERS_ASSERT(p == GRPC_SLICE_START_PTR(slice_));
+ FLATBUFFERS_ASSERT(size == GRPC_SLICE_LENGTH(slice_));
return slice_;
}
auto msg_data = buf_.data(); // pointer to msg
auto msg_size = buf_.size(); // size of msg
// Do some sanity checks on data/size
- assert(msg_data);
- assert(msg_size);
- assert(msg_data >= buf_data);
- assert(msg_data + msg_size <= buf_data + buf_size);
+ FLATBUFFERS_ASSERT(msg_data);
+ FLATBUFFERS_ASSERT(msg_size);
+ FLATBUFFERS_ASSERT(msg_data >= buf_data);
+ FLATBUFFERS_ASSERT(msg_data + msg_size <= buf_data + buf_size);
// Calculate offsets from the buffer start
auto begin = msg_data - buf_data;
auto end = begin + msg_size;
static const uint64_t kOffsetBasis = 0xcbf29ce484222645ULL;
};
-template<typename T> T HashFnv1(const char *input) {
+template<typename T> FLATBUFFERS_CONSTEXPR_CPP14 T HashFnv1(const char *input) {
T hash = FnvTraits<T>::kOffsetBasis;
for (const char *c = input; *c; ++c) {
hash *= FnvTraits<T>::kFnvPrime;
return hash;
}
-template<typename T> T HashFnv1a(const char *input) {
+template<typename T> FLATBUFFERS_CONSTEXPR_CPP14 T HashFnv1a(const char *input) {
T hash = FnvTraits<T>::kOffsetBasis;
for (const char *c = input; *c; ++c) {
hash ^= static_cast<unsigned char>(*c);
return hash;
}
-template<typename T> struct NamedHashFunction {
+template <> FLATBUFFERS_CONSTEXPR_CPP14 inline uint16_t HashFnv1<uint16_t>(const char *input) {
+ uint32_t hash = HashFnv1<uint32_t>(input);
+ return (hash >> 16) ^ (hash & 0xffff);
+}
+
+template <> FLATBUFFERS_CONSTEXPR_CPP14 inline uint16_t HashFnv1a<uint16_t>(const char *input) {
+ uint32_t hash = HashFnv1a<uint32_t>(input);
+ return (hash >> 16) ^ (hash & 0xffff);
+}
+
+template <typename T> struct NamedHashFunction {
const char *name;
typedef T (*HashFunction)(const char *);
HashFunction function;
};
+const NamedHashFunction<uint16_t> kHashFunctions16[] = {
+ { "fnv1_16", HashFnv1<uint16_t> },
+ { "fnv1a_16", HashFnv1a<uint16_t> },
+};
+
const NamedHashFunction<uint32_t> kHashFunctions32[] = {
{ "fnv1_32", HashFnv1<uint32_t> },
{ "fnv1a_32", HashFnv1a<uint32_t> },
{ "fnv1a_64", HashFnv1a<uint64_t> },
};
+inline NamedHashFunction<uint16_t>::HashFunction FindHashFunction16(
+ const char *name) {
+ std::size_t size = sizeof(kHashFunctions16) / sizeof(kHashFunctions16[0]);
+ for (std::size_t i = 0; i < size; ++i) {
+ if (std::strcmp(name, kHashFunctions16[i].name) == 0) {
+ return kHashFunctions16[i].function;
+ }
+ }
+ return nullptr;
+}
+
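// Illustrative sketch (not part of the patched source): the new 16-bit variants
// fold the 32-bit FNV result, so callers can hash directly or resolve the
// function by name at runtime.
//   uint16_t h = flatbuffers::HashFnv1a<uint16_t>("Monster");
//   auto fn = flatbuffers::FindHashFunction16("fnv1a_16");
//   uint16_t h2 = fn ? fn("Monster") : 0;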
inline NamedHashFunction<uint32_t>::HashFunction FindHashFunction32(
const char *name) {
std::size_t size = sizeof(kHashFunctions32) / sizeof(kHashFunctions32[0]);
dict.erase(it);
dict[newname] = obj;
} else {
- assert(false);
+ FLATBUFFERS_ASSERT(false);
}
}
bool mutable_buffer;
bool one_file;
bool proto_mode;
+ bool proto_oneof_union;
bool generate_all;
bool skip_unexpected_fields_in_json;
bool generate_name_strings;
std::string go_namespace;
bool reexport_ts_modules;
bool protobuf_ascii_alike;
+ bool size_prefixed;
// Possible options for the more general generator below.
enum Language {
mutable_buffer(false),
one_file(false),
proto_mode(false),
+ proto_oneof_union(false),
generate_all(false),
skip_unexpected_fields_in_json(false),
generate_name_strings(false),
skip_flatbuffers_import(false),
reexport_ts_modules(true),
protobuf_ascii_alike(false),
+ size_prefixed(false),
lang(IDLOptions::kJava),
mini_reflect(IDLOptions::kNone),
lang_to_generate(0) {}
*this = other; // Use assignment operator.
}
- ~CheckedError() { assert(has_been_checked_); }
+ ~CheckedError() { FLATBUFFERS_ASSERT(has_been_checked_); }
bool Check() {
has_been_checked_ = true;
known_attributes_["idempotent"] = true;
known_attributes_["cpp_type"] = true;
known_attributes_["cpp_ptr_type"] = true;
+ known_attributes_["cpp_ptr_type_get"] = true;
known_attributes_["cpp_str_type"] = true;
known_attributes_["native_inline"] = true;
known_attributes_["native_custom_alloc"] = true;
FLATBUFFERS_CHECKED_ERROR ParseHexNum(int nibbles, uint64_t *val);
FLATBUFFERS_CHECKED_ERROR Next();
FLATBUFFERS_CHECKED_ERROR SkipByteOrderMark();
- bool Is(int t);
- bool IsIdent(const char *id);
+ bool Is(int t) const;
+ bool IsIdent(const char *id) const;
FLATBUFFERS_CHECKED_ERROR Expect(int t);
- std::string TokenToStringId(int t);
+ std::string TokenToStringId(int t) const;
EnumDef *LookupEnum(const std::string &id);
FLATBUFFERS_CHECKED_ERROR ParseNamespacing(std::string *id,
std::string *last);
size_t fieldn,
const StructDef *parent_struct_def);
FLATBUFFERS_CHECKED_ERROR ParseMetaData(SymbolTable<Value> *attributes);
- FLATBUFFERS_CHECKED_ERROR TryTypedValue(int dtoken, bool check, Value &e,
+ FLATBUFFERS_CHECKED_ERROR TryTypedValue(const std::string *name, int dtoken, bool check, Value &e,
BaseType req, bool *destmatch);
FLATBUFFERS_CHECKED_ERROR ParseHash(Value &e, FieldDef* field);
FLATBUFFERS_CHECKED_ERROR TokenError();
- FLATBUFFERS_CHECKED_ERROR ParseSingleValue(Value &e);
+ FLATBUFFERS_CHECKED_ERROR ParseSingleValue(const std::string *name, Value &e);
FLATBUFFERS_CHECKED_ERROR ParseEnumFromString(Type &type, int64_t *result);
StructDef *LookupCreateStruct(const std::string &name,
bool create_if_new = true,
FLATBUFFERS_CHECKED_ERROR ParseNamespace();
FLATBUFFERS_CHECKED_ERROR StartStruct(const std::string &name,
StructDef **dest);
+ FLATBUFFERS_CHECKED_ERROR StartEnum(const std::string &name,
+ bool is_union,
+ EnumDef **dest);
FLATBUFFERS_CHECKED_ERROR ParseDecl();
FLATBUFFERS_CHECKED_ERROR ParseService();
FLATBUFFERS_CHECKED_ERROR ParseProtoFields(StructDef *struct_def,
case ST_TABLE:
case ST_UNION: return 4;
case ST_STRUCT: return type_table->values[type_table->num_elems];
- default: assert(false); return 1;
+ default: FLATBUFFERS_ASSERT(false); return 1;
}
- default: assert(false); return 1;
+ default: FLATBUFFERS_ASSERT(false); return 1;
}
}
case ST_STRUCT: IterateObject(val, type_table, visitor); break;
case ST_UNION: {
val += ReadScalar<uoffset_t>(val);
- assert(prev_val);
+ FLATBUFFERS_ASSERT(prev_val);
auto union_type = *prev_val; // Always a uint8_t.
if (vector_index >= 0) {
auto type_vec = reinterpret_cast<const Vector<uint8_t> *>(prev_val);
}
break;
}
- case ST_ENUM: assert(false); break;
+ case ST_ENUM: FLATBUFFERS_ASSERT(false); break;
}
break;
}
// Get a field's default, if you know it's an integer, and its exact type.
template<typename T> T GetFieldDefaultI(const reflection::Field &field) {
- assert(sizeof(T) == GetTypeSize(field.type()->base_type()));
+ FLATBUFFERS_ASSERT(sizeof(T) == GetTypeSize(field.type()->base_type()));
return static_cast<T>(field.default_integer());
}
// Get a field's default, if you know it's floating point and its exact type.
template<typename T> T GetFieldDefaultF(const reflection::Field &field) {
- assert(sizeof(T) == GetTypeSize(field.type()->base_type()));
+ FLATBUFFERS_ASSERT(sizeof(T) == GetTypeSize(field.type()->base_type()));
return static_cast<T>(field.default_real());
}
// Get a field, if you know it's an integer, and its exact type.
template<typename T>
T GetFieldI(const Table &table, const reflection::Field &field) {
- assert(sizeof(T) == GetTypeSize(field.type()->base_type()));
+ FLATBUFFERS_ASSERT(sizeof(T) == GetTypeSize(field.type()->base_type()));
return table.GetField<T>(field.offset(),
static_cast<T>(field.default_integer()));
}
// Get a field, if you know it's floating point and its exact type.
template<typename T>
T GetFieldF(const Table &table, const reflection::Field &field) {
- assert(sizeof(T) == GetTypeSize(field.type()->base_type()));
+ FLATBUFFERS_ASSERT(sizeof(T) == GetTypeSize(field.type()->base_type()));
return table.GetField<T>(field.offset(),
static_cast<T>(field.default_real()));
}
// Get a field, if you know it's a string.
inline const String *GetFieldS(const Table &table,
const reflection::Field &field) {
- assert(field.type()->base_type() == reflection::String);
+ FLATBUFFERS_ASSERT(field.type()->base_type() == reflection::String);
return table.GetPointer<const String *>(field.offset());
}
// Get a field, if you know it's a vector.
template<typename T>
Vector<T> *GetFieldV(const Table &table, const reflection::Field &field) {
- assert(field.type()->base_type() == reflection::Vector &&
- sizeof(T) == GetTypeSize(field.type()->element()));
+ FLATBUFFERS_ASSERT(field.type()->base_type() == reflection::Vector &&
+ sizeof(T) == GetTypeSize(field.type()->element()));
return table.GetPointer<Vector<T> *>(field.offset());
}
// Get a field, if you know it's a table.
inline Table *GetFieldT(const Table &table, const reflection::Field &field) {
- assert(field.type()->base_type() == reflection::Obj ||
- field.type()->base_type() == reflection::Union);
+ FLATBUFFERS_ASSERT(field.type()->base_type() == reflection::Obj ||
+ field.type()->base_type() == reflection::Union);
return table.GetPointer<Table *>(field.offset());
}
const reflection::Field &field) {
// TODO: This does NOT check if the field is a table or struct, but we'd need
// access to the schema to check the is_struct flag.
- assert(field.type()->base_type() == reflection::Obj);
+ FLATBUFFERS_ASSERT(field.type()->base_type() == reflection::Obj);
return table.GetStruct<const Struct *>(field.offset());
}
// Get a structure's field, if you know it's a struct.
inline const Struct *GetFieldStruct(const Struct &structure,
const reflection::Field &field) {
- assert(field.type()->base_type() == reflection::Obj);
+ FLATBUFFERS_ASSERT(field.type()->base_type() == reflection::Obj);
return structure.GetStruct<const Struct *>(field.offset());
}
bool SetField(Table *table, const reflection::Field &field, T val) {
reflection::BaseType type = field.type()->base_type();
if (!IsScalar(type)) { return false; }
- assert(sizeof(T) == GetTypeSize(type));
+ FLATBUFFERS_ASSERT(sizeof(T) == GetTypeSize(type));
T def;
if (IsInteger(type)) {
def = GetFieldDefaultI<T>(field);
} else {
- assert(IsFloat(type));
+ FLATBUFFERS_ASSERT(IsFloat(type));
def = GetFieldDefaultF<T>(field);
}
return table->SetField(field.offset(), val, def);
// TODO: this is clumsy and slow, but no other way to find it?
auto type_field = parent.fields()->LookupByKey(
(unionfield.name()->str() + UnionTypeFieldSuffix()).c_str());
- assert(type_field);
+ FLATBUFFERS_ASSERT(type_field);
auto union_type = GetFieldI<uint8_t>(table, *type_field);
auto enumval = enumdef->values()->LookupByKey(union_type);
return *enumval->object();
inline bool SetFieldT(Table *table, const reflection::Field &field,
const uint8_t *val) {
- assert(sizeof(uoffset_t) == GetTypeSize(field.type()->base_type()));
+ FLATBUFFERS_ASSERT(sizeof(uoffset_t) ==
+ GetTypeSize(field.type()->base_type()));
return table->SetPointer(field.offset(), val);
}
Union = 16
};
-inline BaseType (&EnumValuesBaseType())[17] {
- static BaseType values[] = {
+inline const BaseType (&EnumValuesBaseType())[17] {
+ static const BaseType values[] = {
None,
UType,
Bool,
return values;
}
-inline const char **EnumNamesBaseType() {
- static const char *names[] = {
+inline const char * const *EnumNamesBaseType() {
+ static const char * const names[] = {
"None",
"UType",
"Bool",
void add_index(int32_t index) {
fbb_.AddElement<int32_t>(Type::VT_INDEX, index, -1);
}
- TypeBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ explicit TypeBuilder(flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
void add_value(flatbuffers::Offset<flatbuffers::String> value) {
fbb_.AddOffset(KeyValue::VT_VALUE, value);
}
- KeyValueBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ explicit KeyValueBuilder(flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
void add_union_type(flatbuffers::Offset<Type> union_type) {
fbb_.AddOffset(EnumVal::VT_UNION_TYPE, union_type);
}
- EnumValBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ explicit EnumValBuilder(flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
void add_documentation(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> documentation) {
fbb_.AddOffset(Enum::VT_DOCUMENTATION, documentation);
}
- EnumBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ explicit EnumBuilder(flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
void add_documentation(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> documentation) {
fbb_.AddOffset(Field::VT_DOCUMENTATION, documentation);
}
- FieldBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ explicit FieldBuilder(flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
void add_documentation(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> documentation) {
fbb_.AddOffset(Object::VT_DOCUMENTATION, documentation);
}
- ObjectBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ explicit ObjectBuilder(flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
void add_root_table(flatbuffers::Offset<Object> root_table) {
fbb_.AddOffset(Schema::VT_ROOT_TABLE, root_table);
}
- SchemaBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ explicit SchemaBuilder(flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
return flatbuffers::GetRoot<reflection::Schema>(buf);
}
+inline const reflection::Schema *GetSizePrefixedSchema(const void *buf) {
+ return flatbuffers::GetSizePrefixedRoot<reflection::Schema>(buf);
+}
+
inline const char *SchemaIdentifier() {
return "BFBS";
}
return verifier.VerifyBuffer<reflection::Schema>(SchemaIdentifier());
}
+inline bool VerifySizePrefixedSchemaBuffer(
+ flatbuffers::Verifier &verifier) {
+ return verifier.VerifySizePrefixedBuffer<reflection::Schema>(SchemaIdentifier());
+}
+
inline const char *SchemaExtension() {
return "bfbs";
}
fbb.Finish(root, SchemaIdentifier());
}
+inline void FinishSizePrefixedSchemaBuffer(
+ flatbuffers::FlatBufferBuilder &fbb,
+ flatbuffers::Offset<reflection::Schema> root) {
+ fbb.FinishSizePrefixed(root, SchemaIdentifier());
+}
+
} // namespace reflection
#endif // FLATBUFFERS_GENERATED_REFLECTION_REFLECTION_H_
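// Illustrative sketch (not part of the patched source): reading a size-prefixed
// binary schema with the helpers added above; buf/len are assumed to hold a
// .bfbs written via FinishSizePrefixedSchemaBuffer.
//   flatbuffers::Verifier verifier(buf, len);
//   if (reflection::VerifySizePrefixedSchemaBuffer(verifier)) {
//     auto schema = reflection::GetSizePrefixedSchema(buf);
//     auto root = schema->root_table();  // reflection::Object describing the root
//   }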
// Convert a unicode code point into a UTF-8 representation by appending it
// to a string. Returns the number of bytes generated.
inline int ToUTF8(uint32_t ucc, std::string *out) {
- assert(!(ucc & 0x80000000)); // Top bit can't be set.
+ FLATBUFFERS_ASSERT(!(ucc & 0x80000000)); // Top bit can't be set.
// 6 possible encodings: http://en.wikipedia.org/wiki/UTF-8
for (int i = 0; i < 6; i++) {
// Max bits this encoding can represent.
return i + 1; // Return the number of bytes added.
}
}
- assert(0); // Impossible to arrive here.
+ FLATBUFFERS_ASSERT(0); // Impossible to arrive here.
return -1;
}
break;
}
}
- if ((**in << len) & 0x80) return -1; // Bit after leading 1's must be 0.
+ if ((static_cast<unsigned char>(**in) << len) & 0x80) return -1; // Bit after leading 1's must be 0.
if (!len) return *(*in)++;
// UTF-8 encoded values with a length are between 2 and 4 bytes.
if (len < 2 || len > 4) { return -1; }
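// Illustrative sketch (not part of the patched source): the unsigned-char cast
// above avoids left-shifting a negative int when char is signed and the lead
// byte has its high bit set; the decoded result itself is unchanged.
//   const char *s = "\xC3\xA9";           // U+00E9 ("é") encoded in UTF-8
//   int cp = flatbuffers::FromUTF8(&s);   // expected to yield 0xE9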
--- /dev/null
+/*
+ * Copyright 2017 Google Inc. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.flatbuffers;
+
+import static com.google.flatbuffers.Constants.*;
+
+import java.nio.ByteBuffer;
+
+/// @file
+/// @addtogroup flatbuffers_java_api
+/// @{
+
+/**
+ * Class that collects utility functions around `ByteBuffer`.
+ */
+public class ByteBufferUtil {
+
+ /**
+ * Extract the size prefix from a `ByteBuffer`.
+ *
+ * @param bb a size-prefixed buffer
+ * @return the size prefix
+ */
+ public static int getSizePrefix(ByteBuffer bb) {
+ return bb.getInt(bb.position());
+ }
+
+ /**
+ * Create a duplicate of a size-prefixed `ByteBuffer` that has its position
+ * advanced just past the size prefix.
+ *
+ * @param bb a size-prefixed buffer
+ * @return a new buffer on the same underlying data that has skipped the
+ * size prefix
+ */
+ public static ByteBuffer removeSizePrefix(ByteBuffer bb) {
+ ByteBuffer s = bb.duplicate();
+ s.position(s.position() + SIZE_PREFIX_LENGTH);
+ return s;
+ }
+
+}
+
+/// @}
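// Illustrative sketch (not part of the patched source): consuming a size-prefixed
// buffer, e.g. one read whole from a stream or produced by the
// finishSizePrefixed() methods added to FlatBufferBuilder below; "Monster" stands
// in for any generated root type.
//   int size = ByteBufferUtil.getSizePrefix(bb);           // payload size in bytes
//   ByteBuffer data = ByteBufferUtil.removeSizePrefix(bb);
//   Monster monster = Monster.getRootAsMonster(data);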
static final int SIZEOF_DOUBLE = 8;
/** The number of bytes in a file identifier. */
static final int FILE_IDENTIFIER_LENGTH = 4;
+ /** The number of bytes in a size prefix. */
+ public static final int SIZE_PREFIX_LENGTH = 4;
}
/// @endcond
int old_buf_size = bb.capacity();
if ((old_buf_size & 0xC0000000) != 0) // Ensure we don't grow beyond what fits in an int.
throw new AssertionError("FlatBuffers: cannot grow buffer beyond 2 gigabytes.");
- int new_buf_size = old_buf_size << 1;
+ int new_buf_size = old_buf_size == 0 ? 1 : old_buf_size << 1;
bb.position(0);
ByteBuffer nbb = bb_factory.newByteBuffer(new_buf_size);
nbb.position(new_buf_size - old_buf_size);
* Finalize a buffer, pointing to the given `root_table`.
*
* @param root_table An offset to be added to the buffer.
+ * @param size_prefix Whether to prefix the size to the buffer.
*/
- public void finish(int root_table) {
- prep(minalign, SIZEOF_INT);
+ protected void finish(int root_table, boolean size_prefix) {
+ prep(minalign, SIZEOF_INT + (size_prefix ? SIZEOF_INT : 0));
addOffset(root_table);
+ if (size_prefix) {
+ addInt(bb.capacity() - space);
+ }
bb.position(space);
finished = true;
}
+ /**
+ * Finalize a buffer, pointing to the given `root_table`.
+ *
+ * @param root_table An offset to be added to the buffer.
+ */
+ public void finish(int root_table) {
+ finish(root_table, false);
+ }
+
+ /**
+ * Finalize a buffer, pointing to the given `root_table`, with the size prefixed.
+ *
+ * @param root_table An offset to be added to the buffer.
+ */
+ public void finishSizePrefixed(int root_table) {
+ finish(root_table, true);
+ }
+
/**
* Finalize a buffer, pointing to the given `root_table`.
*
* @param root_table An offset to be added to the buffer.
* @param file_identifier A FlatBuffer file identifier to be added to the buffer before
* `root_table`.
+ * @param size_prefix Whether to prefix the size to the buffer.
*/
- public void finish(int root_table, String file_identifier) {
- prep(minalign, SIZEOF_INT + FILE_IDENTIFIER_LENGTH);
+ protected void finish(int root_table, String file_identifier, boolean size_prefix) {
+ prep(minalign, SIZEOF_INT + FILE_IDENTIFIER_LENGTH + (size_prefix ? SIZEOF_INT : 0));
if (file_identifier.length() != FILE_IDENTIFIER_LENGTH)
throw new AssertionError("FlatBuffers: file identifier must be length " +
FILE_IDENTIFIER_LENGTH);
for (int i = FILE_IDENTIFIER_LENGTH - 1; i >= 0; i--) {
addByte((byte)file_identifier.charAt(i));
}
- finish(root_table);
+ finish(root_table, size_prefix);
+ }
+
+ /**
+ * Finalize a buffer, pointing to the given `root_table`.
+ *
+ * @param root_table An offset to be added to the buffer.
+ * @param file_identifier A FlatBuffer file identifier to be added to the buffer before
+ * `root_table`.
+ */
+ public void finish(int root_table, String file_identifier) {
+ finish(root_table, file_identifier, false);
+ }
+
+ /**
+ * Finalize a buffer, pointing to the given `root_table`, with the size prefixed.
+ *
+ * @param root_table An offset to be added to the buffer.
+ * @param file_identifier A FlatBuffer file identifier to be added to the buffer before
+ * `root_table`.
+ */
+ public void finishSizePrefixed(int root_table, String file_identifier) {
+ finish(root_table, file_identifier, true);
}
/**
cr.throwException();
}
} catch (CharacterCodingException x) {
- throw new Error(x);
+ throw new RuntimeException(x);
}
return dst.flip().toString();
return bb;
}
+ /**
+ * Initialize vector as a ByteBuffer.
+ *
+ * This is more efficient than using duplicate, since it neither copies the data
+ * nor allocates a new {@link ByteBuffer}, creating no garbage to be collected.
+ *
+ * @param bb The {@link ByteBuffer} for the array
+ * @param vector_offset The position of the vector in the byte buffer
+ * @param elem_size The size of each element in the array
+ * @return The {@link ByteBuffer} for the array
+ */
+ protected ByteBuffer __vector_in_bytebuffer(ByteBuffer bb, int vector_offset, int elem_size) {
+ int o = this.__offset(vector_offset);
+ if (o == 0) return null;
+ int vectorstart = __vector(o);
+ bb.rewind();
+ bb.limit(vectorstart + __vector_len(o) * elem_size);
+ bb.position(vectorstart);
+ return bb;
+ }
+
/**
* Initialize any Table-derived type to point to the union at the given `offset`.
*
//
using System;
+using System.IO;
+using System.Text;
namespace FlatBuffers
{
/// </summary>
public class ByteBuffer
{
- private readonly byte[] _buffer;
+ protected byte[] _buffer;
private int _pos; // Must track start of the buffer.
public int Length { get { return _buffer.Length; } }
- public byte[] Data { get { return _buffer; } }
+ public ByteBuffer(int size) : this(new byte[size]) { }
public ByteBuffer(byte[] buffer) : this(buffer, 0) { }
_pos = 0;
}
+ // Create a new ByteBuffer on the same underlying data.
+ // The new ByteBuffer's position will be the same as this buffer's.
+ public ByteBuffer Duplicate()
+ {
+ return new ByteBuffer(_buffer, Position);
+ }
+
+ // Increases the size of the ByteBuffer, and copies the old data towards
+ // the end of the new buffer.
+ public void GrowFront(int newSize)
+ {
+ if ((Length & 0xC0000000) != 0)
+ throw new Exception(
+ "ByteBuffer: cannot grow buffer beyond 2 gigabytes.");
+
+ if (newSize < Length)
+ throw new Exception("ByteBuffer: cannot truncate buffer.");
+
+ byte[] newBuffer = new byte[newSize];
+ Buffer.BlockCopy(_buffer, 0, newBuffer, newSize - Length,
+ Length);
+ _buffer = newBuffer;
+ }
+
+ public byte[] ToArray(int pos, int len)
+ {
+ byte[] arr = new byte[len];
+ Buffer.BlockCopy(_buffer, pos, arr, 0, len);
+ return arr;
+ }
+
+ public byte[] ToSizedArray()
+ {
+ return ToArray(Position, Length - Position);
+ }
+
+ public byte[] ToFullArray()
+ {
+ return ToArray(0, Length);
+ }
+
+ public ArraySegment<byte> ToArraySegment(int pos, int len)
+ {
+ return new ArraySegment<byte>(_buffer, pos, len);
+ }
+
+ public MemoryStream ToMemoryStream(int pos, int len)
+ {
+ return new MemoryStream(_buffer, pos, len);
+ }
+
+#if !UNSAFE_BYTEBUFFER
// Pre-allocated helper arrays for conversion.
private float[] floathelper = new[] { 0.0f };
private int[] inthelper = new[] { 0 };
private double[] doublehelper = new[] { 0.0 };
private ulong[] ulonghelper = new[] { 0UL };
+#endif // !UNSAFE_BYTEBUFFER
// Helper functions for the unsafe version.
static public ushort ReverseBytes(ushort input)
}
#endif // !UNSAFE_BYTEBUFFER
-
private void AssertOffsetAndLength(int offset, int length)
{
#if !BYTEBUFFER_NO_BOUNDS_CHECK
PutByte(offset, value);
}
+ public void PutStringUTF8(int offset, string value)
+ {
+ AssertOffsetAndLength(offset, value.Length);
+ Encoding.UTF8.GetBytes(value, 0, value.Length,
+ _buffer, offset);
+ }
+
#if UNSAFE_BYTEBUFFER
// Unsafe but more efficient versions of Put*.
public void PutShort(int offset, short value)
return _buffer[index];
}
+ public string GetStringUTF8(int startPos, int len)
+ {
+ return Encoding.UTF8.GetString(_buffer, startPos, len);
+ }
+
#if UNSAFE_BYTEBUFFER
// Unsafe but more efficient versions of Get*.
public short GetShort(int offset)
--- /dev/null
+/*
+ * Copyright 2017 Google Inc. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace FlatBuffers
+{
+ /// <summary>
+ /// Class that collects utility functions around `ByteBuffer`.
+ /// </summary>
+ public class ByteBufferUtil
+ {
+ // Extract the size prefix from a `ByteBuffer`.
+ public static int GetSizePrefix(ByteBuffer bb) {
+ return bb.GetInt(bb.Position);
+ }
+
+ // Create a duplicate of a size-prefixed `ByteBuffer` that has its position
+ // advanced just past the size prefix.
+ public static ByteBuffer RemoveSizePrefix(ByteBuffer bb) {
+ ByteBuffer s = bb.Duplicate();
+ s.Position += FlatBufferConstants.SizePrefixLength;
+ return s;
+ }
+ }
+}
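// Illustrative sketch (not part of the patched source): the C# helpers mirror the
// Java ones; `builder` is assumed to be a FlatBufferBuilder finished with the
// FinishSizePrefixed() overloads added further down in this patch.
//   ByteBuffer bb = builder.DataBuffer;
//   int size = ByteBufferUtil.GetSizePrefix(bb);
//   ByteBuffer data = ByteBufferUtil.RemoveSizePrefix(bb);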
throw new ArgumentOutOfRangeException("initialSize",
initialSize, "Must be greater than zero");
_space = initialSize;
- _bb = new ByteBuffer(new byte[initialSize]);
+ _bb = new ByteBuffer(initialSize);
}
/// <summary>
// the end of the new buffer (since we build the buffer backwards).
void GrowBuffer()
{
- var oldBuf = _bb.Data;
- var oldBufSize = oldBuf.Length;
- if ((oldBufSize & 0xC0000000) != 0)
- throw new Exception(
- "FlatBuffers: cannot grow buffer beyond 2 gigabytes.");
-
- var newBufSize = oldBufSize << 1;
- var newBuf = new byte[newBufSize];
-
- Buffer.BlockCopy(oldBuf, 0, newBuf, newBufSize - oldBufSize,
- oldBufSize);
- _bb = new ByteBuffer(newBuf, newBufSize);
+ _bb.GrowFront(_bb.Length << 1);
}
// Prepare to write an element of `size` after `additional_bytes`
AddByte(0);
var utf8StringLen = Encoding.UTF8.GetByteCount(s);
StartVector(1, utf8StringLen, 1);
- Encoding.UTF8.GetBytes(s, 0, s.Length, _bb.Data, _space -= utf8StringLen);
+ _bb.PutStringUTF8(_space -= utf8StringLen, s);
return new StringOffset(EndVector().Value);
}
/// <param name="rootTable">
/// An offset to be added to the buffer.
/// </param>
- public void Finish(int rootTable)
+ /// <param name="sizePrefix">
+ /// Whether to prefix the size to the buffer.
+ /// </param>
+ protected void Finish(int rootTable, bool sizePrefix)
{
- Prep(_minAlign, sizeof(int));
+ Prep(_minAlign, sizeof(int) + (sizePrefix ? sizeof(int) : 0));
AddOffset(rootTable);
+ if (sizePrefix) {
+ AddInt(_bb.Length - _space);
+ }
_bb.Position = _space;
}
+ /// <summary>
+ /// Finalize a buffer, pointing to the given `root_table`.
+ /// </summary>
+ /// <param name="rootTable">
+ /// An offset to be added to the buffer.
+ /// </param>
+ public void Finish(int rootTable)
+ {
+ Finish(rootTable, false);
+ }
+
+ /// <summary>
+ /// Finalize a buffer, pointing to the given `root_table`, with the size prefixed.
+ /// </summary>
+ /// <param name="rootTable">
+ /// An offset to be added to the buffer.
+ /// </param>
+ public void FinishSizePrefixed(int rootTable)
+ {
+ Finish(rootTable, true);
+ }
+
/// <summary>
/// Get the ByteBuffer representing the FlatBuffer.
/// </summary>
/// </returns>
public byte[] SizedByteArray()
{
- var newArray = new byte[_bb.Data.Length - _bb.Position];
- Buffer.BlockCopy(_bb.Data, _bb.Position, newArray, 0,
- _bb.Data.Length - _bb.Position);
- return newArray;
- }
-
- /// <summary>
- /// Finalize a buffer, pointing to the given `rootTable`.
- /// </summary>
- /// <param name="rootTable">
- /// An offset to be added to the buffer.
- /// </param>
- /// <param name="fileIdentifier">
- /// A FlatBuffer file identifier to be added to the buffer before
- /// `root_table`.
- /// </param>
- public void Finish(int rootTable, string fileIdentifier)
- {
- Prep(_minAlign, sizeof(int) +
- FlatBufferConstants.FileIdentifierLength);
- if (fileIdentifier.Length !=
- FlatBufferConstants.FileIdentifierLength)
- throw new ArgumentException(
- "FlatBuffers: file identifier must be length " +
- FlatBufferConstants.FileIdentifierLength,
- "fileIdentifier");
- for (int i = FlatBufferConstants.FileIdentifierLength - 1; i >= 0;
- i--)
- {
- AddByte((byte)fileIdentifier[i]);
- }
- Finish(rootTable);
+ return _bb.ToSizedArray();
+ }
+
+ /// <summary>
+ /// Finalize a buffer, pointing to the given `rootTable`.
+ /// </summary>
+ /// <param name="rootTable">
+ /// An offset to be added to the buffer.
+ /// </param>
+ /// <param name="fileIdentifier">
+ /// A FlatBuffer file identifier to be added to the buffer before
+ /// `root_table`.
+ /// </param>
+ /// <param name="sizePrefix">
+ /// Whether to prefix the size to the buffer.
+ /// </param>
+ protected void Finish(int rootTable, string fileIdentifier, bool sizePrefix)
+ {
+ Prep(_minAlign, sizeof(int) + (sizePrefix ? sizeof(int) : 0) +
+ FlatBufferConstants.FileIdentifierLength);
+ if (fileIdentifier.Length !=
+ FlatBufferConstants.FileIdentifierLength)
+ throw new ArgumentException(
+ "FlatBuffers: file identifier must be length " +
+ FlatBufferConstants.FileIdentifierLength,
+ "fileIdentifier");
+ for (int i = FlatBufferConstants.FileIdentifierLength - 1; i >= 0;
+ i--)
+ {
+ AddByte((byte)fileIdentifier[i]);
+ }
+ Finish(rootTable, sizePrefix);
}
+ /// <summary>
+ /// Finalize a buffer, pointing to the given `rootTable`.
+ /// </summary>
+ /// <param name="rootTable">
+ /// An offset to be added to the buffer.
+ /// </param>
+ /// <param name="fileIdentifier">
+ /// A FlatBuffer file identifier to be added to the buffer before
+ /// `root_table`.
+ /// </param>
+ public void Finish(int rootTable, string fileIdentifier)
+ {
+ Finish(rootTable, fileIdentifier, false);
+ }
+ /// <summary>
+ /// Finalize a buffer, pointing to the given `rootTable`, with the size prefixed.
+ /// </summary>
+ /// <param name="rootTable">
+ /// An offset to be added to the buffer.
+ /// </param>
+ /// <param name="fileIdentifier">
+ /// A FlatBuffer file identifier to be added to the buffer before
+ /// `root_table`.
+ /// </param>
+ public void FinishSizePrefixed(int rootTable, string fileIdentifier)
+ {
+ Finish(rootTable, fileIdentifier, true);
+ }
}
}
public static class FlatBufferConstants
{
public const int FileIdentifierLength = 4;
+ public const int SizePrefixLength = 4;
}
}
offset += bb.GetInt(offset);
var len = bb.GetInt(offset);
var startPos = offset + sizeof(int);
- return Encoding.UTF8.GetString(bb.Data, startPos , len);
+ return bb.GetStringUTF8(startPos, len);
}
// Get the length of a vector whose offset is stored at "offset" in this object.
var pos = this.__vector(o);
var len = this.__vector_len(o);
- return new ArraySegment<byte>(this.bb.Data, pos, len);
+ return bb.ToArraySegment(pos, len);
}
// Initialize any Table-derived type to point to the union at the given offset.
var startPos_1 = offset_1 + sizeof(int);
var startPos_2 = offset_2 + sizeof(int);
var len = Math.Min(len_1, len_2);
- byte[] bbArray = bb.Data;
for(int i = 0; i < len; i++) {
- if (bbArray[i + startPos_1] != bbArray[i + startPos_2])
- return bbArray[i + startPos_1] - bbArray[i + startPos_2];
+ byte b1 = bb.Get(i + startPos_1);
+ byte b2 = bb.Get(i + startPos_2);
+ if (b1 != b2)
+ return b1 - b2;
}
return len_1 - len_2;
}
var len_2 = key.Length;
var startPos_1 = offset_1 + sizeof(int);
var len = Math.Min(len_1, len_2);
- byte[] bbArray = bb.Data;
for (int i = 0; i < len; i++) {
- if (bbArray[i + startPos_1] != key[i])
- return bbArray[i + startPos_1] - key[i];
+ byte b = bb.Get(i + startPos_1);
+ if (b != key[i])
+ return b - key[i];
}
return len_1 - len_2;
}
{
"name": "flatbuffers",
- "version": "1.8.0",
+ "version": "1.9.0",
"description": "Memory Efficient Serialization Library",
"files": ["js/flatbuffers.js", "js/flatbuffers.mjs"],
"main": "js/flatbuffers",
<modelVersion>4.0.0</modelVersion>
<groupId>com.google.flatbuffers</groupId>
<artifactId>flatbuffers-java</artifactId>
- <version>1.8.0</version>
+ <version>1.9.0</version>
<packaging>bundle</packaging>
<name>FlatBuffers Java API</name>
<description>
self.current_vtable[slotnum] = self.Offset()
## @endcond
- def Finish(self, rootTable):
+ def __Finish(self, rootTable, sizePrefix):
"""Finish finalizes a buffer, pointing to the given `rootTable`."""
N.enforce_number(rootTable, N.UOffsetTFlags)
- self.Prep(self.minalign, N.UOffsetTFlags.bytewidth)
+ prepSize = N.UOffsetTFlags.bytewidth
+ if sizePrefix:
+ prepSize += N.Int32Flags.bytewidth
+ self.Prep(self.minalign, prepSize)
self.PrependUOffsetTRelative(rootTable)
+ if sizePrefix:
+ size = len(self.Bytes) - self.Head()
+ N.enforce_number(size, N.Int32Flags)
+ self.PrependInt32(size)
self.finished = True
return self.Head()
+ def Finish(self, rootTable):
+ """Finish finalizes a buffer, pointing to the given `rootTable`."""
+ return self.__Finish(rootTable, False)
+
+ def FinishSizePrefixed(self, rootTable):
+ """
+ Finish finalizes a buffer, pointing to the given `rootTable`,
+ with the size prefixed.
+ """
+ return self.__Finish(rootTable, True)
+
## @cond FLATBUFFERS_INTERNAL
def Prepend(self, flags, off):
self.Prep(flags.bytewidth, 0)
--- /dev/null
+# Copyright 2017 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from . import encode
+from . import number_types
+from . import packer
+
+def GetSizePrefix(buf, offset):
+ """Extract the size prefix from a buffer."""
+ return encode.Get(packer.int32, buf, offset)
+
+def RemoveSizePrefix(buf, offset):
+ """
+ Create a slice of a size-prefixed buffer that has
+ its position advanced just past the size prefix.
+ """
+ return buf, offset + number_types.Int32Flags.bytewidth
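# Illustrative sketch (not part of the patched source): pairing these helpers
# with Builder.FinishSizePrefixed(); `root` stands in for any generated root
# table offset.
#   from flatbuffers import Builder, util
#   builder = Builder(0)
#   ...                                    # build objects, obtain `root`
#   builder.FinishSizePrefixed(root)
#   buf = builder.Output()
#   size = util.GetSizePrefix(buf, 0)
#   buf, offset = util.RemoveSizePrefix(buf, 0)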
--- /dev/null
+:: Copyright 2015 Google Inc. All rights reserved.
+::
+:: Licensed under the Apache License, Version 2.0 (the "License");
+:: you may not use this file except in compliance with the License.
+:: You may obtain a copy of the License at
+::
+:: http://www.apache.org/licenses/LICENSE-2.0
+::
+:: Unless required by applicable law or agreed to in writing, software
+:: distributed under the License is distributed on an "AS IS" BASIS,
+:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+:: See the License for the specific language governing permissions and
+:: limitations under the License.
+
+set buildtype=Release
+if "%1"=="-b" set buildtype=%2
+
+..\%buildtype%\flatc.exe --cpp --no-prefix -o ../include/flatbuffers reflection.fbs
struct Weapon;
struct WeaponT;
+inline const flatbuffers::TypeTable *Vec3TypeTable();
+
+inline const flatbuffers::TypeTable *MonsterTypeTable();
+
+inline const flatbuffers::TypeTable *WeaponTypeTable();
+
enum Color {
Color_Red = 0,
Color_Green = 1,
Color_MAX = Color_Blue
};
-inline Color (&EnumValuesColor())[3] {
- static Color values[] = {
+inline const Color (&EnumValuesColor())[3] {
+ static const Color values[] = {
Color_Red,
Color_Green,
Color_Blue
return values;
}
-inline const char **EnumNamesColor() {
- static const char *names[] = {
+inline const char * const *EnumNamesColor() {
+ static const char * const names[] = {
"Red",
"Green",
"Blue",
Equipment_MAX = Equipment_Weapon
};
-inline Equipment (&EnumValuesEquipment())[2] {
- static Equipment values[] = {
+inline const Equipment (&EnumValuesEquipment())[2] {
+ static const Equipment values[] = {
Equipment_NONE,
Equipment_Weapon
};
return values;
}
-inline const char **EnumNamesEquipment() {
- static const char *names[] = {
+inline const char * const *EnumNamesEquipment() {
+ static const char * const names[] = {
"NONE",
"Weapon",
nullptr
bool VerifyEquipment(flatbuffers::Verifier &verifier, const void *obj, Equipment type);
bool VerifyEquipmentVector(flatbuffers::Verifier &verifier, const flatbuffers::Vector<flatbuffers::Offset<void>> *values, const flatbuffers::Vector<uint8_t> *types);
-MANUALLY_ALIGNED_STRUCT(4) Vec3 FLATBUFFERS_FINAL_CLASS {
+FLATBUFFERS_MANUALLY_ALIGNED_STRUCT(4) Vec3 FLATBUFFERS_FINAL_CLASS {
private:
float x_;
float y_;
flatbuffers::WriteScalar(&z_, _z);
}
};
-STRUCT_END(Vec3, 12);
+FLATBUFFERS_STRUCT_END(Vec3, 12);
struct MonsterT : public flatbuffers::NativeTable {
typedef Monster TableType;
struct Monster FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
typedef MonsterT NativeTableType;
+ static const flatbuffers::TypeTable *MiniReflectTypeTable() {
+ return MonsterTypeTable();
+ }
enum {
VT_POS = 4,
VT_MANA = 6,
struct Weapon FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
typedef WeaponT NativeTableType;
+ static const flatbuffers::TypeTable *MiniReflectTypeTable() {
+ return WeaponTypeTable();
+ }
enum {
VT_NAME = 4,
VT_DAMAGE = 6
type = Equipment_NONE;
}
-inline flatbuffers::TypeTable *Vec3TypeTable();
-
-inline flatbuffers::TypeTable *MonsterTypeTable();
-
-inline flatbuffers::TypeTable *WeaponTypeTable();
-
-inline flatbuffers::TypeTable *ColorTypeTable() {
- static flatbuffers::TypeCode type_codes[] = {
+inline const flatbuffers::TypeTable *ColorTypeTable() {
+ static const flatbuffers::TypeCode type_codes[] = {
{ flatbuffers::ET_CHAR, 0, 0 },
{ flatbuffers::ET_CHAR, 0, 0 },
{ flatbuffers::ET_CHAR, 0, 0 }
};
- static flatbuffers::TypeFunction type_refs[] = {
+ static const flatbuffers::TypeFunction type_refs[] = {
ColorTypeTable
};
- static const char *names[] = {
+ static const char * const names[] = {
"Red",
"Green",
"Blue"
};
- static flatbuffers::TypeTable tt = {
+ static const flatbuffers::TypeTable tt = {
flatbuffers::ST_ENUM, 3, type_codes, type_refs, nullptr, names
};
return &tt;
}
-inline flatbuffers::TypeTable *EquipmentTypeTable() {
- static flatbuffers::TypeCode type_codes[] = {
+inline const flatbuffers::TypeTable *EquipmentTypeTable() {
+ static const flatbuffers::TypeCode type_codes[] = {
{ flatbuffers::ET_SEQUENCE, 0, -1 },
{ flatbuffers::ET_SEQUENCE, 0, 0 }
};
- static flatbuffers::TypeFunction type_refs[] = {
+ static const flatbuffers::TypeFunction type_refs[] = {
WeaponTypeTable
};
- static const char *names[] = {
+ static const char * const names[] = {
"NONE",
"Weapon"
};
- static flatbuffers::TypeTable tt = {
+ static const flatbuffers::TypeTable tt = {
flatbuffers::ST_UNION, 2, type_codes, type_refs, nullptr, names
};
return &tt;
}
-inline flatbuffers::TypeTable *Vec3TypeTable() {
- static flatbuffers::TypeCode type_codes[] = {
+inline const flatbuffers::TypeTable *Vec3TypeTable() {
+ static const flatbuffers::TypeCode type_codes[] = {
{ flatbuffers::ET_FLOAT, 0, -1 },
{ flatbuffers::ET_FLOAT, 0, -1 },
{ flatbuffers::ET_FLOAT, 0, -1 }
};
static const int32_t values[] = { 0, 4, 8, 12 };
- static const char *names[] = {
+ static const char * const names[] = {
"x",
"y",
"z"
};
- static flatbuffers::TypeTable tt = {
+ static const flatbuffers::TypeTable tt = {
flatbuffers::ST_STRUCT, 3, type_codes, nullptr, values, names
};
return &tt;
}
-inline flatbuffers::TypeTable *MonsterTypeTable() {
- static flatbuffers::TypeCode type_codes[] = {
+inline const flatbuffers::TypeTable *MonsterTypeTable() {
+ static const flatbuffers::TypeCode type_codes[] = {
{ flatbuffers::ET_SEQUENCE, 0, 0 },
{ flatbuffers::ET_SHORT, 0, -1 },
{ flatbuffers::ET_SHORT, 0, -1 },
{ flatbuffers::ET_UTYPE, 0, 3 },
{ flatbuffers::ET_SEQUENCE, 0, 3 }
};
- static flatbuffers::TypeFunction type_refs[] = {
+ static const flatbuffers::TypeFunction type_refs[] = {
Vec3TypeTable,
ColorTypeTable,
WeaponTypeTable,
EquipmentTypeTable
};
- static const char *names[] = {
+ static const char * const names[] = {
"pos",
"mana",
"hp",
"equipped_type",
"equipped"
};
- static flatbuffers::TypeTable tt = {
+ static const flatbuffers::TypeTable tt = {
flatbuffers::ST_TABLE, 10, type_codes, type_refs, nullptr, names
};
return &tt;
}
-inline flatbuffers::TypeTable *WeaponTypeTable() {
- static flatbuffers::TypeCode type_codes[] = {
+inline const flatbuffers::TypeTable *WeaponTypeTable() {
+ static const flatbuffers::TypeCode type_codes[] = {
{ flatbuffers::ET_STRING, 0, -1 },
{ flatbuffers::ET_SHORT, 0, -1 }
};
- static const char *names[] = {
+ static const char * const names[] = {
"name",
"damage"
};
- static flatbuffers::TypeTable tt = {
+ static const flatbuffers::TypeTable tt = {
flatbuffers::ST_TABLE, 2, type_codes, nullptr, nullptr, names
};
return &tt;
return flatbuffers::GetRoot<MyGame::Sample::Monster>(buf);
}
+inline const MyGame::Sample::Monster *GetSizePrefixedMonster(const void *buf) {
+ return flatbuffers::GetSizePrefixedRoot<MyGame::Sample::Monster>(buf);
+}
+
inline Monster *GetMutableMonster(void *buf) {
return flatbuffers::GetMutableRoot<Monster>(buf);
}
return verifier.VerifyBuffer<MyGame::Sample::Monster>(nullptr);
}
+inline bool VerifySizePrefixedMonsterBuffer(
+ flatbuffers::Verifier &verifier) {
+ return verifier.VerifySizePrefixedBuffer<MyGame::Sample::Monster>(nullptr);
+}
+
inline void FinishMonsterBuffer(
flatbuffers::FlatBufferBuilder &fbb,
flatbuffers::Offset<MyGame::Sample::Monster> root) {
fbb.Finish(root);
}
+inline void FinishSizePrefixedMonsterBuffer(
+ flatbuffers::FlatBufferBuilder &fbb,
+ flatbuffers::Offset<MyGame::Sample::Monster> root) {
+ fbb.FinishSizePrefixed(root);
+}
+
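// Illustrative sketch (not part of the patched source): round-tripping the sample
// Monster with the size-prefixed helpers generated above.
//   flatbuffers::FlatBufferBuilder fbb;
//   auto name = fbb.CreateString("Orc");
//   MyGame::Sample::MonsterBuilder mb(fbb);
//   mb.add_name(name);
//   MyGame::Sample::FinishSizePrefixedMonsterBuffer(fbb, mb.Finish());
//   auto monster = MyGame::Sample::GetSizePrefixedMonster(fbb.GetBufferPointer());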
inline flatbuffers::unique_ptr<MonsterT> UnPackMonster(
const void *buf,
const flatbuffers::resolver_function_t *res = nullptr) {
const std::string &value = iter->second;
stream_ << value;
} else {
- assert(false && "could not find key");
+ FLATBUFFERS_ASSERT(false && "could not find key");
stream_ << key;
}
#include <list>
-#define FLATC_VERSION "1.8.0 (" __DATE__ " " __TIME__ ")"
+#define FLATC_VERSION "1.9.0 (" __DATE__ " " __TIME__ ")"
namespace flatbuffers {
" (default is \"github.com/google/flatbuffers/go\")\n"
" --raw-binary Allow binaries without file_indentifier to be read.\n"
" This may crash flatc given a mismatched schema.\n"
+ " --size-prefixed Input binaries are size prefixed buffers.\n"
" --proto Input is a .proto, translate to .fbs.\n"
+ " --oneof-union Translate .proto oneofs to flatbuffer unions.\n"
" --grpc Generate GRPC interfaces for the specified languages\n"
" --schema Serialize schemas instead of JSON (use with -b)\n"
" --bfbs-comments Add doc comments to the binary schema files.\n"
opts.one_file = true;
} else if (arg == "--raw-binary") {
raw_binary = true;
+ } else if (arg == "--size-prefixed") {
+ opts.size_prefixed = true;
} else if (arg == "--") { // Separator between text and binary inputs.
binary_files_from = filenames.size();
} else if (arg == "--proto") {
opts.proto_mode = true;
+ } else if (arg == "--oneof-union") {
+ opts.proto_oneof_union = true;
} else if (arg == "--schema") {
schema_binary = true;
} else if (arg == "-M") {
"\" matches the schema, use --raw-binary to read this file"
" anyway.");
} else if (!flatbuffers::BufferHasIdentifier(
- contents.c_str(), parser->file_identifier_.c_str())) {
+ contents.c_str(), parser->file_identifier_.c_str(), opts.size_prefixed)) {
Error("binary \"" + filename +
"\" does not have expected file_identifier \"" +
parser->file_identifier_ +
const char *name = argv[0];
if (argc <= 1) {
printf("%s HASH [OPTION]... STRING... [-- STRING...]\n", name);
- printf("Available hashing algorithms:\n 32 bit:\n");
- size_t size = sizeof(flatbuffers::kHashFunctions32) /
+ printf("Available hashing algorithms:\n");
+ printf(" 16 bit:\n");
+ size_t size = sizeof(flatbuffers::kHashFunctions16) /
+ sizeof(flatbuffers::kHashFunctions16[0]);
+ for (size_t i = 0; i < size; ++i) {
+ printf(" * %s\n", flatbuffers::kHashFunctions16[i].name);
+ }
+ printf(" 32 bit:\n");
+ size = sizeof(flatbuffers::kHashFunctions32) /
sizeof(flatbuffers::kHashFunctions32[0]);
for (size_t i = 0; i < size; ++i) {
printf(" * %s\n", flatbuffers::kHashFunctions32[i].name);
const char *hash_algorithm = argv[1];
+ flatbuffers::NamedHashFunction<uint16_t>::HashFunction hash_function16 =
+ flatbuffers::FindHashFunction16(hash_algorithm);
flatbuffers::NamedHashFunction<uint32_t>::HashFunction hash_function32 =
flatbuffers::FindHashFunction32(hash_algorithm);
flatbuffers::NamedHashFunction<uint64_t>::HashFunction hash_function64 =
flatbuffers::FindHashFunction64(hash_algorithm);
- if (!hash_function32 && !hash_function64) {
+ if (!hash_function16 && !hash_function32 && !hash_function64) {
printf("\"%s\" is not a known hash algorithm.\n", hash_algorithm);
return 0;
}
ss << std::hex;
ss << "0x";
}
- if (hash_function32)
+ if (hash_function16)
+ ss << hash_function16(arg);
+ else if (hash_function32)
ss << hash_function32(arg);
else if (hash_function64)
ss << hash_function64(arg);
#include "flatbuffers/idl.h"
#include "flatbuffers/util.h"
+#include <unordered_set>
+
namespace flatbuffers {
// Pedantic warning free version of toupper().
const std::string &file_name)
: BaseGenerator(parser, path, file_name, "", "::"),
cur_name_space_(nullptr) {
- const char *keywords[] = { "alignas",
+ static const char * const keywords[] = {
+ "alignas",
"alignof",
"and",
"and_eq",
std::string guard = file_name_;
// Remove any non-alpha-numeric characters that may appear in a filename.
struct IsAlnum {
- bool operator()(char c) { return !isalnum(c); }
+ bool operator()(char c) const { return !isalnum(c); }
};
guard.erase(std::remove_if(guard.begin(), guard.end(), IsAlnum()),
guard.end());
if (parser_.opts.include_dependence_headers) { GenIncludeDependencies(); }
- assert(!cur_name_space_);
+ FLATBUFFERS_ASSERT(!cur_name_space_);
// Generate forward declarations for all structs/tables, since they may
// have circular references.
code_ += "";
}
}
+ // Generate preamble code for mini reflection.
+ if (parser_.opts.mini_reflect != IDLOptions::kNone) {
+ // To break cyclic dependencies, first pre-declare all tables/structs.
+ for (auto it = parser_.structs_.vec.begin();
+ it != parser_.structs_.vec.end(); ++it) {
+ const auto &struct_def = **it;
+ if (!struct_def.generated) {
+ SetNameSpace(struct_def.defined_namespace);
+ GenMiniReflectPre(&struct_def);
+ }
+ }
+ }
// Generate code for all the enum declarations.
for (auto it = parser_.enums_.vec.begin(); it != parser_.enums_.vec.end();
// Generate code for mini reflection.
if (parser_.opts.mini_reflect != IDLOptions::kNone) {
- // To break cyclic dependencies, first pre-declare all tables/structs.
- for (auto it = parser_.structs_.vec.begin();
- it != parser_.structs_.vec.end(); ++it) {
- const auto &struct_def = **it;
- if (!struct_def.generated) {
- SetNameSpace(struct_def.defined_namespace);
- GenMiniReflectPre(&struct_def);
- }
- }
// Then the unions/enums that may refer to them.
for (auto it = parser_.enums_.vec.begin(); it != parser_.enums_.vec.end();
++it) {
code_ += "}";
code_ += "";
+ code_ += "inline \\";
+ code_ +=
+ "const {{CPP_NAME}} *{{NULLABLE_EXT}}GetSizePrefixed{{STRUCT_NAME}}(const void "
+ "*buf) {";
+ code_ += " return flatbuffers::GetSizePrefixedRoot<{{CPP_NAME}}>(buf);";
+ code_ += "}";
+ code_ += "";
+
if (parser_.opts.mutable_buffer) {
code_ += "inline \\";
code_ += "{{STRUCT_NAME}} *GetMutable{{STRUCT_NAME}}(void *buf) {";
code_ += "}";
code_ += "";
+ code_ += "inline bool VerifySizePrefixed{{STRUCT_NAME}}Buffer(";
+ code_ += " flatbuffers::Verifier &verifier) {";
+ code_ += " return verifier.VerifySizePrefixedBuffer<{{CPP_NAME}}>({{ID}});";
+ code_ += "}";
+ code_ += "";
+
if (parser_.file_extension_.length()) {
// Return the extension
code_ += "inline const char *{{STRUCT_NAME}}Extension() {";
code_ += "}";
code_ += "";
+ code_ += "inline void FinishSizePrefixed{{STRUCT_NAME}}Buffer(";
+ code_ += " flatbuffers::FlatBufferBuilder &fbb,";
+ code_ += " flatbuffers::Offset<{{CPP_NAME}}> root) {";
+ if (parser_.file_identifier_.length())
+ code_ += " fbb.FinishSizePrefixed(root, {{STRUCT_NAME}}Identifier());";
+ else
+ code_ += " fbb.FinishSizePrefixed(root);";
+ code_ += "}";
+ code_ += "";
+
if (parser_.opts.generate_object_based_api) {
// A convenient root unpack function.
auto native_name =
private:
CodeWriter code_;
- std::set<std::string> keywords_;
+ std::unordered_set<std::string> keywords_;
// This tracks the current namespace so we can insert namespace declarations.
const Namespace *cur_name_space_;
// Return a C++ type from the table in idl.h
std::string GenTypeBasic(const Type &type, bool user_facing_type) const {
- static const char *ctypename[] = {
+ static const char * const ctypename[] = {
// clang-format off
#define FLATBUFFERS_TD(ENUM, IDLTYPE, CTYPE, JTYPE, GTYPE, NTYPE, PTYPE) \
#CTYPE,
}
std::string GenPtrGet(const FieldDef &field) {
+ auto cpp_ptr_type_get = field.attributes.Lookup("cpp_ptr_type_get");
+ if (cpp_ptr_type_get)
+ return cpp_ptr_type_get->constant;
auto &ptr_type = PtrType(&field);
return ptr_type == "naked" ? "" : ".get()";
}
return actual_type ? (native_type ? "std::string" : "flatbuffers::String")
: Name(ev);
} else {
- assert(false);
+ FLATBUFFERS_ASSERT(false);
return Name(ev);
}
}
void GenMiniReflectPre(const StructDef *struct_def) {
code_.SetValue("NAME", struct_def->name);
- code_ += "inline flatbuffers::TypeTable *{{NAME}}TypeTable();";
+ code_ += "inline const flatbuffers::TypeTable *{{NAME}}TypeTable();";
code_ += "";
}
code_.SetValue("REFS", rs);
code_.SetValue("NAMES", ns);
code_.SetValue("VALUES", vs);
- code_ += "inline flatbuffers::TypeTable *{{NAME}}TypeTable() {";
+ code_ += "inline const flatbuffers::TypeTable *{{NAME}}TypeTable() {";
if (num_fields) {
- code_ += " static flatbuffers::TypeCode type_codes[] = {";
+ code_ += " static const flatbuffers::TypeCode type_codes[] = {";
code_ += " {{TYPES}}";
code_ += " };";
}
if (!type_refs.empty()) {
- code_ += " static flatbuffers::TypeFunction type_refs[] = {";
+ code_ += " static const flatbuffers::TypeFunction type_refs[] = {";
code_ += " {{REFS}}";
code_ += " };";
}
auto has_names =
num_fields && parser_.opts.mini_reflect == IDLOptions::kTypesAndNames;
if (has_names) {
- code_ += " static const char *names[] = {";
+ code_ += " static const char * const names[] = {";
code_ += " {{NAMES}}";
code_ += " };";
}
- code_ += " static flatbuffers::TypeTable tt = {";
+ code_ += " static const flatbuffers::TypeTable tt = {";
code_ += std::string(" flatbuffers::{{SEQ_TYPE}}, {{NUM_FIELDS}}, ") +
(num_fields ? "type_codes, " : "nullptr, ") +
(!type_refs.empty() ? "type_refs, " : "nullptr, ") +
}
if (parser_.opts.scoped_enums || parser_.opts.prefixed_enums) {
- assert(minv && maxv);
+ FLATBUFFERS_ASSERT(minv && maxv);
code_.SetValue("SEP", ",\n");
if (enum_def.attributes.Lookup("bit_flags")) {
code_ += "};";
if (parser_.opts.scoped_enums && enum_def.attributes.Lookup("bit_flags")) {
- code_ += "DEFINE_BITMASK_OPERATORS({{ENUM_NAME}}, {{BASE_TYPE}})";
+ code_ += "FLATBUFFERS_DEFINE_BITMASK_OPERATORS({{ENUM_NAME}}, {{BASE_TYPE}})";
}
code_ += "";
// Generate an array of all enumeration values
auto num_fields = NumToString(enum_def.vals.vec.size());
- code_ += "inline {{ENUM_NAME}} (&EnumValues{{ENUM_NAME}}())[" + num_fields +
+ code_ += "inline const {{ENUM_NAME}} (&EnumValues{{ENUM_NAME}}())[" + num_fields +
"] {";
- code_ += " static {{ENUM_NAME}} values[] = {";
+ code_ += " static const {{ENUM_NAME}} values[] = {";
for (auto it = enum_def.vals.vec.begin(); it != enum_def.vals.vec.end();
++it) {
const auto &ev = **it;
static const int kMaxSparseness = 5;
if (range / static_cast<int64_t>(enum_def.vals.vec.size()) <
kMaxSparseness) {
- code_ += "inline const char **EnumNames{{ENUM_NAME}}() {";
- code_ += " static const char *names[] = {";
+ code_ += "inline const char * const *EnumNames{{ENUM_NAME}}() {";
+ code_ += " static const char * const names[] = {";
auto val = enum_def.vals.vec.front()->value;
for (auto it = enum_def.vals.vec.begin(); it != enum_def.vals.vec.end();
code_ += getptr;
code_ += " return verifier.Verify(ptr);";
} else {
- assert(false);
+ FLATBUFFERS_ASSERT(false);
}
code_ += " }";
} else {
} else if (ev.union_type.base_type == BASE_TYPE_STRING) {
code_ += " return new std::string(ptr->c_str(), ptr->size());";
} else {
- assert(false);
+ FLATBUFFERS_ASSERT(false);
}
code_ += " }";
}
} else if (ev.union_type.base_type == BASE_TYPE_STRING) {
code_ += " return _fbb.CreateString(*ptr).Union();";
} else {
- assert(false);
+ FLATBUFFERS_ASSERT(false);
}
code_ += " }";
}
" value = new {{TYPE}}(*reinterpret_cast<{{TYPE}} *>"
"(u.value));";
} else {
- code_ += " assert(false); // {{TYPE}} not copyable.";
+ code_ += " FLATBUFFERS_ASSERT(false); // {{TYPE}} not copyable.";
}
code_ += " break;";
code_ += " }";
: field.value.constant;
}
- std::string GetDefaultScalarValue(const FieldDef &field) {
+ std::string GetDefaultScalarValue(const FieldDef &field, bool is_ctor) {
if (field.value.type.enum_def && IsScalar(field.value.type.base_type)) {
auto ev = field.value.type.enum_def->ReverseLookup(
StringToInt(field.value.constant.c_str()), false);
}
} else if (field.value.type.base_type == BASE_TYPE_BOOL) {
return field.value.constant == "0" ? "false" : "true";
+ } else if (field.attributes.Lookup("cpp_type")) {
+ if (is_ctor) {
+ if (PtrType(&field) == "naked") {
+ return "nullptr";
+ } else {
+ return "";
+ }
+ } else {
+ return "0";
+ }
} else {
return GenDefaultConstant(field);
}
code_.SetValue("PARAM_VALUE", "nullptr");
} else {
code_.SetValue("PARAM_TYPE", GenTypeWire(field.value.type, " ", true));
- code_.SetValue("PARAM_VALUE", GetDefaultScalarValue(field));
+ code_.SetValue("PARAM_VALUE", GetDefaultScalarValue(field, false));
}
code_ += "{{PRE}}{{PARAM_TYPE}}{{PARAM_NAME}} = {{PARAM_VALUE}}\\";
}
field.value.type.element != BASE_TYPE_UTYPE)) {
auto type = GenTypeNative(field.value.type, false, field);
auto cpp_type = field.attributes.Lookup("cpp_type");
- auto full_type = (cpp_type ? cpp_type->constant + " *" : type + " ");
+ auto full_type =
+ (cpp_type ? (field.value.type.base_type == BASE_TYPE_VECTOR
+ ? "std::vector<" + GenTypeNativePtr(cpp_type->constant, &field, false) + "> "
+ : GenTypeNativePtr(cpp_type->constant, &field, false))
+ : type + " ");
code_.SetValue("FIELD_TYPE", full_type);
code_.SetValue("FIELD_NAME", Name(field));
code_ += " {{FIELD_TYPE}}{{FIELD_NAME}};";
if (!field.deprecated && // Deprecated fields won't be accessible.
field.value.type.base_type != BASE_TYPE_UTYPE) {
auto cpp_type = field.attributes.Lookup("cpp_type");
+ auto native_default = field.attributes.Lookup("native_default");
// Scalar types get parsed defaults, raw pointers get nullptrs.
if (IsScalar(field.value.type.base_type)) {
if (!initializer_list.empty()) { initializer_list += ",\n "; }
initializer_list += Name(field);
- initializer_list += "(" + GetDefaultScalarValue(field) + ")";
+ initializer_list += "(" + (native_default ? std::string(native_default->constant) : GetDefaultScalarValue(field, true)) + ")";
} else if (field.value.type.base_type == BASE_TYPE_STRUCT) {
if (IsStruct(field.value.type)) {
- auto native_default = field.attributes.Lookup("native_default");
if (native_default) {
if (!initializer_list.empty()) {
initializer_list += ",\n ";
Name(field) + "(" + native_default->constant + ")";
}
}
- } else if (cpp_type) {
+ } else if (cpp_type && field.value.type.base_type != BASE_TYPE_VECTOR) {
if (!initializer_list.empty()) { initializer_list += ",\n "; }
initializer_list += Name(field) + "(0)";
}
if (parser_.opts.generate_object_based_api) {
code_ += " typedef {{NATIVE_NAME}} NativeTableType;";
}
+ if (parser_.opts.mini_reflect != IDLOptions::kNone) {
+ code_ += " static const flatbuffers::TypeTable *MiniReflectTypeTable() {";
+ code_ += " return {{STRUCT_NAME}}TypeTable();";
+ code_ += " }";
+ }
+
GenFullyQualifiedNameGetter(struct_def, Name(struct_def));
auto nested = field.attributes.Lookup("nested_flatbuffer");
if (nested) {
- std::string qualified_name =
- parser_.current_namespace_->GetFullyQualifiedName(nested->constant);
- auto nested_root = parser_.LookupStruct(qualified_name);
- assert(nested_root); // Guaranteed to exist by parser.
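+ // Look the nested type up by the attribute value as given first, then fall
+ // back to resolving it relative to the current namespace.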
+ std::string qualified_name = nested->constant;
+ auto nested_root = parser_.LookupStruct(nested->constant);
+ if (nested_root == nullptr) {
+ qualified_name = parser_.current_namespace_->GetFullyQualifiedName(
+ nested->constant);
+ nested_root = parser_.LookupStruct(qualified_name);
+ }
+ FLATBUFFERS_ASSERT(nested_root); // Guaranteed to exist by parser.
(void)nested_root;
code_.SetValue("CPP_NAME", TranslateNameSpace(qualified_name));
std::string code;
switch (field.value.type.base_type) {
case BASE_TYPE_VECTOR: {
+ auto cpp_type = field.attributes.Lookup("cpp_type");
std::string indexing;
if (field.value.type.enum_def) {
indexing += "(" + field.value.type.enum_def->name + ")";
code += "{ _o->" + name + ".resize(_e->size()); ";
code += "for (flatbuffers::uoffset_t _i = 0;";
code += " _i < _e->size(); _i++) { ";
- code += "_o->" + name + "[_i]" + access + " = ";
- code +=
+ if (cpp_type) {
+ // Generate code that resolves the cpp pointer type, of the form:
+ // if (resolver)
+ // (*resolver)(&_o->field, (hash_value_t)(_e));
+ // else
+ // _o->field = nullptr;
+ code += "//vector resolver, " + PtrType(&field) + "\n";
+ code += "if (_resolver) ";
+ code += "(*_resolver)";
+ code += "(reinterpret_cast<void **>(&_o->" + name + "[_i]" + access + "), ";
+ code += "static_cast<flatbuffers::hash_value_t>(" + indexing + "));";
+ if (PtrType(&field) == "naked") {
+ code += " else ";
+ code += "_o->" + name + "[_i]" + access + " = nullptr";
+ } else {
+ //code += " else ";
+ //code += "_o->" + name + "[_i]" + access + " = " + GenTypeNativePtr(cpp_type->constant, &field, true) + "();";
+ code += "/* else do nothing */";
+ }
+ } else {
+ code += "_o->" + name + "[_i]" + access + " = ";
+ code +=
GenUnpackVal(field.value.type.VectorType(), indexing, true, field);
+ }
code += "; } }";
break;
}
case BASE_TYPE_UTYPE: {
- assert(union_field->value.type.base_type == BASE_TYPE_UNION);
+ FLATBUFFERS_ASSERT(union_field->value.type.base_type == BASE_TYPE_UNION);
// Generate code that sets the union type, of the form:
// _o->field.type = _e;
code += "_o->" + union_field->name + ".type = _e;";
// (*resolver)(&_o->field, (hash_value_t)(_e));
// else
// _o->field = nullptr;
+ code += "//scalar resolver, " + PtrType(&field) + " \n";
code += "if (_resolver) ";
code += "(*_resolver)";
code += "(reinterpret_cast<void **>(&_o->" + Name(field) + "), ";
code += "static_cast<flatbuffers::hash_value_t>(_e));";
- code += " else ";
- code += "_o->" + Name(field) + " = nullptr;";
+ if (PtrType(&field) == "naked") {
+ code += " else ";
+ code += "_o->" + Name(field) + " = nullptr;";
+ } else {
+ //code += " else ";
+ //code += "_o->" + Name(field) + " = " + GenTypeNativePtr(cpp_type->constant, &field, true) + "();";
+ code += "/* else do nothing */;";
+ }
} else {
// Generate code for assigning the value, of the form:
// _o->field = value;
} else {
value += Name(field);
}
- if (field.attributes.Lookup("cpp_type")) {
+ if (field.value.type.base_type != BASE_TYPE_VECTOR && field.attributes.Lookup("cpp_type")) {
auto type = GenTypeBasic(field.value.type, false);
value =
"_rehasher ? "
"static_cast<" +
- type + ">((*_rehasher)(" + value + ")) : 0";
+ type + ">((*_rehasher)(" + value + GenPtrGet(field) + ")) : 0";
}
+
std::string code;
switch (field.value.type.base_type) {
// String fields are of the form:
field.value.type.enum_def->underlying_type, false);
code += "_fbb.CreateVector((const " + basetype + "*)" + value +
".data(), " + value + ".size())";
+ } else if (field.attributes.Lookup("cpp_type")) {
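+ // cpp_type vectors: serialize by re-hashing each native pointer through
+ // the optional _rehasher callback, writing 0 when no rehasher is supplied.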
+ auto type = GenTypeBasic(vector_type, false);
+ code += "_fbb.CreateVector<" + type + ">(" + value + ".size(), ";
+ code += "[](size_t i, _VectorArgs *__va) { ";
+ code += "return __va->__rehasher ? ";
+ code += "static_cast<" + type + ">((*__va->__rehasher)";
+ code += "(__va->_" + value + "[i]" + GenPtrGet(field) + ")) : 0";
+ code += "; }, &_va )";
} else {
code += "_fbb.CreateVector(" + value + ")";
}
f((1 << i) * 8, code_ptr, id);
}
}
- assert(!(field.padding & ~0xF));
+ FLATBUFFERS_ASSERT(!(field.padding & ~0xF));
}
}
code_.SetValue("STRUCT_NAME", Name(struct_def));
code_ +=
- "MANUALLY_ALIGNED_STRUCT({{ALIGN}}) "
+ "FLATBUFFERS_MANUALLY_ALIGNED_STRUCT({{ALIGN}}) "
"{{STRUCT_NAME}} FLATBUFFERS_FINAL_CLASS {";
code_ += " private:";
code_ += "};";
code_.SetValue("STRUCT_BYTE_SIZE", NumToString(struct_def.bytesize));
- code_ += "STRUCT_END({{STRUCT_NAME}}, {{STRUCT_BYTE_SIZE}});";
+ code_ += "FLATBUFFERS_STRUCT_END({{STRUCT_NAME}}, {{STRUCT_BYTE_SIZE}});";
code_ += "";
}
EnumDef &enum_def = **enum_def_it;
GenNameSpace(*enum_def.defined_namespace, &schema, &last_namespace);
GenComment(enum_def.doc_comment, &schema, nullptr);
- schema += "enum " + enum_def.name + " : ";
+ if (enum_def.is_union)
+ schema += "union " + enum_def.name;
+ else
+ schema += "enum " + enum_def.name + " : ";
schema += GenType(enum_def.underlying_type, true) + " {\n";
for (auto it = enum_def.vals.vec.begin(); it != enum_def.vals.vec.end();
++it) {
auto &ev = **it;
GenComment(ev.doc_comment, &schema, nullptr, " ");
- schema += " " + ev.name + " = " + NumToString(ev.value) + ",\n";
+ if (enum_def.is_union)
+ schema += " " + GenType(ev.union_type) + ",\n";
+ else
+ schema += " " + ev.name + " = " + NumToString(ev.value) + ",\n";
}
schema += "}\n\n";
}
namespace flatbuffers {
-// Convert an underscore_based_indentifier in to camelCase.
-// Also uppercases the first character if first is true.
-std::string MakeCamel(const std::string &in, bool first) {
- std::string s;
- for (size_t i = 0; i < in.length(); i++) {
- if (!i && first)
- s += static_cast<char>(toupper(in[0]));
- else if (in[i] == '_' && i + 1 < in.length())
- s += static_cast<char>(toupper(in[++i]));
- else
- s += in[i];
- }
- return s;
-}
-
// These arrays need to correspond to the IDLOptions::k enum.
struct LanguageParameters {
};
const LanguageParameters &GetLangParams(IDLOptions::Language lang) {
- static LanguageParameters language_parameters[] = {
+ static const LanguageParameters language_parameters[] = {
{
IDLOptions::kJava,
false,
if (lang == IDLOptions::kJava) {
return language_parameters[0];
} else {
- assert(lang == IDLOptions::kCSharp);
+ FLATBUFFERS_ASSERT(lang == IDLOptions::kCSharp);
return language_parameters[1];
}
}
// Save out the generated code for a single class while adding
// declaration boilerplate.
bool SaveType(const std::string &defname, const Namespace &ns,
- const std::string &classcode, bool needs_includes) {
+ const std::string &classcode, bool needs_includes) const {
if (!classcode.length()) return true;
std::string code;
const Namespace *CurrentNameSpace() const { return cur_name_space_; }
- std::string FunctionStart(char upper) {
+ std::string FunctionStart(char upper) const {
return std::string() + (lang_.language == IDLOptions::kJava
? static_cast<char>(tolower(upper))
: upper);
}
- std::string GenNullableAnnotation(const Type &t) {
+ std::string GenNullableAnnotation(const Type &t) const {
return lang_.language == IDLOptions::kJava && parser_.opts.gen_nullable &&
!IsScalar(DestinationType(t, true).base_type)
? " @Nullable "
return type.enum_def != nullptr && IsInteger(type.base_type);
}
- std::string GenTypeBasic(const Type &type, bool enableLangOverrides) {
+ std::string GenTypeBasic(const Type &type, bool enableLangOverrides) const {
// clang-format off
- static const char *java_typename[] = {
+ static const char * const java_typename[] = {
#define FLATBUFFERS_TD(ENUM, IDLTYPE, \
CTYPE, JTYPE, GTYPE, NTYPE, PTYPE) \
#JTYPE,
#undef FLATBUFFERS_TD
};
- static const char *csharp_typename[] = {
+ static const char * const csharp_typename[] = {
#define FLATBUFFERS_TD(ENUM, IDLTYPE, \
CTYPE, JTYPE, GTYPE, NTYPE, PTYPE) \
#NTYPE,
if (lang_.language == IDLOptions::kJava) {
return java_typename[type.base_type];
} else {
- assert(lang_.language == IDLOptions::kCSharp);
+ FLATBUFFERS_ASSERT(lang_.language == IDLOptions::kCSharp);
return csharp_typename[type.base_type];
}
}
- std::string GenTypeBasic(const Type &type) {
+ std::string GenTypeBasic(const Type &type) const {
return GenTypeBasic(type, true);
}
- std::string GenTypePointer(const Type &type) {
+ std::string GenTypePointer(const Type &type) const {
switch (type.base_type) {
case BASE_TYPE_STRING: return lang_.string_type;
case BASE_TYPE_VECTOR: return GenTypeGet(type.VectorType());
}
}
- std::string GenTypeGet(const Type &type) {
+ std::string GenTypeGet(const Type &type) const {
return IsScalar(type.base_type) ? GenTypeBasic(type) : GenTypePointer(type);
}
// Find the destination type the user wants to receive the value in (e.g.
// one size higher signed types for unsigned serialized values in Java).
- Type DestinationType(const Type &type, bool vectorelem) {
+ Type DestinationType(const Type &type, bool vectorelem) const {
if (lang_.language != IDLOptions::kJava) return type;
switch (type.base_type) {
// We use int for both uchar/ushort, since that generally means less
}
}
- std::string GenOffsetType(const StructDef &struct_def) {
+ std::string GenOffsetType(const StructDef &struct_def) const {
if (lang_.language == IDLOptions::kCSharp) {
return "Offset<" + WrapInNameSpace(struct_def) + ">";
} else {
}
std::string GenOffsetConstruct(const StructDef &struct_def,
- const std::string &variable_name) {
+ const std::string &variable_name) const {
if (lang_.language == IDLOptions::kCSharp) {
return "new Offset<" + WrapInNameSpace(struct_def) + ">(" +
variable_name + ")";
return variable_name;
}
- std::string GenVectorOffsetType() {
+ std::string GenVectorOffsetType() const {
if (lang_.language == IDLOptions::kCSharp) {
return "VectorOffset";
} else {
}
// Generate destination type name
- std::string GenTypeNameDest(const Type &type) {
+ std::string GenTypeNameDest(const Type &type) const {
return GenTypeGet(DestinationType(type, true));
}
// Mask to turn serialized value into destination type value.
- std::string DestinationMask(const Type &type, bool vectorelem) {
+ std::string DestinationMask(const Type &type, bool vectorelem) const {
if (lang_.language != IDLOptions::kJava) return "";
switch (type.base_type) {
case BASE_TYPE_UCHAR: return " & 0xFF";
}
// Casts necessary to correctly read serialized data
- std::string DestinationCast(const Type &type) {
+ std::string DestinationCast(const Type &type) const {
if (type.base_type == BASE_TYPE_VECTOR) {
return DestinationCast(type.VectorType());
} else {
// would be cast down to int before being put onto the buffer. In C#, one cast
// directly cast an Enum to its underlying type, which is essential before
// putting it onto the buffer.
- std::string SourceCast(const Type &type, bool castFromDest) {
+ std::string SourceCast(const Type &type, bool castFromDest) const {
if (type.base_type == BASE_TYPE_VECTOR) {
return SourceCast(type.VectorType(), castFromDest);
} else {
return "";
}
- std::string SourceCast(const Type &type) { return SourceCast(type, true); }
+ std::string SourceCast(const Type &type) const { return SourceCast(type, true); }
- std::string SourceCastBasic(const Type &type, bool castFromDest) {
+ std::string SourceCastBasic(const Type &type, bool castFromDest) const {
return IsScalar(type.base_type) ? SourceCast(type, castFromDest) : "";
}
- std::string SourceCastBasic(const Type &type) {
+ std::string SourceCastBasic(const Type &type) const {
return SourceCastBasic(type, true);
}
- std::string GenEnumDefaultValue(const Value &value) {
+ std::string GenEnumDefaultValue(const Value &value) const {
auto enum_def = value.type.enum_def;
auto vec = enum_def->vals.vec;
auto default_value = StringToInt(value.constant.c_str());
return result;
}
- std::string GenDefaultValue(const Value &value, bool enableLangOverrides) {
+ std::string GenDefaultValue(const Value &value, bool enableLangOverrides) const {
if (enableLangOverrides) {
// handles both enum case and vector of enum case
if (lang_.language == IDLOptions::kCSharp &&
}
}
- std::string GenDefaultValue(const Value &value) {
+ std::string GenDefaultValue(const Value &value) const {
return GenDefaultValue(value, true);
}
std::string GenDefaultValueBasic(const Value &value,
- bool enableLangOverrides) {
+ bool enableLangOverrides) const {
if (!IsScalar(value.type.base_type)) {
if (enableLangOverrides) {
if (lang_.language == IDLOptions::kCSharp) {
return GenDefaultValue(value, enableLangOverrides);
}
- std::string GenDefaultValueBasic(const Value &value) {
+ std::string GenDefaultValueBasic(const Value &value) const {
return GenDefaultValueBasic(value, true);
}
- void GenEnum(EnumDef &enum_def, std::string *code_ptr) {
+ void GenEnum(EnumDef &enum_def, std::string *code_ptr) const {
std::string &code = *code_ptr;
if (enum_def.generated) return;
}
// Returns the function name that is able to read a value of the given type.
- std::string GenGetter(const Type &type) {
+ std::string GenGetter(const Type &type) const {
switch (type.base_type) {
case BASE_TYPE_STRING: return lang_.accessor_prefix + "__string";
case BASE_TYPE_STRUCT: return lang_.accessor_prefix + "__struct";
// Returns the function name that is able to read a value of the given type.
std::string GenGetterForLookupByKey(flatbuffers::FieldDef *key_field,
const std::string &data_buffer,
- const char *num = nullptr) {
+ const char *num = nullptr) const {
auto type = key_field->value.type;
auto dest_mask = DestinationMask(type, true);
auto dest_cast = DestinationCast(type);
// Direct mutation is only allowed for scalar fields.
// Hence a setter method will only be generated for such fields.
- std::string GenSetter(const Type &type) {
+ std::string GenSetter(const Type &type) const {
if (IsScalar(type.base_type)) {
std::string setter =
lang_.accessor_prefix + "bb." + FunctionStart('P') + "ut";
}
// Returns the method name for use with add/put calls.
- std::string GenMethod(const Type &type) {
+ std::string GenMethod(const Type &type) const {
return IsScalar(type.base_type) ? MakeCamel(GenTypeBasic(type, false))
: (IsStruct(type) ? "Struct" : "Offset");
}
// Recursively generate arguments for a constructor, to deal with nested
// structs.
void GenStructArgs(const StructDef &struct_def, std::string *code_ptr,
- const char *nameprefix) {
+ const char *nameprefix) const {
std::string &code = *code_ptr;
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
// builder.putType(name);
// and insert manual padding.
void GenStructBody(const StructDef &struct_def, std::string *code_ptr,
- const char *nameprefix) {
+ const char *nameprefix) const {
std::string &code = *code_ptr;
code += " builder." + FunctionStart('P') + "rep(";
code += NumToString(struct_def.minalign) + ", ";
}
}
- std::string GenByteBufferLength(const char *bb_name) {
+ std::string GenByteBufferLength(const char *bb_name) const {
std::string bb_len = bb_name;
if (lang_.language == IDLOptions::kCSharp)
bb_len += ".Length";
}
std::string GenOffsetGetter(flatbuffers::FieldDef *key_field,
- const char *num = nullptr) {
+ const char *num = nullptr) const {
std::string key_offset = "";
key_offset += lang_.accessor_prefix_static + "__offset(" +
NumToString(key_field->value.offset) + ", ";
return key_offset;
}
- std::string GenLookupKeyGetter(flatbuffers::FieldDef *key_field) {
+ std::string GenLookupKeyGetter(flatbuffers::FieldDef *key_field) const {
std::string key_getter = " ";
key_getter += "int tableOffset = " + lang_.accessor_prefix_static;
key_getter += "__indirect(vectorLocation + 4 * (start + middle)";
return key_getter;
}
- std::string GenKeyGetter(flatbuffers::FieldDef *key_field) {
+ std::string GenKeyGetter(flatbuffers::FieldDef *key_field) const {
std::string key_getter = "";
auto data_buffer =
(lang_.language == IDLOptions::kCSharp) ? "builder.DataBuffer" : "_bb";
return key_getter;
}
- void GenStruct(StructDef &struct_def, std::string *code_ptr) {
+ void GenStruct(StructDef &struct_def, std::string *code_ptr) const {
if (struct_def.generated) return;
std::string &code = *code_ptr;
code += "(obj, o) : null";
}
break;
- default: assert(0);
+ default: FLATBUFFERS_ASSERT(0);
}
}
code += member_suffix;
code += GenTypeNameDest(key_field.value.type) + " key)";
code += offset_prefix;
code += qualified_name + ".__lookup_by_key(";
+ if (lang_.language == IDLOptions::kJava)
+ code += "null, ";
code += lang_.accessor_prefix + "__vector(o), key, ";
code += lang_.accessor_prefix + "bb) : null; ";
code += "}\n";
+ if (lang_.language == IDLOptions::kJava) {
+ code += " public " + qualified_name + lang_.optional_suffix + " ";
+ code += MakeCamel(field.name, lang_.first_camel_upper) + "ByKey(";
+ code += qualified_name + lang_.optional_suffix + " obj, ";
+ code += GenTypeNameDest(key_field.value.type) + " key)";
+ code += offset_prefix;
+ code += qualified_name + ".__lookup_by_key(obj, ";
+ code += lang_.accessor_prefix + "__vector(o), key, ";
+ code += lang_.accessor_prefix + "bb) : null; ";
+ code += "}\n";
+ }
break;
}
}
? 1
: InlineSize(field.value.type.VectorType()));
code += "); }\n";
+ code += " public ByteBuffer ";
+ code += MakeCamel(field.name, lang_.first_camel_upper);
+ code += "InByteBuffer(ByteBuffer _bb) { return ";
+ code += lang_.accessor_prefix + "__vector_in_bytebuffer(_bb, ";
+ code += NumToString(field.value.offset) + ", ";
+ code +=
+ NumToString(field.value.type.base_type == BASE_TYPE_STRING
+ ? 1
+ : InlineSize(field.value.type.VectorType()));
+ code += "); }\n";
break;
case IDLOptions::kCSharp:
code += " public ArraySegment<byte>? Get";
}
code += " return " + GenOffsetConstruct(struct_def, "o") + ";\n }\n";
if (parser_.root_struct_def_ == &struct_def) {
- code += " public static void ";
- code += FunctionStart('F') + "inish" + struct_def.name;
- code +=
- "Buffer(FlatBufferBuilder builder, " + GenOffsetType(struct_def);
- code += " offset) {";
- code += " builder." + FunctionStart('F') + "inish(offset";
- if (lang_.language == IDLOptions::kCSharp) { code += ".Value"; }
-
- if (parser_.file_identifier_.length())
- code += ", \"" + parser_.file_identifier_ + "\"";
- code += "); }\n";
+ std::string size_prefix[] = { "", "SizePrefixed" };
+ for (int i = 0; i < 2; ++i) {
+ code += " public static void ";
+ code += FunctionStart('F') + "inish" + size_prefix[i] +
+ struct_def.name;
+ code += "Buffer(FlatBufferBuilder builder, " +
+ GenOffsetType(struct_def);
+ code += " offset) {";
+ code += " builder." + FunctionStart('F') + "inish" + size_prefix[i] +
+ "(offset";
+ if (lang_.language == IDLOptions::kCSharp) { code += ".Value"; }
+
+ if (parser_.file_identifier_.length())
+ code += ", \"" + parser_.file_identifier_ + "\"";
+ code += "); }\n";
+ }
}
}
// Only generate key compare function for table,
}
code += "\n public static " + struct_def.name + lang_.optional_suffix;
- code += " __lookup_by_key(int vectorLocation, ";
+ code += " __lookup_by_key(";
+ if (lang_.language == IDLOptions::kJava)
+ code += struct_def.name + " obj, ";
+ code += "int vectorLocation, ";
code += GenTypeNameDest(key_field->value.type);
code += " key, ByteBuffer bb) {\n";
if (key_field->value.type.base_type == BASE_TYPE_STRING) {
code += " start += middle;\n";
code += " span -= middle;\n";
code += " } else {\n";
- code += " return new " + struct_def.name;
- code += "().__assign(tableOffset, bb);\n";
+ code += " return ";
+ if (lang_.language == IDLOptions::kJava)
+ code += "(obj == null ? new " + struct_def.name + "() : obj)";
+ else
+ code += "new " + struct_def.name + "()";
+ code += ".__assign(tableOffset, bb);\n";
code += " }\n }\n";
code += " return null;\n";
code += " }\n";
std::string GeneralMakeRule(const Parser &parser, const std::string &path,
const std::string &file_name) {
- assert(parser.opts.lang <= IDLOptions::kMAX);
+ FLATBUFFERS_ASSERT(parser.opts.lang <= IDLOptions::kMAX);
const auto &lang = GetLangParams(parser.opts.lang);
std::string make_rule;
namespace go {
// see https://golang.org/ref/spec#Keywords
-static const char *g_golang_keywords[] = {
+static const char * const g_golang_keywords[] = {
"break", "default", "func", "interface", "select", "case", "defer",
"go", "map", "struct", "chan", "else", "goto", "package",
"switch", "const", "fallthrough", "if", "range", "type", "continue",
break;
}
case BASE_TYPE_UNION: GetUnionField(struct_def, field, code_ptr); break;
- default: assert(0);
+ default: FLATBUFFERS_ASSERT(0);
}
}
if (field.value.type.base_type == BASE_TYPE_VECTOR) {
void Indent() { indent_++; }
void Outdent() {
indent_--;
- assert(indent_ >= 0);
+ FLATBUFFERS_ASSERT(indent_ >= 0);
}
private:
if (lang == IDLOptions::kJs) {
return js_language_parameters[0];
} else {
- assert(lang == IDLOptions::kTs);
+ FLATBUFFERS_ASSERT(lang == IDLOptions::kTs);
return js_language_parameters[1];
}
}
const std::string &file) {
const auto basename =
flatbuffers::StripPath(flatbuffers::StripExtension(file));
- if (basename == file_name_) { return typeName; }
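+ // When generate_all is set, types from included files are generated too,
+ // so no cross-file namespace prefix (or import) is needed.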
+ if (basename == file_name_ || parser_.opts.generate_all) { return typeName; }
return GenFileNamespacePrefix(file) + "." + typeName;
}
GenPrefixedTypeName(GenTypeName(field.value.type, false, true),
field.value.type.enum_def->file) +
" {\n";
+
+ if (!parser_.opts.generate_all) {
+ imported_files.insert(field.value.type.enum_def->file);
+ }
} else {
code += "):" + GenTypeName(field.value.type, false, true) + " {\n";
}
code += ", " + GenBBAccess() + ") : null;\n";
}
- if (lang_.language == IDLOptions::kTs) {
+ if (lang_.language == IDLOptions::kTs && !parser_.opts.generate_all) {
imported_files.insert(field.value.type.struct_def->file);
}
vectortypename = GenPrefixedTypeName(
vectortypename, vectortype.struct_def->file);
code += prefix + ", obj?:" + vectortypename;
- imported_files.insert(vectortype.struct_def->file);
+
+ if (!parser_.opts.generate_all) {
+ imported_files.insert(vectortype.struct_def->file);
+ }
} else if (vectortype.base_type == BASE_TYPE_STRING) {
code += prefix + "):string\n";
code += prefix + ",optionalEncoding:flatbuffers.Encoding" +
" : null;\n";
break;
- default: assert(0);
+ default: FLATBUFFERS_ASSERT(0);
}
}
code += "};\n\n";
std::string JSMakeRule(const Parser &parser, const std::string &path,
const std::string &file_name) {
- assert(parser.opts.lang <= IDLOptions::kMAX);
+ FLATBUFFERS_ASSERT(parser.opts.lang <= IDLOptions::kMAX);
const auto &lang = GetJsLangParams(parser.opts.lang);
std::string filebase =
break;
}
case BASE_TYPE_UNION: GetUnionField(field, code_ptr); break;
- default: assert(0);
+ default: FLATBUFFERS_ASSERT(0);
}
}
if (field.value.type.base_type == BASE_TYPE_VECTOR) {
break;
}
case BASE_TYPE_UNION: GetUnionField(struct_def, field, code_ptr); break;
- default: assert(0);
+ default: FLATBUFFERS_ASSERT(0);
}
}
if (field.value.type.base_type == BASE_TYPE_VECTOR) {
case BASE_TYPE_UNION:
// If this assert hits, you have an corrupt buffer, a union type field
// was not present or was out of range.
- assert(union_type);
+ FLATBUFFERS_ASSERT(union_type);
return Print<const void *>(val, *union_type, indent, nullptr, opts,
_text);
case BASE_TYPE_STRUCT:
// clang-format on
}
break;
- default: assert(0);
+ default: FLATBUFFERS_ASSERT(0);
}
return true;
}
const void *val = nullptr;
if (fixed) {
// The only non-scalar fields in structs are structs.
- assert(IsStruct(fd.value.type));
+ FLATBUFFERS_ASSERT(IsStruct(fd.value.type));
val = reinterpret_cast<const Struct *>(table)->GetStruct<const void *>(
fd.value.offset);
} else if (fd.flexbuffer) {
bool GenerateText(const Parser &parser, const void *flatbuffer,
std::string *_text) {
std::string &text = *_text;
- assert(parser.root_struct_def_); // call SetRootType()
+ FLATBUFFERS_ASSERT(parser.root_struct_def_); // call SetRootType()
text.reserve(1024); // Reduce amount of inevitable reallocs.
- if (!GenStruct(*parser.root_struct_def_, GetRoot<Table>(flatbuffer), 0,
- parser.opts, _text)) {
+ auto root = parser.opts.size_prefixed ?
+ GetSizePrefixedRoot<Table>(flatbuffer) : GetRoot<Table>(flatbuffer);
+ if (!GenStruct(*parser.root_struct_def_, root, 0, parser.opts, _text)) {
return false;
}
text += NewLine(parser.opts);
return true;
}
+// Convert an underscore_based_identifier into camelCase.
+// Also uppercases the first character if first is true.
+std::string MakeCamel(const std::string &in, bool first) {
+ std::string s;
+ for (size_t i = 0; i < in.length(); i++) {
+ if (!i && first)
+ s += static_cast<char>(toupper(in[0]));
+ else if (in[i] == '_' && i + 1 < in.length())
+ s += static_cast<char>(toupper(in[++i]));
+ else
+ s += in[i];
+ }
+ return s;
+}
+
void Parser::Message(const std::string &msg) {
error_ = file_being_parsed_.length() ? AbsolutePath(file_being_parsed_) : "";
// clang-format off
};
static std::string TokenToString(int t) {
- static const char *tokens[] = {
+ static const char * const tokens[] = {
#define FLATBUFFERS_TOKEN(NAME, VALUE, STRING) STRING,
FLATBUFFERS_GEN_TOKENS(FLATBUFFERS_TOKEN)
#undef FLATBUFFERS_TOKEN
}
// clang-format on
-std::string Parser::TokenToStringId(int t) {
+std::string Parser::TokenToStringId(int t) const {
return t == kTokenIdentifier ? attribute_ : TokenToString(t);
}
if (IsIdentifierStart(c)) {
// Collect all chars of an identifier:
const char *start = cursor_ - 1;
- while (isalnum(static_cast<unsigned char>(*cursor_)) ||
- *cursor_ == '_')
+ while (isalnum(static_cast<unsigned char>(*cursor_)) || *cursor_ == '_')
cursor_++;
attribute_.append(start, cursor_);
token_ = kTokenIdentifier;
}
// Check if a given token is next.
-bool Parser::Is(int t) { return t == token_; }
+bool Parser::Is(int t) const { return t == token_; }
-bool Parser::IsIdent(const char *id) {
+bool Parser::IsIdent(const char *id) const {
return token_ == kTokenIdentifier && attribute_ == id;
}
if (token_ == '=') {
NEXT();
- ECHECK(ParseSingleValue(field->value));
+ ECHECK(ParseSingleValue(&field->name, field->value));
if (!IsScalar(type.base_type) ||
(struct_def.fixed && field->value.constant != "0"))
return Error(
field->deprecated = field->attributes.Lookup("deprecated") != nullptr;
auto hash_name = field->attributes.Lookup("hash");
if (hash_name) {
- switch (type.base_type) {
+ switch ((type.base_type == BASE_TYPE_VECTOR) ? type.element : type.base_type) {
+ case BASE_TYPE_SHORT:
+ case BASE_TYPE_USHORT: {
+ if (FindHashFunction16(hash_name->constant.c_str()) == nullptr)
+ return Error("Unknown hashing algorithm for 16 bit types: " +
+ hash_name->constant);
+ break;
+ }
case BASE_TYPE_INT:
case BASE_TYPE_UINT: {
if (FindHashFunction32(hash_name->constant.c_str()) == nullptr)
}
default:
return Error(
- "only int, uint, long and ulong data types support hashing.");
+ "only short, ushort, int, uint, long and ulong data types support hashing.");
}
}
auto cpp_type = field->attributes.Lookup("cpp_type");
if (cpp_type) {
if (!hash_name)
return Error("cpp_type can only be used with a hashed field");
+ // Force cpp_ptr_type to 'naked' if unset.
+ auto cpp_ptr_type = field->attributes.Lookup("cpp_ptr_type");
+ if (!cpp_ptr_type) {
+ auto val = new Value();
+ val->type = cpp_type->type;
+ val->constant = "naked";
+ field->attributes.Add("cpp_ptr_type", val);
+ }
}
if (field->deprecated && struct_def.fixed)
return Error("can't deprecate fields in a struct");
const StructDef *parent_struct_def) {
switch (val.type.base_type) {
case BASE_TYPE_UNION: {
- assert(field);
+ FLATBUFFERS_ASSERT(field);
std::string constant;
// Find corresponding type field we may have already parsed.
for (auto elem = field_stack_.rbegin();
// output these in alphabetical order, meaning it comes after this
// value. So we scan past the value to find it, then come back here.
auto type_name = field->name + UnionTypeFieldSuffix();
- assert(parent_struct_def);
+ FLATBUFFERS_ASSERT(parent_struct_def);
auto type_field = parent_struct_def->fields.Lookup(type_name);
- assert(type_field); // Guaranteed by ParseField().
+ FLATBUFFERS_ASSERT(type_field); // Guaranteed by ParseField().
// Remember where we are in the source file, so we can come back here.
auto backup = *static_cast<ParserState *>(this);
ECHECK(SkipAnyJsonValue()); // The table.
} else if (enum_val->union_type.base_type == BASE_TYPE_STRING) {
ECHECK(ParseString(val));
} else {
- assert(false);
+ FLATBUFFERS_ASSERT(false);
}
break;
}
(token_ == kTokenIdentifier || token_ == kTokenStringConstant)) {
ECHECK(ParseHash(val, field));
} else {
- ECHECK(ParseSingleValue(val));
+ ECHECK(ParseSingleValue(field ? &field->name : nullptr, val));
}
break;
}
- default: ECHECK(ParseSingleValue(val)); break;
+ default: ECHECK(ParseSingleValue(field ? &field->name : nullptr, val)); break;
}
return NoError();
}
void Parser::SerializeStruct(const StructDef &struct_def, const Value &val) {
- assert(val.constant.length() == struct_def.bytesize);
+ FLATBUFFERS_ASSERT(val.constant.length() == struct_def.bytesize);
builder_.Align(struct_def.minalign);
builder_.PushBytes(reinterpret_cast<const uint8_t *>(val.constant.c_str()),
struct_def.bytesize);
if (struct_def.fixed) {
builder_.ClearOffsets();
builder_.EndStruct();
- assert(value);
+ FLATBUFFERS_ASSERT(value);
// Temporarily store this struct in the value string, since it is to
// be serialized in-place elsewhere.
value->assign(
reinterpret_cast<const char *>(builder_.GetCurrentBufferPointer()),
struct_def.bytesize);
builder_.PopBytes(struct_def.bytesize);
- assert(!ovalue);
+ FLATBUFFERS_ASSERT(!ovalue);
} else {
auto val = builder_.EndTable(start);
if (ovalue) *ovalue = val;
// Create and initialize new parser
Parser nested_parser;
- assert(field->nested_flatbuffer);
+ FLATBUFFERS_ASSERT(field->nested_flatbuffer);
nested_parser.root_struct_def_ = field->nested_flatbuffer;
nested_parser.enums_ = enums_;
nested_parser.opts = opts;
NEXT();
for (;;) {
auto name = attribute_;
- EXPECT(kTokenIdentifier);
+ if (!(Is(kTokenIdentifier) || Is(kTokenStringConstant)))
+ return Error("attribute name must be either an identifier or a string: " +
+ name);
if (known_attributes_.find(name) == known_attributes_.end())
return Error("user define attributes must be declared before use: " +
name);
+ NEXT();
auto e = new Value();
attributes->Add(name, e);
if (Is(':')) {
NEXT();
- ECHECK(ParseSingleValue(*e));
+ ECHECK(ParseSingleValue(&name, *e));
}
if (Is(')')) {
NEXT();
return NoError();
}
-CheckedError Parser::TryTypedValue(int dtoken, bool check, Value &e,
+CheckedError Parser::TryTypedValue(const std::string *name, int dtoken, bool check, Value &e,
BaseType req, bool *destmatch) {
bool match = dtoken == token_;
if (match) {
} else {
return Error(std::string("type mismatch: expecting: ") +
kTypeNames[e.type.base_type] +
- ", found: " + kTypeNames[req]);
+ ", found: " + kTypeNames[req] +
+ ", name: " + (name ? *name : "") +
+ ", value: " + e.constant);
}
}
NEXT();
}
CheckedError Parser::ParseHash(Value &e, FieldDef *field) {
- assert(field);
+ FLATBUFFERS_ASSERT(field);
Value *hash_name = field->attributes.Lookup("hash");
switch (e.type.base_type) {
+ case BASE_TYPE_SHORT: {
+ auto hash = FindHashFunction16(hash_name->constant.c_str());
+ int16_t hashed_value = static_cast<int16_t>(hash(attribute_.c_str()));
+ e.constant = NumToString(hashed_value);
+ break;
+ }
+ case BASE_TYPE_USHORT: {
+ auto hash = FindHashFunction16(hash_name->constant.c_str());
+ uint16_t hashed_value = hash(attribute_.c_str());
+ e.constant = NumToString(hashed_value);
+ break;
+ }
case BASE_TYPE_INT: {
auto hash = FindHashFunction32(hash_name->constant.c_str());
int32_t hashed_value = static_cast<int32_t>(hash(attribute_.c_str()));
e.constant = NumToString(hashed_value);
break;
}
- default: assert(0);
+ default: FLATBUFFERS_ASSERT(0);
}
NEXT();
return NoError();
return Error("cannot parse value starting with: " + TokenToStringId(token_));
}
-CheckedError Parser::ParseSingleValue(Value &e) {
+CheckedError Parser::ParseSingleValue(const std::string *name, Value &e) {
// First see if this could be a conversion function:
if (token_ == kTokenIdentifier && *cursor_ == '(') {
auto functionname = attribute_;
NEXT();
EXPECT('(');
- ECHECK(ParseSingleValue(e));
+ ECHECK(ParseSingleValue(name, e));
EXPECT(')');
// clang-format off
#define FLATBUFFERS_FN_DOUBLE(name, op) \
e.constant = NumToString(strtod(attribute_.c_str(), &end));
if (*end) return Error("invalid float: " + attribute_);
} else {
- assert(0); // Shouldn't happen, we covered all types.
+ FLATBUFFERS_ASSERT(0); // Shouldn't happen, we covered all types.
e.constant = "0";
}
NEXT();
}
} else {
bool match = false;
- ECHECK(TryTypedValue(kTokenIntegerConstant, IsScalar(e.type.base_type), e,
+ ECHECK(TryTypedValue(name, kTokenIntegerConstant, IsScalar(e.type.base_type), e,
BASE_TYPE_INT, &match));
- ECHECK(TryTypedValue(kTokenFloatConstant, IsFloat(e.type.base_type), e,
+ ECHECK(TryTypedValue(name, kTokenFloatConstant, IsFloat(e.type.base_type), e,
BASE_TYPE_FLOAT, &match));
- ECHECK(TryTypedValue(kTokenStringConstant,
+ ECHECK(TryTypedValue(name, kTokenStringConstant,
e.type.base_type == BASE_TYPE_STRING, e,
BASE_TYPE_STRING, &match));
auto istrue = IsIdent("true");
if (istrue || IsIdent("false")) {
attribute_ = NumToString(istrue);
- ECHECK(TryTypedValue(kTokenIdentifier, IsBool(e.type.base_type), e,
+ ECHECK(TryTypedValue(name, kTokenIdentifier, IsBool(e.type.base_type), e,
BASE_TYPE_BOOL, &match));
}
if (!match) return TokenError();
NEXT();
std::string enum_name = attribute_;
EXPECT(kTokenIdentifier);
- auto &enum_def = *new EnumDef();
- enum_def.name = enum_name;
- enum_def.file = file_being_parsed_;
- enum_def.doc_comment = enum_comment;
- enum_def.is_union = is_union;
- enum_def.defined_namespace = current_namespace_;
- if (enums_.Add(current_namespace_->GetFullyQualifiedName(enum_name),
- &enum_def))
- return Error("enum already exists: " + enum_name);
- if (is_union) {
- enum_def.underlying_type.base_type = BASE_TYPE_UTYPE;
- enum_def.underlying_type.enum_def = &enum_def;
- } else {
- if (opts.proto_mode) {
- enum_def.underlying_type.base_type = BASE_TYPE_INT;
+ EnumDef *enum_def;
+ ECHECK(StartEnum(enum_name, is_union, &enum_def));
+ enum_def->doc_comment = enum_comment;
+ if (!is_union && !opts.proto_mode) {
+ // Give a specialized error message, since this type spec used to
+ // be optional in the first FlatBuffers release.
+ if (!Is(':')) {
+ return Error(
+ "must specify the underlying integer type for this"
+ " enum (e.g. \': short\', which was the default).");
} else {
- // Give specialized error message, since this type spec used to
- // be optional in the first FlatBuffers release.
- if (!Is(':')) {
- return Error(
- "must specify the underlying integer type for this"
- " enum (e.g. \': short\', which was the default).");
- } else {
- NEXT();
- }
- // Specify the integer type underlying this enum.
- ECHECK(ParseType(enum_def.underlying_type));
- if (!IsInteger(enum_def.underlying_type.base_type))
- return Error("underlying enum type must be integral");
+ NEXT();
}
+ // Specify the integer type underlying this enum.
+ ECHECK(ParseType(enum_def->underlying_type));
+ if (!IsInteger(enum_def->underlying_type.base_type))
+ return Error("underlying enum type must be integral");
// Make this type refer back to the enum it was derived from.
- enum_def.underlying_type.enum_def = &enum_def;
+ enum_def->underlying_type.enum_def = enum_def;
}
- ECHECK(ParseMetaData(&enum_def.attributes));
+ ECHECK(ParseMetaData(&enum_def->attributes));
EXPECT('{');
- if (is_union) enum_def.vals.Add("NONE", new EnumVal("NONE", 0));
+ if (is_union) enum_def->vals.Add("NONE", new EnumVal("NONE", 0));
for (;;) {
if (opts.proto_mode && attribute_ == "option") {
ECHECK(ParseProtoOption());
std::replace(value_name.begin(), value_name.end(), '.', '_');
}
}
- auto prevsize = enum_def.vals.vec.size();
- auto value =
- !enum_def.vals.vec.empty() ? enum_def.vals.vec.back()->value + 1 : 0;
+ auto prevsize = enum_def->vals.vec.size();
+ auto value = !enum_def->vals.vec.empty()
+ ? enum_def->vals.vec.back()->value + 1
+ : 0;
auto &ev = *new EnumVal(value_name, value);
- if (enum_def.vals.Add(value_name, &ev))
+ if (enum_def->vals.Add(value_name, &ev))
return Error("enum value already exists: " + value_name);
ev.doc_comment = value_comment;
if (is_union) {
if (ev.union_type.base_type != BASE_TYPE_STRUCT &&
ev.union_type.base_type != BASE_TYPE_STRING)
return Error("union value type may only be table/struct/string");
- enum_def.uses_type_aliases = true;
+ enum_def->uses_type_aliases = true;
} else {
ev.union_type = Type(BASE_TYPE_STRUCT, LookupCreateStruct(full_name));
}
ev.value = StringToInt(attribute_.c_str());
EXPECT(kTokenIntegerConstant);
if (!opts.proto_mode && prevsize &&
- enum_def.vals.vec[prevsize - 1]->value >= ev.value)
+ enum_def->vals.vec[prevsize - 1]->value >= ev.value)
return Error("enum values must be specified in ascending order");
}
if (is_union) {
if (Is('}')) break;
}
EXPECT('}');
- if (enum_def.attributes.Lookup("bit_flags")) {
- for (auto it = enum_def.vals.vec.begin(); it != enum_def.vals.vec.end();
+ if (enum_def->attributes.Lookup("bit_flags")) {
+ for (auto it = enum_def->vals.vec.begin(); it != enum_def->vals.vec.end();
++it) {
if (static_cast<size_t>((*it)->value) >=
- SizeOf(enum_def.underlying_type.base_type) * 8)
+ SizeOf(enum_def->underlying_type.base_type) * 8)
return Error("bit flag out of range of underlying integral type");
(*it)->value = 1LL << (*it)->value;
}
}
- if (dest) *dest = &enum_def;
- types_.Add(current_namespace_->GetFullyQualifiedName(enum_def.name),
- new Type(BASE_TYPE_UNION, nullptr, &enum_def));
+ if (dest) *dest = enum_def;
+ types_.Add(current_namespace_->GetFullyQualifiedName(enum_def->name),
+ new Type(BASE_TYPE_UNION, nullptr, enum_def));
return NoError();
}
return NoError();
}
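+// Create a new enum or union definition, register it in the current
+// namespace, and set up its underlying type; value parsing is left to the
+// callers (ParseEnum and the proto oneof handling below).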
+CheckedError Parser::StartEnum(const std::string &enum_name, bool is_union,
+ EnumDef **dest) {
+ auto &enum_def = *new EnumDef();
+ enum_def.name = enum_name;
+ enum_def.file = file_being_parsed_;
+ enum_def.doc_comment = doc_comment_;
+ enum_def.is_union = is_union;
+ enum_def.defined_namespace = current_namespace_;
+ if (enums_.Add(current_namespace_->GetFullyQualifiedName(enum_name),
+ &enum_def))
+ return Error("enum already exists: " + enum_name);
+ enum_def.underlying_type.base_type = is_union ? BASE_TYPE_UTYPE
+ : BASE_TYPE_INT;
+ enum_def.underlying_type.enum_def = &enum_def;
+ if (dest) *dest = &enum_def;
+ return NoError();
+}
+
CheckedError Parser::ParseProtoFields(StructDef *struct_def, bool isextend,
bool inside_oneof) {
EXPECT('{');
}
}
StructDef *anonymous_struct = nullptr;
+ EnumDef *oneof_union = nullptr;
Type type;
if (IsIdent("group") || oneof) {
if (!oneof) NEXT();
- auto name = "Anonymous" + NumToString(anonymous_counter++);
- ECHECK(StartStruct(name, &anonymous_struct));
- type = Type(BASE_TYPE_STRUCT, anonymous_struct);
+ if (oneof && opts.proto_oneof_union) {
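+ // With proto_oneof_union set, map the oneof to a FlatBuffers union named
+ // after the oneof (<Name>Union) instead of an anonymous struct.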
+ auto name = MakeCamel(attribute_, true) + "Union";
+ ECHECK(StartEnum(name, true, &oneof_union));
+ type = Type(BASE_TYPE_UNION, nullptr, oneof_union);
+ } else {
+ auto name = "Anonymous" + NumToString(anonymous_counter++);
+ ECHECK(StartStruct(name, &anonymous_struct));
+ type = Type(BASE_TYPE_STRUCT, anonymous_struct);
+ }
} else {
ECHECK(ParseTypeFromProtoType(&type));
}
if (anonymous_struct) {
ECHECK(ParseProtoFields(anonymous_struct, false, oneof));
if (Is(';')) NEXT();
+ } else if (oneof_union) {
+ // Parse into a temporary StructDef, then transfer fields into an
+ // EnumDef describing the oneof as a union.
+ StructDef oneof_struct;
+ ECHECK(ParseProtoFields(&oneof_struct, false, oneof));
+ if (Is(';')) NEXT();
+ for (auto field_it = oneof_struct.fields.vec.begin();
+ field_it != oneof_struct.fields.vec.end(); ++field_it) {
+ const auto &oneof_field = **field_it;
+ const auto &oneof_type = oneof_field.value.type;
+ if (oneof_type.base_type != BASE_TYPE_STRUCT ||
+ !oneof_type.struct_def || oneof_type.struct_def->fixed)
+ return Error("oneof '" + name +
+ "' cannot be mapped to a union because member '" +
+ oneof_field.name + "' is not a table type.");
+ auto enum_val = new EnumVal(oneof_type.struct_def->name,
+ oneof_union->vals.vec.size());
+ enum_val->union_type = oneof_type;
+ enum_val->doc_comment = oneof_field.doc_comment;
+ oneof_union->vals.Add(oneof_field.name, enum_val);
+ }
} else {
EXPECT(';');
}
auto &bt = field.value.type.base_type == BASE_TYPE_VECTOR
? field.value.type.element
: field.value.type.base_type;
- assert(bt == BASE_TYPE_STRUCT);
+ FLATBUFFERS_ASSERT(bt == BASE_TYPE_STRUCT);
bt = enum_def->underlying_type.base_type;
struct_def.refcount--;
enum_def->refcount++;
}
uoffset_t toff;
ECHECK(ParseTable(*root_struct_def_, nullptr, &toff));
- builder_.Finish(Offset<Table>(toff), file_identifier_.length()
- ? file_identifier_.c_str()
- : nullptr);
+ if (opts.size_prefixed) {
+ builder_.FinishSizePrefixed(Offset<Table>(toff), file_identifier_.length()
+ ? file_identifier_.c_str()
+ : nullptr);
+ } else {
+ builder_.Finish(Offset<Table>(toff), file_identifier_.length()
+ ? file_identifier_.c_str()
+ : nullptr);
+ }
} else if (IsIdent("enum")) {
ECHECK(ParseEnum(false, nullptr));
} else if (IsIdent("union")) {
} else if (IsIdent("attribute")) {
NEXT();
auto name = attribute_;
- EXPECT(kTokenStringConstant);
+ if (Is(kTokenIdentifier)) {
+ NEXT();
+ } else {
+ EXPECT(kTokenStringConstant);
+ }
EXPECT(';');
known_attributes_[name] = false;
} else if (IsIdent("rpc_service")) {
builder_.CreateString(file_identifier_),
builder_.CreateString(file_extension_),
root_struct_def_ ? root_struct_def_->serialized_location : 0);
- builder_.Finish(schema_offset, reflection::SchemaIdentifier());
+ if (opts.size_prefixed) {
+ builder_.FinishSizePrefixed(schema_offset, reflection::SchemaIdentifier());
+ } else {
+ builder_.Finish(schema_offset, reflection::SchemaIdentifier());
+ }
}
Offset<reflection::Object> StructDef::Serialize(FlatBufferBuilder *builder,
std::vector<flatbuffers::Offset<reflection::KeyValue>> attrs;
for (auto kv = attributes.dict.begin(); kv != attributes.dict.end(); ++kv) {
auto it = parser.known_attributes_.find(kv->first);
- assert(it != parser.known_attributes_.end());
+ FLATBUFFERS_ASSERT(it != parser.known_attributes_.end());
if (!it->second) { // Custom attribute.
attrs.push_back(reflection::CreateKeyValue(
*builder, builder->CreateString(kv->first),
break;
}
case reflection::String: break;
- default: assert(false);
+ default: FLATBUFFERS_ASSERT(false);
}
}
// Check if the vtable offset points beyond the insertion point.
}
}
}
- assert(offset_idx == offsets.size());
+ FLATBUFFERS_ASSERT(offset_idx == offsets.size());
if (objectdef.is_struct()) {
fbb.ClearOffsets();
return fbb.EndStruct();
bool VerifyVector(flatbuffers::Verifier &v, const reflection::Schema &schema,
const flatbuffers::Table &table,
const reflection::Field &vec_field) {
- assert(vec_field.type()->base_type() == reflection::Vector);
+ FLATBUFFERS_ASSERT(vec_field.type()->base_type() == reflection::Vector);
if (!table.VerifyField<uoffset_t>(v, vec_field.offset())) return false;
switch (vec_field.type()->element()) {
- case reflection::None: assert(false); break;
+ case reflection::None: FLATBUFFERS_ASSERT(false); break;
case reflection::UType:
return v.Verify(flatbuffers::GetFieldV<uint8_t>(table, vec_field));
case reflection::Bool:
return false;
}
}
- case reflection::Vector: assert(false); break;
+ case reflection::Vector: FLATBUFFERS_ASSERT(false); break;
case reflection::Obj: {
auto obj = schema.objects()->Get(vec_field.type()->index());
if (obj->is_struct()) {
}
return true;
}
- case reflection::Union: assert(false); break;
- default: assert(false); break;
+ case reflection::Union: FLATBUFFERS_ASSERT(false); break;
+ default: FLATBUFFERS_ASSERT(false); break;
}
return false;
for (uoffset_t i = 0; i < obj.fields()->size(); i++) {
auto field_def = obj.fields()->Get(i);
switch (field_def->type()->base_type()) {
- case reflection::None: assert(false); break;
+ case reflection::None: FLATBUFFERS_ASSERT(false); break;
case reflection::UType:
if (!table->VerifyField<uint8_t>(v, field_def->offset())) return false;
break;
}
break;
}
- default: assert(false); break;
+ default: FLATBUFFERS_ASSERT(false); break;
}
}
static FileExistsFunction g_file_exists_function = FileExistsRaw;
bool LoadFile(const char *name, bool binary, std::string *buf) {
- assert(g_load_file_function);
+ FLATBUFFERS_ASSERT(g_load_file_function);
return g_load_file_function(name, binary, buf);
}
bool FileExists(const char *name) {
- assert(g_file_exists_function);
+ FLATBUFFERS_ASSERT(g_file_exists_function);
return g_file_exists_function(name);
}
[FlatBuffersTestMethod]
public void ByteBuffer_PutByteCannotPutAtOffsetPastLength()
{
- var buffer = new byte[1];
- var uut = new ByteBuffer(buffer);
+ var uut = new ByteBuffer(1);
Assert.Throws<ArgumentOutOfRangeException>(() => uut.PutByte(1, 99));
}
#endif
[FlatBuffersTestMethod]
public void ByteBuffer_PutShortCannotPutAtOffsetPastLength()
{
- var buffer = new byte[2];
- var uut = new ByteBuffer(buffer);
+ var uut = new ByteBuffer(2);
Assert.Throws<ArgumentOutOfRangeException>(() => uut.PutShort(2, 99));
}
#endif
[FlatBuffersTestMethod]
public void ByteBuffer_PutShortChecksLength()
{
- var buffer = new byte[1];
- var uut = new ByteBuffer(buffer);
+ var uut = new ByteBuffer(1);
Assert.Throws<ArgumentOutOfRangeException>(() => uut.PutShort(0, 99));
}
[FlatBuffersTestMethod]
public void ByteBuffer_PutShortChecksLengthAndOffset()
{
- var buffer = new byte[2];
- var uut = new ByteBuffer(buffer);
+ var uut = new ByteBuffer(2);
Assert.Throws<ArgumentOutOfRangeException>(() => uut.PutShort(1, 99));
}
#endif
[FlatBuffersTestMethod]
public void ByteBuffer_PutIntCannotPutAtOffsetPastLength()
{
- var buffer = new byte[4];
- var uut = new ByteBuffer(buffer);
+ var uut = new ByteBuffer(4);
Assert.Throws<ArgumentOutOfRangeException>(() => uut.PutInt(2, 0x0A0B0C0D));
}
[FlatBuffersTestMethod]
public void ByteBuffer_PutIntChecksLength()
{
- var buffer = new byte[1];
- var uut = new ByteBuffer(buffer);
+ var uut = new ByteBuffer(1);
Assert.Throws<ArgumentOutOfRangeException>(() => uut.PutInt(0, 0x0A0B0C0D));
}
[FlatBuffersTestMethod]
public void ByteBuffer_PutIntChecksLengthAndOffset()
{
- var buffer = new byte[4];
- var uut = new ByteBuffer(buffer);
+ var uut = new ByteBuffer(4);
Assert.Throws<ArgumentOutOfRangeException>(() => uut.PutInt(2, 0x0A0B0C0D));
}
#endif
[FlatBuffersTestMethod]
public void ByteBuffer_PutLongCannotPutAtOffsetPastLength()
{
- var buffer = new byte[8];
- var uut = new ByteBuffer(buffer);
+ var uut = new ByteBuffer(8);
Assert.Throws<ArgumentOutOfRangeException>(() => uut.PutLong(2, 0x010203040A0B0C0D));
}
[FlatBuffersTestMethod]
public void ByteBuffer_PutLongChecksLength()
{
- var buffer = new byte[1];
- var uut = new ByteBuffer(buffer);
+ var uut = new ByteBuffer(1);
Assert.Throws<ArgumentOutOfRangeException>(() => uut.PutLong(0, 0x010203040A0B0C0D));
}
[FlatBuffersTestMethod]
public void ByteBuffer_PutLongChecksLengthAndOffset()
{
- var buffer = new byte[8];
- var uut = new ByteBuffer(buffer);
+ var uut = new ByteBuffer(8);
Assert.Throws<ArgumentOutOfRangeException>(() => uut.PutLong(2, 0x010203040A0B0C0D));
}
#endif
[FlatBuffersTestMethod]
public void ByteBuffer_GetByteChecksOffset()
{
- var buffer = new byte[1];
- var uut = new ByteBuffer(buffer);
+ var uut = new ByteBuffer(1);
Assert.Throws<ArgumentOutOfRangeException>(()=>uut.Get(1));
}
#endif
[FlatBuffersTestMethod]
public void ByteBuffer_GetShortChecksOffset()
{
- var buffer = new byte[2];
- var uut = new ByteBuffer(buffer);
+ var uut = new ByteBuffer(2);
Assert.Throws<ArgumentOutOfRangeException>(() => uut.GetShort(2));
}
[FlatBuffersTestMethod]
public void ByteBuffer_GetShortChecksLength()
{
- var buffer = new byte[2];
- var uut = new ByteBuffer(buffer);
+ var uut = new ByteBuffer(2);
Assert.Throws<ArgumentOutOfRangeException>(() => uut.GetShort(1));
}
#endif
[FlatBuffersTestMethod]
public void ByteBuffer_GetIntChecksOffset()
{
- var buffer = new byte[4];
- var uut = new ByteBuffer(buffer);
+ var uut = new ByteBuffer(4);
Assert.Throws<ArgumentOutOfRangeException>(() => uut.GetInt(4));
}
[FlatBuffersTestMethod]
public void ByteBuffer_GetIntChecksLength()
{
- var buffer = new byte[2];
- var uut = new ByteBuffer(buffer);
+ var uut = new ByteBuffer(2);
Assert.Throws<ArgumentOutOfRangeException>(() => uut.GetInt(0));
}
#endif
[FlatBuffersTestMethod]
public void ByteBuffer_GetLongChecksOffset()
{
- var buffer = new byte[8];
- var uut = new ByteBuffer(buffer);
+ var uut = new ByteBuffer(8);
Assert.Throws<ArgumentOutOfRangeException>(() => uut.GetLong(8));
}
[FlatBuffersTestMethod]
public void ByteBuffer_GetLongChecksLength()
{
- var buffer = new byte[7];
- var uut = new ByteBuffer(buffer);
+ var uut = new ByteBuffer(7);
Assert.Throws<ArgumentOutOfRangeException>(() => uut.GetLong(0));
}
#endif
var rereverse = ByteBuffer.ReverseBytes(reverse);
Assert.AreEqual(original, rereverse);
}
+
+ [FlatBuffersTestMethod]
+ public void ByteBuffer_ToFullArray_MatchesBuffer()
+ {
+ var buffer = new byte[4];
+ buffer[0] = 0x0D;
+ buffer[1] = 0x0C;
+ buffer[2] = 0x0B;
+ buffer[3] = 0x0A;
+ var uut = new ByteBuffer(buffer);
+ Assert.ArrayEqual(buffer, uut.ToFullArray());
+ }
+
+ [FlatBuffersTestMethod]
+ public void ByteBuffer_ToSizedArray_MatchesBuffer()
+ {
+ var buffer = new byte[4];
+ buffer[0] = 0x0D;
+ buffer[1] = 0x0C;
+ buffer[2] = 0x0B;
+ buffer[3] = 0x0A;
+ var uut = new ByteBuffer(buffer);
+ Assert.ArrayEqual(buffer, uut.ToSizedArray());
+ }
+
+ [FlatBuffersTestMethod]
+ public void ByteBuffer_Duplicate_MatchesBuffer()
+ {
+ var buffer = new byte[4];
+ buffer[0] = 0x0D;
+ buffer[1] = 0x0C;
+ buffer[2] = 0x0B;
+ buffer[3] = 0x0A;
+ var uut = new ByteBuffer(buffer);
+ Assert.AreEqual(0x0A0B0C0D, uut.GetInt(0));
+
+ // Advance by two bytes
+ uut.Position = 2; uut = uut.Duplicate();
+ Assert.AreEqual(0x0A0B, uut.GetShort(2));
+
+ // Advance by one more byte
+ uut.Position = 1; uut = uut.Duplicate();
+ Assert.AreEqual(0x0A, uut.Get(3));
+ }
}
}
<Compile Include="..\..\net\FlatBuffers\ByteBuffer.cs">
<Link>FlatBuffers\ByteBuffer.cs</Link>
</Compile>
+ <Compile Include="..\..\net\FlatBuffers\ByteBufferUtil.cs">
+ <Link>FlatBuffers\ByteBufferUtil.cs</Link>
+ </Compile>
<Compile Include="..\..\net\FlatBuffers\IFlatbufferObject.cs">
<Link>FlatBuffers\IFlatbufferObject.cs</Link>
</Compile>
<Compile Include="..\MyGame\Example\Monster.cs">
<Link>MyGame\Example\Monster.cs</Link>
</Compile>
+ <Compile Include="..\MyGame\Example\Referrable.cs">
+ <Link>MyGame\Example\Referrable.cs</Link>
+ </Compile>
<Compile Include="..\MyGame\Example\Stat.cs">
<Link>MyGame\Example\Stat.cs</Link>
</Compile>
[FlatBuffersTestMethod]
public void CanCreateNewFlatBufferFromScratch()
+ {
+ CanCreateNewFlatBufferFromScratch(true);
+ CanCreateNewFlatBufferFromScratch(false);
+ }
+
+ private void CanCreateNewFlatBufferFromScratch(bool sizePrefix)
{
// Second, let's create a FlatBuffer from scratch in C#, and test it also.
// We use an initial size of 1 to exercise the reallocation algorithm,
Monster.AddTestarrayoftables(fbb, sortMons);
var mon = Monster.EndMonster(fbb);
- Monster.FinishMonsterBuffer(fbb, mon);
+ if (sizePrefix)
+ {
+ Monster.FinishSizePrefixedMonsterBuffer(fbb, mon);
+ }
+ else
+ {
+ Monster.FinishMonsterBuffer(fbb, mon);
+ }
// Dump to output directory so we can inspect later, if needed
- using (var ms = new MemoryStream(fbb.DataBuffer.Data, fbb.DataBuffer.Position, fbb.Offset))
+ using (var ms = fbb.DataBuffer.ToMemoryStream(fbb.DataBuffer.Position, fbb.Offset))
{
var data = ms.ToArray();
- File.WriteAllBytes(@"Resources/monsterdata_cstest.mon",data);
+ string filename = @"Resources/monsterdata_cstest" + (sizePrefix ? "_sp" : "") + ".mon";
+ File.WriteAllBytes(filename, data);
+ }
+
+ // Remove the size prefix if necessary for further testing
+ ByteBuffer dataBuffer = fbb.DataBuffer;
+ if (sizePrefix)
+ {
+ Assert.AreEqual(ByteBufferUtil.GetSizePrefix(dataBuffer) + FlatBufferConstants.SizePrefixLength,
+ dataBuffer.Length - dataBuffer.Position);
+ dataBuffer = ByteBufferUtil.RemoveSizePrefix(dataBuffer);
}
// Now assert the buffer
- TestBuffer(fbb.DataBuffer);
+ TestBuffer(dataBuffer);
//Attempt to mutate Monster fields and check whether the buffer has been mutated properly
// revert to original values after testing
- Monster monster = Monster.GetRootAsMonster(fbb.DataBuffer);
+ Monster monster = Monster.GetRootAsMonster(dataBuffer);
+
// mana is optional and does not exist in the buffer so the mutation should fail
// the mana field should retain its default value
pos.MutateX(1.0f);
Assert.AreEqual(pos.X, 1.0f);
- TestBuffer(fbb.DataBuffer);
+ TestBuffer(dataBuffer);
}
private void TestBuffer(ByteBuffer bb)
{
- var monster = Monster.GetRootAsMonster(bb);
+ Monster monster = Monster.GetRootAsMonster(bb);
Assert.AreEqual(80, monster.Hp);
Assert.AreEqual(150, monster.Mana);
public void TestNumbers()
{
var builder = new FlatBufferBuilder(1);
- Assert.ArrayEqual(new byte[] { 0 }, builder.DataBuffer.Data);
+ Assert.ArrayEqual(new byte[] { 0 }, builder.DataBuffer.ToFullArray());
builder.AddBool(true);
- Assert.ArrayEqual(new byte[] { 1 }, builder.DataBuffer.Data);
+ Assert.ArrayEqual(new byte[] { 1 }, builder.DataBuffer.ToFullArray());
builder.AddSbyte(-127);
- Assert.ArrayEqual(new byte[] { 129, 1 }, builder.DataBuffer.Data);
+ Assert.ArrayEqual(new byte[] { 129, 1 }, builder.DataBuffer.ToFullArray());
builder.AddByte(255);
- Assert.ArrayEqual(new byte[] { 0, 255, 129, 1 }, builder.DataBuffer.Data); // First pad
+ Assert.ArrayEqual(new byte[] { 0, 255, 129, 1 }, builder.DataBuffer.ToFullArray()); // First pad
builder.AddShort(-32222);
- Assert.ArrayEqual(new byte[] { 0, 0, 0x22, 0x82, 0, 255, 129, 1 }, builder.DataBuffer.Data); // Second pad
+ Assert.ArrayEqual(new byte[] { 0, 0, 0x22, 0x82, 0, 255, 129, 1 }, builder.DataBuffer.ToFullArray()); // Second pad
builder.AddUshort(0xFEEE);
- Assert.ArrayEqual(new byte[] { 0xEE, 0xFE, 0x22, 0x82, 0, 255, 129, 1 }, builder.DataBuffer.Data); // no pad
+ Assert.ArrayEqual(new byte[] { 0xEE, 0xFE, 0x22, 0x82, 0, 255, 129, 1 }, builder.DataBuffer.ToFullArray()); // no pad
builder.AddInt(-53687092);
- Assert.ArrayEqual(new byte[] { 0, 0, 0, 0, 204, 204, 204, 252, 0xEE, 0xFE, 0x22, 0x82, 0, 255, 129, 1 }, builder.DataBuffer.Data); // third pad
+ Assert.ArrayEqual(new byte[] { 0, 0, 0, 0, 204, 204, 204, 252, 0xEE, 0xFE, 0x22, 0x82, 0, 255, 129, 1 }, builder.DataBuffer.ToFullArray()); // third pad
builder.AddUint(0x98765432);
- Assert.ArrayEqual(new byte[] { 0x32, 0x54, 0x76, 0x98, 204, 204, 204, 252, 0xEE, 0xFE, 0x22, 0x82, 0, 255, 129, 1 }, builder.DataBuffer.Data); // no pad
+ Assert.ArrayEqual(new byte[] { 0x32, 0x54, 0x76, 0x98, 204, 204, 204, 252, 0xEE, 0xFE, 0x22, 0x82, 0, 255, 129, 1 }, builder.DataBuffer.ToFullArray()); // no pad
}
[FlatBuffersTestMethod]
{
var builder = new FlatBufferBuilder(1);
builder.AddUlong(0x1122334455667788);
- Assert.ArrayEqual(new byte[] { 0x88, 0x77, 0x66, 0x55, 0x44, 0x33, 0x22, 0x11 }, builder.DataBuffer.Data);
+ Assert.ArrayEqual(new byte[] { 0x88, 0x77, 0x66, 0x55, 0x44, 0x33, 0x22, 0x11 }, builder.DataBuffer.ToFullArray());
builder = new FlatBufferBuilder(1);
builder.AddLong(0x1122334455667788);
- Assert.ArrayEqual(new byte[] { 0x88, 0x77, 0x66, 0x55, 0x44, 0x33, 0x22, 0x11 }, builder.DataBuffer.Data);
+ Assert.ArrayEqual(new byte[] { 0x88, 0x77, 0x66, 0x55, 0x44, 0x33, 0x22, 0x11 }, builder.DataBuffer.ToFullArray());
}
[FlatBuffersTestMethod]
{
var builder = new FlatBufferBuilder(1);
builder.StartVector(sizeof(byte), 1, 1);
- Assert.ArrayEqual(new byte[] { 0, 0, 0, 0, 0, 0, 0, 0 }, builder.DataBuffer.Data);
+ Assert.ArrayEqual(new byte[] { 0, 0, 0, 0, 0, 0, 0, 0 }, builder.DataBuffer.ToFullArray());
builder.AddByte(1);
- Assert.ArrayEqual(new byte[] { 0, 0, 0, 0, 1, 0, 0, 0 }, builder.DataBuffer.Data);
+ Assert.ArrayEqual(new byte[] { 0, 0, 0, 0, 1, 0, 0, 0 }, builder.DataBuffer.ToFullArray());
builder.EndVector();
- Assert.ArrayEqual(new byte[] { 1, 0, 0, 0, 1, 0, 0, 0 }, builder.DataBuffer.Data);
+ Assert.ArrayEqual(new byte[] { 1, 0, 0, 0, 1, 0, 0, 0 }, builder.DataBuffer.ToFullArray());
}
[FlatBuffersTestMethod]
{
var builder = new FlatBufferBuilder(1);
builder.StartVector(sizeof(byte), 2, 1);
- Assert.ArrayEqual(new byte[] { 0, 0, 0, 0, 0, 0, 0, 0 }, builder.DataBuffer.Data);
+ Assert.ArrayEqual(new byte[] { 0, 0, 0, 0, 0, 0, 0, 0 }, builder.DataBuffer.ToFullArray());
builder.AddByte(1);
- Assert.ArrayEqual(new byte[] { 0, 0, 0, 0, 0, 1, 0, 0 }, builder.DataBuffer.Data);
+ Assert.ArrayEqual(new byte[] { 0, 0, 0, 0, 0, 1, 0, 0 }, builder.DataBuffer.ToFullArray());
builder.AddByte(2);
- Assert.ArrayEqual(new byte[] { 0, 0, 0, 0, 2, 1, 0, 0 }, builder.DataBuffer.Data);
+ Assert.ArrayEqual(new byte[] { 0, 0, 0, 0, 2, 1, 0, 0 }, builder.DataBuffer.ToFullArray());
builder.EndVector();
- Assert.ArrayEqual(new byte[] { 2, 0, 0, 0, 2, 1, 0, 0 }, builder.DataBuffer.Data);
+ Assert.ArrayEqual(new byte[] { 2, 0, 0, 0, 2, 1, 0, 0 }, builder.DataBuffer.ToFullArray());
}
[FlatBuffersTestMethod]
{
var builder = new FlatBufferBuilder(1);
builder.StartVector(sizeof(ushort), 1, 1);
- Assert.ArrayEqual(new byte[] { 0, 0, 0, 0, 0, 0, 0, 0 }, builder.DataBuffer.Data);
+ Assert.ArrayEqual(new byte[] { 0, 0, 0, 0, 0, 0, 0, 0 }, builder.DataBuffer.ToFullArray());
builder.AddUshort(1);
- Assert.ArrayEqual(new byte[] { 0, 0, 0, 0, 1, 0, 0, 0 }, builder.DataBuffer.Data);
+ Assert.ArrayEqual(new byte[] { 0, 0, 0, 0, 1, 0, 0, 0 }, builder.DataBuffer.ToFullArray());
builder.EndVector();
- Assert.ArrayEqual(new byte[] { 1, 0, 0, 0, 1, 0, 0, 0 }, builder.DataBuffer.Data);
+ Assert.ArrayEqual(new byte[] { 1, 0, 0, 0, 1, 0, 0, 0 }, builder.DataBuffer.ToFullArray());
}
[FlatBuffersTestMethod]
{
var builder = new FlatBufferBuilder(1);
builder.StartVector(sizeof(ushort), 2, 1);
- Assert.ArrayEqual(new byte[] { 0, 0, 0, 0, 0, 0, 0, 0 }, builder.DataBuffer.Data);
+ Assert.ArrayEqual(new byte[] { 0, 0, 0, 0, 0, 0, 0, 0 }, builder.DataBuffer.ToFullArray());
builder.AddUshort(0xABCD);
- Assert.ArrayEqual(new byte[] { 0, 0, 0, 0, 0, 0, 0xCD, 0xAB }, builder.DataBuffer.Data);
+ Assert.ArrayEqual(new byte[] { 0, 0, 0, 0, 0, 0, 0xCD, 0xAB }, builder.DataBuffer.ToFullArray());
builder.AddUshort(0xDCBA);
- Assert.ArrayEqual(new byte[] { 0, 0, 0, 0, 0xBA, 0xDC, 0xCD, 0xAB }, builder.DataBuffer.Data);
+ Assert.ArrayEqual(new byte[] { 0, 0, 0, 0, 0xBA, 0xDC, 0xCD, 0xAB }, builder.DataBuffer.ToFullArray());
builder.EndVector();
- Assert.ArrayEqual(new byte[] { 2, 0, 0, 0, 0xBA, 0xDC, 0xCD, 0xAB }, builder.DataBuffer.Data);
+ Assert.ArrayEqual(new byte[] { 2, 0, 0, 0, 0xBA, 0xDC, 0xCD, 0xAB }, builder.DataBuffer.ToFullArray());
}
[FlatBuffersTestMethod]
{
var builder = new FlatBufferBuilder(1);
builder.CreateString("foo");
- Assert.ArrayEqual(new byte[] { 3, 0, 0, 0, (byte)'f', (byte)'o', (byte)'o', 0 }, builder.DataBuffer.Data);
+ Assert.ArrayEqual(new byte[] { 3, 0, 0, 0, (byte)'f', (byte)'o', (byte)'o', 0 }, builder.DataBuffer.ToFullArray());
builder.CreateString("moop");
Assert.ArrayEqual(new byte[]
0, 0, 0, 0, // zero terminator with 3 byte pad
3, 0, 0, 0,
(byte)'f', (byte)'o', (byte)'o', 0
- }, builder.DataBuffer.Data);
+ }, builder.DataBuffer.ToFullArray());
}
[FlatBuffersTestMethod]
{
3, 0, 0, 0,
0x01, 0x02, 0x03, 0
- }, builder.DataBuffer.Data); // No padding
+ }, builder.DataBuffer.ToFullArray()); // No padding
builder.CreateString("\x04\x05\x06\x07");
Assert.ArrayEqual(new byte[]
{
0, 0, 0, 0, // zero terminator with 3 byte pad
3, 0, 0, 0,
0x01, 0x02, 0x03, 0
- }, builder.DataBuffer.Data); // No padding
+ }, builder.DataBuffer.ToFullArray()); // No padding
}
[FlatBuffersTestMethod]
{
var builder = new FlatBufferBuilder(1);
builder.StartObject(0);
- Assert.ArrayEqual(new byte[] { 0 }, builder.DataBuffer.Data);
+ Assert.ArrayEqual(new byte[] { 0 }, builder.DataBuffer.ToFullArray());
builder.EndObject();
Assert.ArrayEqual(new byte[]
{
4, 0, 4, 0,
4, 0, 0, 0
},
- builder.DataBuffer.Data);
+ builder.DataBuffer.ToFullArray());
}
[FlatBuffersTestMethod]
{
var builder = new FlatBufferBuilder(1);
builder.StartObject(1);
- Assert.ArrayEqual(new byte[] { 0 }, builder.DataBuffer.Data);
+ Assert.ArrayEqual(new byte[] { 0 }, builder.DataBuffer.ToFullArray());
builder.AddBool(0, true, false);
builder.EndObject();
Assert.ArrayEqual(new byte[]
0, 0, 0, // padding
1, // value 0
},
- builder.DataBuffer.Data);
+ builder.DataBuffer.ToFullArray());
}
[FlatBuffersTestMethod]
{
var builder = new FlatBufferBuilder(1);
builder.StartObject(1);
- Assert.ArrayEqual(new byte[] { 0 }, builder.DataBuffer.Data);
+ Assert.ArrayEqual(new byte[] { 0 }, builder.DataBuffer.ToFullArray());
builder.AddBool(0, false, false);
builder.EndObject();
Assert.ArrayEqual(new byte[]
// entry 0 is not stored (trimmed end of vtable)
4, 0, 0, 0, // int32 offset for start of vtable
},
- builder.DataBuffer.Data);
+ builder.DataBuffer.ToFullArray());
}
[FlatBuffersTestMethod]
{
var builder = new FlatBufferBuilder(1);
builder.StartObject(1);
- Assert.ArrayEqual(new byte[] { 0 }, builder.DataBuffer.Data);
+ Assert.ArrayEqual(new byte[] { 0 }, builder.DataBuffer.ToFullArray());
builder.AddShort(0, 0x789A, 0);
builder.EndObject();
Assert.ArrayEqual(new byte[]
0, 0, // padding
0x9A, 0x78, //value 0
},
- builder.DataBuffer.Data);
+ builder.DataBuffer.ToFullArray());
}
[FlatBuffersTestMethod]
{
var builder = new FlatBufferBuilder(1);
builder.StartObject(2);
- Assert.ArrayEqual(new byte[] { 0 }, builder.DataBuffer.Data);
+ Assert.ArrayEqual(new byte[] { 0 }, builder.DataBuffer.ToFullArray());
builder.AddShort(0, 0x3456, 0);
builder.AddShort(1, 0x789A, 0);
builder.EndObject();
0x9A, 0x78, // value 1
0x56, 0x34, // value 0
},
- builder.DataBuffer.Data);
+ builder.DataBuffer.ToFullArray());
}
[FlatBuffersTestMethod]
{
var builder = new FlatBufferBuilder(1);
builder.StartObject(2);
- Assert.ArrayEqual(new byte[] { 0 }, builder.DataBuffer.Data);
+ Assert.ArrayEqual(new byte[] { 0 }, builder.DataBuffer.ToFullArray());
builder.AddShort(0, 0x3456, 0);
builder.AddBool(1, true, false);
builder.EndObject();
0, 1, // padding + value 1
0x56, 0x34, // value 0
},
- builder.DataBuffer.Data);
+ builder.DataBuffer.ToFullArray());
}
[FlatBuffersTestMethod]
4, 0, 0, 0,
0, 0, 0, 0,
},
- builder.DataBuffer.Data);
+ builder.DataBuffer.ToFullArray());
}
[FlatBuffersTestMethod]
0, 0, 55, 0, // value 0
0, 0, 0, 0, // length of vector (not in struct)
},
- builder.DataBuffer.Data);
+ builder.DataBuffer.ToFullArray());
}
0x78, 0x56, // vector value 0
0x34, 0x12, // vector value 1
},
- builder.DataBuffer.Data);
+ builder.DataBuffer.ToFullArray());
}
[FlatBuffersTestMethod]
0x00, 0x00, 0x34, 0x12, // struct value 1
0x00, 0x00, 0x00, 55, // struct value 0
},
- builder.DataBuffer.Data);
+ builder.DataBuffer.ToFullArray());
}
[FlatBuffersTestMethod]
44, // vector 0, 1
33, // vector 0, 0
},
- builder.DataBuffer.Data);
+ builder.DataBuffer.ToFullArray());
}
[FlatBuffersTestMethod]
var off = builder.EndObject();
builder.Finish(off);
- Assert.ArrayEqual(new byte[]
+ byte[] padded = new byte[]
{
0, 0, 0, 0,
0, 0, 0, 0,
66, 0, // value 1
0, 33, // value 0
- },
- builder.DataBuffer.Data);
+ };
+ Assert.ArrayEqual(padded, builder.DataBuffer.ToFullArray());
+
+ // no padding in sized array
+ byte[] unpadded = new byte[padded.Length - 12];
+ Buffer.BlockCopy(padded, 12, unpadded, 0, unpadded.Length);
+ Assert.ArrayEqual(unpadded, builder.DataBuffer.ToSizedArray());
}
[FlatBuffersTestMethod]
44, // value 1, 0
33,
},
- builder.DataBuffer.Data);
+ builder.DataBuffer.ToFullArray());
}
[FlatBuffersTestMethod]
var off = builder.EndObject();
builder.Finish(off);
- Assert.ArrayEqual(new byte[]
+ byte[] padded = new byte[]
{
0, 0, 0, 0,
0, 0, 0, 0,
1, 1, 1, 1, // values
1, 1, 1, 1,
- },
- builder.DataBuffer.Data);
+ };
+ Assert.ArrayEqual(padded, builder.DataBuffer.ToFullArray());
+
+ // no padding in sized array
+ byte[] unpadded = new byte[padded.Length - 28];
+ Buffer.BlockCopy(padded, 28, unpadded, 0, unpadded.Length);
+ Assert.ArrayEqual(unpadded, builder.DataBuffer.ToSizedArray());
+ }
+
+ [FlatBuffersTestMethod]
+ public void TestBunchOfBoolsSizePrefixed()
+ {
+ var builder = new FlatBufferBuilder(1);
+ builder.StartObject(8);
+ for (var i = 0; i < 8; i++)
+ {
+ builder.AddBool(i, true, false);
+ }
+ var off = builder.EndObject();
+ builder.FinishSizePrefixed(off);
+
+ byte[] padded = new byte[]
+ {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0, // padding to 64 bytes
+
+ 36, 0, 0, 0, // size prefix
+ 24, 0, 0, 0, // root of table, pointing to vtable offset (obj0)
+ 20, 0, // vtable bytes
+ 12, 0, // object length
+ 11, 0, // start of value 0
+ 10, 0, // start of value 1
+ 9, 0, // start of value 2
+ 8, 0, // start of value 3
+ 7, 0, // start of value 4
+ 6, 0, // start of value 5
+ 5, 0, // start of value 6
+ 4, 0, // start of value 7
+
+ 20, 0, 0, 0, // int32 offset for start of vtable
+
+ 1, 1, 1, 1, // values
+ 1, 1, 1, 1,
+
+ };
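+ // The 36 in the size prefix is the number of bytes that follow it:
+ // 4 (root offset) + 20 (vtable) + 4 (table's soffset to the vtable) + 8 (bool values).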
+ Assert.ArrayEqual(padded, builder.DataBuffer.ToFullArray());
+
+ // no padding in sized array
+ byte[] unpadded = new byte[padded.Length - 24];
+ Buffer.BlockCopy(padded, 24, unpadded, 0, unpadded.Length);
+ Assert.ArrayEqual(unpadded, builder.DataBuffer.ToSizedArray());
}
[FlatBuffersTestMethod]
0, 0, 128, 63, // value
},
- builder.DataBuffer.Data);
+ builder.DataBuffer.ToFullArray());
}
private void CheckObjects(int fieldCount, int objectCount)
# Testing C# on Linux using Mono.
-mcs -out:fbnettest.exe ../../net/FlatBuffers/*.cs ../MyGame/Example/*.cs FlatBuffersTestClassAttribute.cs FlatBuffersTestMethodAttribute.cs Assert.cs FlatBuffersExampleTests.cs Program.cs ByteBufferTests.cs FlatBufferBuilderTests.cs FlatBuffersFuzzTests.cs FuzzTestData.cs Lcg.cs TestTable.cs
-./fbnettest.exe
+mcs -debug -out:./fbnettest.exe \
+ ../../net/FlatBuffers/*.cs ../MyGame/Example/*.cs ../MyGame/*.cs \
+ FlatBuffersTestClassAttribute.cs FlatBuffersTestMethodAttribute.cs Assert.cs FlatBuffersExampleTests.cs Program.cs ByteBufferTests.cs FlatBufferBuilderTests.cs FlatBuffersFuzzTests.cs FuzzTestData.cs Lcg.cs TestTable.cs
+mono --debug ./fbnettest.exe
rm fbnettest.exe
rm Resources/monsterdata_cstest.mon
+rm Resources/monsterdata_cstest_sp.mon
+
+# Repeat with unsafe versions
+
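+# -d:UNSAFE_BYTEBUFFER selects the pointer-based ByteBuffer implementation and -unsafe
+# lets mcs compile it, so both the safe and unsafe code paths get exercised.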
+mcs -debug -out:./fbnettest.exe \
+ -unsafe -d:UNSAFE_BYTEBUFFER \
+ ../../net/FlatBuffers/*.cs ../MyGame/Example/*.cs ../MyGame/*.cs \
+ FlatBuffersTestClassAttribute.cs FlatBuffersTestMethodAttribute.cs Assert.cs FlatBuffersExampleTests.cs Program.cs ByteBufferTests.cs FlatBufferBuilderTests.cs FlatBuffersFuzzTests.cs FuzzTestData.cs Lcg.cs TestTable.cs
+mono --debug ./fbnettest.exe
+rm fbnettest.exe
+rm Resources/monsterdata_cstest.mon
+rm Resources/monsterdata_cstest_sp.mon
import MyGame.Example.*;
import NamespaceA.*;
import NamespaceA.NamespaceB.*;
+import com.google.flatbuffers.ByteBufferUtil;
+import static com.google.flatbuffers.Constants.*;
import com.google.flatbuffers.FlatBufferBuilder;
class JavaTest {
// better for performance.
FlatBufferBuilder fbb = new FlatBufferBuilder(1);
- TestBuilderBasics(fbb);
+ TestBuilderBasics(fbb, true);
+ TestBuilderBasics(fbb, false);
TestExtendedBuffer(fbb.dataBuffer().asReadOnlyBuffer());
FlatBufferBuilder fbb = new FlatBufferBuilder(1, new MappedByteBufferFactory());
- TestBuilderBasics(fbb);
+ TestBuilderBasics(fbb, false);
}
static void TestSizedInputStream() {
// Test on default FlatBufferBuilder that uses HeapByteBuffer
FlatBufferBuilder fbb = new FlatBufferBuilder(1);
- TestBuilderBasics(fbb);
+ TestBuilderBasics(fbb, false);
InputStream in = fbb.sizedInputStream();
byte[] array = fbb.sizedByteArray();
TestEq(count, array.length);
}
- static void TestBuilderBasics(FlatBufferBuilder fbb) {
+ static void TestBuilderBasics(FlatBufferBuilder fbb, boolean sizePrefix) {
int[] names = {fbb.createString("Frodo"), fbb.createString("Barney"), fbb.createString("Wilma")};
int[] off = new int[3];
Monster.startMonster(fbb);
Monster.addTestarrayoftables(fbb, sortMons);
int mon = Monster.endMonster(fbb);
- Monster.finishMonsterBuffer(fbb, mon);
+ if (sizePrefix) {
+ Monster.finishSizePrefixedMonsterBuffer(fbb, mon);
+ } else {
+ Monster.finishMonsterBuffer(fbb, mon);
+ }
// Write the result to a file for debugging purposes:
// Note that the binaries are not necessarily identical, since the JSON
// Java code. They are functionally equivalent though.
try {
- FileChannel fc = new FileOutputStream("monsterdata_java_wire.mon").getChannel();
+ String filename = "monsterdata_java_wire" + (sizePrefix ? "_sp" : "") + ".mon";
+ FileChannel fc = new FileOutputStream(filename).getChannel();
fc.write(fbb.dataBuffer().duplicate());
fc.close();
} catch(java.io.IOException e) {
}
// Test it:
- TestExtendedBuffer(fbb.dataBuffer());
+ ByteBuffer dataBuffer = fbb.dataBuffer();
+ if (sizePrefix) {
+ TestEq(ByteBufferUtil.getSizePrefix(dataBuffer) + SIZE_PREFIX_LENGTH,
+ dataBuffer.remaining());
+ dataBuffer = ByteBufferUtil.removeSizePrefix(dataBuffer);
+ }
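+ // getSizePrefix reads the leading little-endian int and removeSizePrefix returns a
+ // view of the same data positioned just past it, so the remaining checks work on
+ // both the size-prefixed and plain buffers.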
+ TestExtendedBuffer(dataBuffer);
// Make sure it also works with read only ByteBuffers. This is slower,
// since creating strings incurs an additional copy
// (see Table.__string).
- TestExtendedBuffer(fbb.dataBuffer().asReadOnlyBuffer());
+ TestExtendedBuffer(dataBuffer.asReadOnlyBuffer());
TestEnums();
//Attempt to mutate Monster fields and check whether the buffer has been mutated properly
// revert to original values after testing
- Monster monster = Monster.getRootAsMonster(fbb.dataBuffer());
+ Monster monster = Monster.getRootAsMonster(dataBuffer);
// mana is optional and does not exist in the buffer so the mutation should fail
// the mana field should retain its default value
public ArraySegment<byte>? GetVectorOfDoublesBytes() { return __p.__vector_as_arraysegment(70); }
public bool MutateVectorOfDoubles(int j, double vector_of_doubles) { int o = __p.__offset(70); if (o != 0) { __p.bb.PutDouble(__p.__vector(o) + j * 8, vector_of_doubles); return true; } else { return false; } }
public MyGame.InParentNamespace? ParentNamespaceTest { get { int o = __p.__offset(72); return o != 0 ? (MyGame.InParentNamespace?)(new MyGame.InParentNamespace()).__assign(__p.__indirect(o + __p.bb_pos), __p.bb) : null; } }
+ public Referrable? VectorOfReferrables(int j) { int o = __p.__offset(74); return o != 0 ? (Referrable?)(new Referrable()).__assign(__p.__indirect(__p.__vector(o) + j * 4), __p.bb) : null; }
+ public int VectorOfReferrablesLength { get { int o = __p.__offset(74); return o != 0 ? __p.__vector_len(o) : 0; } }
+ public Referrable? VectorOfReferrablesByKey(ulong key) { int o = __p.__offset(74); return o != 0 ? Referrable.__lookup_by_key(__p.__vector(o), key, __p.bb) : null; }
+ public ulong SingleWeakReference { get { int o = __p.__offset(76); return o != 0 ? __p.bb.GetUlong(o + __p.bb_pos) : (ulong)0; } }
+ public bool MutateSingleWeakReference(ulong single_weak_reference) { int o = __p.__offset(76); if (o != 0) { __p.bb.PutUlong(o + __p.bb_pos, single_weak_reference); return true; } else { return false; } }
+ public ulong VectorOfWeakReferences(int j) { int o = __p.__offset(78); return o != 0 ? __p.bb.GetUlong(__p.__vector(o) + j * 8) : (ulong)0; }
+ public int VectorOfWeakReferencesLength { get { int o = __p.__offset(78); return o != 0 ? __p.__vector_len(o) : 0; } }
+ public ArraySegment<byte>? GetVectorOfWeakReferencesBytes() { return __p.__vector_as_arraysegment(78); }
+ public bool MutateVectorOfWeakReferences(int j, ulong vector_of_weak_references) { int o = __p.__offset(78); if (o != 0) { __p.bb.PutUlong(__p.__vector(o) + j * 8, vector_of_weak_references); return true; } else { return false; } }
+ public Referrable? VectorOfStrongReferrables(int j) { int o = __p.__offset(80); return o != 0 ? (Referrable?)(new Referrable()).__assign(__p.__indirect(__p.__vector(o) + j * 4), __p.bb) : null; }
+ public int VectorOfStrongReferrablesLength { get { int o = __p.__offset(80); return o != 0 ? __p.__vector_len(o) : 0; } }
+ public Referrable? VectorOfStrongReferrablesByKey(ulong key) { int o = __p.__offset(80); return o != 0 ? Referrable.__lookup_by_key(__p.__vector(o), key, __p.bb) : null; }
+ public ulong CoOwningReference { get { int o = __p.__offset(82); return o != 0 ? __p.bb.GetUlong(o + __p.bb_pos) : (ulong)0; } }
+ public bool MutateCoOwningReference(ulong co_owning_reference) { int o = __p.__offset(82); if (o != 0) { __p.bb.PutUlong(o + __p.bb_pos, co_owning_reference); return true; } else { return false; } }
+ public ulong VectorOfCoOwningReferences(int j) { int o = __p.__offset(84); return o != 0 ? __p.bb.GetUlong(__p.__vector(o) + j * 8) : (ulong)0; }
+ public int VectorOfCoOwningReferencesLength { get { int o = __p.__offset(84); return o != 0 ? __p.__vector_len(o) : 0; } }
+ public ArraySegment<byte>? GetVectorOfCoOwningReferencesBytes() { return __p.__vector_as_arraysegment(84); }
+ public bool MutateVectorOfCoOwningReferences(int j, ulong vector_of_co_owning_references) { int o = __p.__offset(84); if (o != 0) { __p.bb.PutUlong(__p.__vector(o) + j * 8, vector_of_co_owning_references); return true; } else { return false; } }
+ public ulong NonOwningReference { get { int o = __p.__offset(86); return o != 0 ? __p.bb.GetUlong(o + __p.bb_pos) : (ulong)0; } }
+ public bool MutateNonOwningReference(ulong non_owning_reference) { int o = __p.__offset(86); if (o != 0) { __p.bb.PutUlong(o + __p.bb_pos, non_owning_reference); return true; } else { return false; } }
+ public ulong VectorOfNonOwningReferences(int j) { int o = __p.__offset(88); return o != 0 ? __p.bb.GetUlong(__p.__vector(o) + j * 8) : (ulong)0; }
+ public int VectorOfNonOwningReferencesLength { get { int o = __p.__offset(88); return o != 0 ? __p.__vector_len(o) : 0; } }
+ public ArraySegment<byte>? GetVectorOfNonOwningReferencesBytes() { return __p.__vector_as_arraysegment(88); }
+ public bool MutateVectorOfNonOwningReferences(int j, ulong vector_of_non_owning_references) { int o = __p.__offset(88); if (o != 0) { __p.bb.PutUlong(__p.__vector(o) + j * 8, vector_of_non_owning_references); return true; } else { return false; } }
- public static void StartMonster(FlatBufferBuilder builder) { builder.StartObject(35); }
+ public static void StartMonster(FlatBufferBuilder builder) { builder.StartObject(43); }
public static void AddPos(FlatBufferBuilder builder, Offset<Vec3> posOffset) { builder.AddStruct(0, posOffset.Value, 0); }
public static void AddMana(FlatBufferBuilder builder, short mana) { builder.AddShort(1, mana, 150); }
public static void AddHp(FlatBufferBuilder builder, short hp) { builder.AddShort(2, hp, 100); }
public static VectorOffset CreateVectorOfDoublesVector(FlatBufferBuilder builder, double[] data) { builder.StartVector(8, data.Length, 8); for (int i = data.Length - 1; i >= 0; i--) builder.AddDouble(data[i]); return builder.EndVector(); }
public static void StartVectorOfDoublesVector(FlatBufferBuilder builder, int numElems) { builder.StartVector(8, numElems, 8); }
public static void AddParentNamespaceTest(FlatBufferBuilder builder, Offset<MyGame.InParentNamespace> parentNamespaceTestOffset) { builder.AddOffset(34, parentNamespaceTestOffset.Value, 0); }
+ public static void AddVectorOfReferrables(FlatBufferBuilder builder, VectorOffset vectorOfReferrablesOffset) { builder.AddOffset(35, vectorOfReferrablesOffset.Value, 0); }
+ public static VectorOffset CreateVectorOfReferrablesVector(FlatBufferBuilder builder, Offset<Referrable>[] data) { builder.StartVector(4, data.Length, 4); for (int i = data.Length - 1; i >= 0; i--) builder.AddOffset(data[i].Value); return builder.EndVector(); }
+ public static void StartVectorOfReferrablesVector(FlatBufferBuilder builder, int numElems) { builder.StartVector(4, numElems, 4); }
+ public static void AddSingleWeakReference(FlatBufferBuilder builder, ulong singleWeakReference) { builder.AddUlong(36, singleWeakReference, 0); }
+ public static void AddVectorOfWeakReferences(FlatBufferBuilder builder, VectorOffset vectorOfWeakReferencesOffset) { builder.AddOffset(37, vectorOfWeakReferencesOffset.Value, 0); }
+ public static VectorOffset CreateVectorOfWeakReferencesVector(FlatBufferBuilder builder, ulong[] data) { builder.StartVector(8, data.Length, 8); for (int i = data.Length - 1; i >= 0; i--) builder.AddUlong(data[i]); return builder.EndVector(); }
+ public static void StartVectorOfWeakReferencesVector(FlatBufferBuilder builder, int numElems) { builder.StartVector(8, numElems, 8); }
+ public static void AddVectorOfStrongReferrables(FlatBufferBuilder builder, VectorOffset vectorOfStrongReferrablesOffset) { builder.AddOffset(38, vectorOfStrongReferrablesOffset.Value, 0); }
+ public static VectorOffset CreateVectorOfStrongReferrablesVector(FlatBufferBuilder builder, Offset<Referrable>[] data) { builder.StartVector(4, data.Length, 4); for (int i = data.Length - 1; i >= 0; i--) builder.AddOffset(data[i].Value); return builder.EndVector(); }
+ public static void StartVectorOfStrongReferrablesVector(FlatBufferBuilder builder, int numElems) { builder.StartVector(4, numElems, 4); }
+ public static void AddCoOwningReference(FlatBufferBuilder builder, ulong coOwningReference) { builder.AddUlong(39, coOwningReference, 0); }
+ public static void AddVectorOfCoOwningReferences(FlatBufferBuilder builder, VectorOffset vectorOfCoOwningReferencesOffset) { builder.AddOffset(40, vectorOfCoOwningReferencesOffset.Value, 0); }
+ public static VectorOffset CreateVectorOfCoOwningReferencesVector(FlatBufferBuilder builder, ulong[] data) { builder.StartVector(8, data.Length, 8); for (int i = data.Length - 1; i >= 0; i--) builder.AddUlong(data[i]); return builder.EndVector(); }
+ public static void StartVectorOfCoOwningReferencesVector(FlatBufferBuilder builder, int numElems) { builder.StartVector(8, numElems, 8); }
+ public static void AddNonOwningReference(FlatBufferBuilder builder, ulong nonOwningReference) { builder.AddUlong(41, nonOwningReference, 0); }
+ public static void AddVectorOfNonOwningReferences(FlatBufferBuilder builder, VectorOffset vectorOfNonOwningReferencesOffset) { builder.AddOffset(42, vectorOfNonOwningReferencesOffset.Value, 0); }
+ public static VectorOffset CreateVectorOfNonOwningReferencesVector(FlatBufferBuilder builder, ulong[] data) { builder.StartVector(8, data.Length, 8); for (int i = data.Length - 1; i >= 0; i--) builder.AddUlong(data[i]); return builder.EndVector(); }
+ public static void StartVectorOfNonOwningReferencesVector(FlatBufferBuilder builder, int numElems) { builder.StartVector(8, numElems, 8); }
public static Offset<Monster> EndMonster(FlatBufferBuilder builder) {
int o = builder.EndObject();
builder.Required(o, 10); // name
return new Offset<Monster>(o);
}
public static void FinishMonsterBuffer(FlatBufferBuilder builder, Offset<Monster> offset) { builder.Finish(offset.Value, "MONS"); }
+ public static void FinishSizePrefixedMonsterBuffer(FlatBufferBuilder builder, Offset<Monster> offset) { builder.FinishSizePrefixed(offset.Value, "MONS"); }
public static VectorOffset CreateSortedVectorOfMonster(FlatBufferBuilder builder, Offset<Monster>[] offsets) {
Array.Sort(offsets, (Offset<Monster> o1, Offset<Monster> o2) => Table.CompareStrings(Table.__offset(10, o1.Value, builder.DataBuffer), Table.__offset(10, o2.Value, builder.DataBuffer), builder.DataBuffer));
return nil
}
+func (rcv *Monster) VectorOfReferrables(obj *Referrable, j int) bool {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(74))
+ if o != 0 {
+ x := rcv._tab.Vector(o)
+ x += flatbuffers.UOffsetT(j) * 4
+ x = rcv._tab.Indirect(x)
+ obj.Init(rcv._tab.Bytes, x)
+ return true
+ }
+ return false
+}
+
+func (rcv *Monster) VectorOfReferrablesLength() int {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(74))
+ if o != 0 {
+ return rcv._tab.VectorLen(o)
+ }
+ return 0
+}
+
+func (rcv *Monster) SingleWeakReference() uint64 {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(76))
+ if o != 0 {
+ return rcv._tab.GetUint64(o + rcv._tab.Pos)
+ }
+ return 0
+}
+
+func (rcv *Monster) MutateSingleWeakReference(n uint64) bool {
+ return rcv._tab.MutateUint64Slot(76, n)
+}
+
+func (rcv *Monster) VectorOfWeakReferences(j int) uint64 {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(78))
+ if o != 0 {
+ a := rcv._tab.Vector(o)
+ return rcv._tab.GetUint64(a + flatbuffers.UOffsetT(j*8))
+ }
+ return 0
+}
+
+func (rcv *Monster) VectorOfWeakReferencesLength() int {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(78))
+ if o != 0 {
+ return rcv._tab.VectorLen(o)
+ }
+ return 0
+}
+
+func (rcv *Monster) VectorOfStrongReferrables(obj *Referrable, j int) bool {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(80))
+ if o != 0 {
+ x := rcv._tab.Vector(o)
+ x += flatbuffers.UOffsetT(j) * 4
+ x = rcv._tab.Indirect(x)
+ obj.Init(rcv._tab.Bytes, x)
+ return true
+ }
+ return false
+}
+
+func (rcv *Monster) VectorOfStrongReferrablesLength() int {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(80))
+ if o != 0 {
+ return rcv._tab.VectorLen(o)
+ }
+ return 0
+}
+
+func (rcv *Monster) CoOwningReference() uint64 {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(82))
+ if o != 0 {
+ return rcv._tab.GetUint64(o + rcv._tab.Pos)
+ }
+ return 0
+}
+
+func (rcv *Monster) MutateCoOwningReference(n uint64) bool {
+ return rcv._tab.MutateUint64Slot(82, n)
+}
+
+func (rcv *Monster) VectorOfCoOwningReferences(j int) uint64 {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(84))
+ if o != 0 {
+ a := rcv._tab.Vector(o)
+ return rcv._tab.GetUint64(a + flatbuffers.UOffsetT(j*8))
+ }
+ return 0
+}
+
+func (rcv *Monster) VectorOfCoOwningReferencesLength() int {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(84))
+ if o != 0 {
+ return rcv._tab.VectorLen(o)
+ }
+ return 0
+}
+
+func (rcv *Monster) NonOwningReference() uint64 {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(86))
+ if o != 0 {
+ return rcv._tab.GetUint64(o + rcv._tab.Pos)
+ }
+ return 0
+}
+
+func (rcv *Monster) MutateNonOwningReference(n uint64) bool {
+ return rcv._tab.MutateUint64Slot(86, n)
+}
+
+func (rcv *Monster) VectorOfNonOwningReferences(j int) uint64 {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(88))
+ if o != 0 {
+ a := rcv._tab.Vector(o)
+ return rcv._tab.GetUint64(a + flatbuffers.UOffsetT(j*8))
+ }
+ return 0
+}
+
+func (rcv *Monster) VectorOfNonOwningReferencesLength() int {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(88))
+ if o != 0 {
+ return rcv._tab.VectorLen(o)
+ }
+ return 0
+}
+
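+// The eight new reference fields occupy vtable slots 35-42 (table offsets 74-88),
+// which is why MonsterStart below now reserves 43 slots.
+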
func MonsterStart(builder *flatbuffers.Builder) {
- builder.StartObject(35)
+ builder.StartObject(43)
}
func MonsterAddPos(builder *flatbuffers.Builder, pos flatbuffers.UOffsetT) {
builder.PrependStructSlot(0, flatbuffers.UOffsetT(pos), 0)
func MonsterAddParentNamespaceTest(builder *flatbuffers.Builder, parentNamespaceTest flatbuffers.UOffsetT) {
builder.PrependUOffsetTSlot(34, flatbuffers.UOffsetT(parentNamespaceTest), 0)
}
+func MonsterAddVectorOfReferrables(builder *flatbuffers.Builder, vectorOfReferrables flatbuffers.UOffsetT) {
+ builder.PrependUOffsetTSlot(35, flatbuffers.UOffsetT(vectorOfReferrables), 0)
+}
+func MonsterStartVectorOfReferrablesVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
+ return builder.StartVector(4, numElems, 4)
+}
+func MonsterAddSingleWeakReference(builder *flatbuffers.Builder, singleWeakReference uint64) {
+ builder.PrependUint64Slot(36, singleWeakReference, 0)
+}
+func MonsterAddVectorOfWeakReferences(builder *flatbuffers.Builder, vectorOfWeakReferences flatbuffers.UOffsetT) {
+ builder.PrependUOffsetTSlot(37, flatbuffers.UOffsetT(vectorOfWeakReferences), 0)
+}
+func MonsterStartVectorOfWeakReferencesVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
+ return builder.StartVector(8, numElems, 8)
+}
+func MonsterAddVectorOfStrongReferrables(builder *flatbuffers.Builder, vectorOfStrongReferrables flatbuffers.UOffsetT) {
+ builder.PrependUOffsetTSlot(38, flatbuffers.UOffsetT(vectorOfStrongReferrables), 0)
+}
+func MonsterStartVectorOfStrongReferrablesVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
+ return builder.StartVector(4, numElems, 4)
+}
+func MonsterAddCoOwningReference(builder *flatbuffers.Builder, coOwningReference uint64) {
+ builder.PrependUint64Slot(39, coOwningReference, 0)
+}
+func MonsterAddVectorOfCoOwningReferences(builder *flatbuffers.Builder, vectorOfCoOwningReferences flatbuffers.UOffsetT) {
+ builder.PrependUOffsetTSlot(40, flatbuffers.UOffsetT(vectorOfCoOwningReferences), 0)
+}
+func MonsterStartVectorOfCoOwningReferencesVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
+ return builder.StartVector(8, numElems, 8)
+}
+func MonsterAddNonOwningReference(builder *flatbuffers.Builder, nonOwningReference uint64) {
+ builder.PrependUint64Slot(41, nonOwningReference, 0)
+}
+func MonsterAddVectorOfNonOwningReferences(builder *flatbuffers.Builder, vectorOfNonOwningReferences flatbuffers.UOffsetT) {
+ builder.PrependUOffsetTSlot(42, flatbuffers.UOffsetT(vectorOfNonOwningReferences), 0)
+}
+func MonsterStartVectorOfNonOwningReferencesVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
+ return builder.StartVector(8, numElems, 8)
+}
func MonsterEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
return builder.EndObject()
}
public boolean mutateHp(short hp) { int o = __offset(8); if (o != 0) { bb.putShort(o + bb_pos, hp); return true; } else { return false; } }
public String name() { int o = __offset(10); return o != 0 ? __string(o + bb_pos) : null; }
public ByteBuffer nameAsByteBuffer() { return __vector_as_bytebuffer(10, 1); }
+ public ByteBuffer nameInByteBuffer(ByteBuffer _bb) { return __vector_in_bytebuffer(_bb, 10, 1); }
public int inventory(int j) { int o = __offset(14); return o != 0 ? bb.get(__vector(o) + j * 1) & 0xFF : 0; }
public int inventoryLength() { int o = __offset(14); return o != 0 ? __vector_len(o) : 0; }
public ByteBuffer inventoryAsByteBuffer() { return __vector_as_bytebuffer(14, 1); }
+ public ByteBuffer inventoryInByteBuffer(ByteBuffer _bb) { return __vector_in_bytebuffer(_bb, 14, 1); }
public boolean mutateInventory(int j, int inventory) { int o = __offset(14); if (o != 0) { bb.put(__vector(o) + j * 1, (byte)inventory); return true; } else { return false; } }
public byte color() { int o = __offset(16); return o != 0 ? bb.get(o + bb_pos) : 8; }
public boolean mutateColor(byte color) { int o = __offset(16); if (o != 0) { bb.put(o + bb_pos, color); return true; } else { return false; } }
public Monster testarrayoftables(int j) { return testarrayoftables(new Monster(), j); }
public Monster testarrayoftables(Monster obj, int j) { int o = __offset(26); return o != 0 ? obj.__assign(__indirect(__vector(o) + j * 4), bb) : null; }
public int testarrayoftablesLength() { int o = __offset(26); return o != 0 ? __vector_len(o) : 0; }
- public Monster testarrayoftablesByKey(String key) { int o = __offset(26); return o != 0 ? Monster.__lookup_by_key(__vector(o), key, bb) : null; }
+ public Monster testarrayoftablesByKey(String key) { int o = __offset(26); return o != 0 ? Monster.__lookup_by_key(null, __vector(o), key, bb) : null; }
+ public Monster testarrayoftablesByKey(Monster obj, String key) { int o = __offset(26); return o != 0 ? Monster.__lookup_by_key(obj, __vector(o), key, bb) : null; }
public Monster enemy() { return enemy(new Monster()); }
public Monster enemy(Monster obj) { int o = __offset(28); return o != 0 ? obj.__assign(__indirect(o + bb_pos), bb) : null; }
public int testnestedflatbuffer(int j) { int o = __offset(30); return o != 0 ? bb.get(__vector(o) + j * 1) & 0xFF : 0; }
public int testnestedflatbufferLength() { int o = __offset(30); return o != 0 ? __vector_len(o) : 0; }
public ByteBuffer testnestedflatbufferAsByteBuffer() { return __vector_as_bytebuffer(30, 1); }
+ public ByteBuffer testnestedflatbufferInByteBuffer(ByteBuffer _bb) { return __vector_in_bytebuffer(_bb, 30, 1); }
public Monster testnestedflatbufferAsMonster() { return testnestedflatbufferAsMonster(new Monster()); }
public Monster testnestedflatbufferAsMonster(Monster obj) { int o = __offset(30); return o != 0 ? obj.__assign(__indirect(__vector(o)), bb) : null; }
public boolean mutateTestnestedflatbuffer(int j, int testnestedflatbuffer) { int o = __offset(30); if (o != 0) { bb.put(__vector(o) + j * 1, (byte)testnestedflatbuffer); return true; } else { return false; } }
public boolean testarrayofbools(int j) { int o = __offset(52); return o != 0 ? 0!=bb.get(__vector(o) + j * 1) : false; }
public int testarrayofboolsLength() { int o = __offset(52); return o != 0 ? __vector_len(o) : 0; }
public ByteBuffer testarrayofboolsAsByteBuffer() { return __vector_as_bytebuffer(52, 1); }
+ public ByteBuffer testarrayofboolsInByteBuffer(ByteBuffer _bb) { return __vector_in_bytebuffer(_bb, 52, 1); }
public boolean mutateTestarrayofbools(int j, boolean testarrayofbools) { int o = __offset(52); if (o != 0) { bb.put(__vector(o) + j * 1, (byte)(testarrayofbools ? 1 : 0)); return true; } else { return false; } }
public float testf() { int o = __offset(54); return o != 0 ? bb.getFloat(o + bb_pos) : 3.14159f; }
public boolean mutateTestf(float testf) { int o = __offset(54); if (o != 0) { bb.putFloat(o + bb_pos, testf); return true; } else { return false; } }
public int flex(int j) { int o = __offset(64); return o != 0 ? bb.get(__vector(o) + j * 1) & 0xFF : 0; }
public int flexLength() { int o = __offset(64); return o != 0 ? __vector_len(o) : 0; }
public ByteBuffer flexAsByteBuffer() { return __vector_as_bytebuffer(64, 1); }
+ public ByteBuffer flexInByteBuffer(ByteBuffer _bb) { return __vector_in_bytebuffer(_bb, 64, 1); }
public boolean mutateFlex(int j, int flex) { int o = __offset(64); if (o != 0) { bb.put(__vector(o) + j * 1, (byte)flex); return true; } else { return false; } }
public Test test5(int j) { return test5(new Test(), j); }
public Test test5(Test obj, int j) { int o = __offset(66); return o != 0 ? obj.__assign(__vector(o) + j * 4, bb) : null; }
public long vectorOfLongs(int j) { int o = __offset(68); return o != 0 ? bb.getLong(__vector(o) + j * 8) : 0; }
public int vectorOfLongsLength() { int o = __offset(68); return o != 0 ? __vector_len(o) : 0; }
public ByteBuffer vectorOfLongsAsByteBuffer() { return __vector_as_bytebuffer(68, 8); }
+ public ByteBuffer vectorOfLongsInByteBuffer(ByteBuffer _bb) { return __vector_in_bytebuffer(_bb, 68, 8); }
public boolean mutateVectorOfLongs(int j, long vector_of_longs) { int o = __offset(68); if (o != 0) { bb.putLong(__vector(o) + j * 8, vector_of_longs); return true; } else { return false; } }
public double vectorOfDoubles(int j) { int o = __offset(70); return o != 0 ? bb.getDouble(__vector(o) + j * 8) : 0; }
public int vectorOfDoublesLength() { int o = __offset(70); return o != 0 ? __vector_len(o) : 0; }
public ByteBuffer vectorOfDoublesAsByteBuffer() { return __vector_as_bytebuffer(70, 8); }
+ public ByteBuffer vectorOfDoublesInByteBuffer(ByteBuffer _bb) { return __vector_in_bytebuffer(_bb, 70, 8); }
public boolean mutateVectorOfDoubles(int j, double vector_of_doubles) { int o = __offset(70); if (o != 0) { bb.putDouble(__vector(o) + j * 8, vector_of_doubles); return true; } else { return false; } }
public MyGame.InParentNamespace parentNamespaceTest() { return parentNamespaceTest(new MyGame.InParentNamespace()); }
public MyGame.InParentNamespace parentNamespaceTest(MyGame.InParentNamespace obj) { int o = __offset(72); return o != 0 ? obj.__assign(__indirect(o + bb_pos), bb) : null; }
+ public Referrable vectorOfReferrables(int j) { return vectorOfReferrables(new Referrable(), j); }
+ public Referrable vectorOfReferrables(Referrable obj, int j) { int o = __offset(74); return o != 0 ? obj.__assign(__indirect(__vector(o) + j * 4), bb) : null; }
+ public int vectorOfReferrablesLength() { int o = __offset(74); return o != 0 ? __vector_len(o) : 0; }
+ public Referrable vectorOfReferrablesByKey(long key) { int o = __offset(74); return o != 0 ? Referrable.__lookup_by_key(null, __vector(o), key, bb) : null; }
+ public Referrable vectorOfReferrablesByKey(Referrable obj, long key) { int o = __offset(74); return o != 0 ? Referrable.__lookup_by_key(obj, __vector(o), key, bb) : null; }
+ public long singleWeakReference() { int o = __offset(76); return o != 0 ? bb.getLong(o + bb_pos) : 0L; }
+ public boolean mutateSingleWeakReference(long single_weak_reference) { int o = __offset(76); if (o != 0) { bb.putLong(o + bb_pos, single_weak_reference); return true; } else { return false; } }
+ public long vectorOfWeakReferences(int j) { int o = __offset(78); return o != 0 ? bb.getLong(__vector(o) + j * 8) : 0; }
+ public int vectorOfWeakReferencesLength() { int o = __offset(78); return o != 0 ? __vector_len(o) : 0; }
+ public ByteBuffer vectorOfWeakReferencesAsByteBuffer() { return __vector_as_bytebuffer(78, 8); }
+ public ByteBuffer vectorOfWeakReferencesInByteBuffer(ByteBuffer _bb) { return __vector_in_bytebuffer(_bb, 78, 8); }
+ public boolean mutateVectorOfWeakReferences(int j, long vector_of_weak_references) { int o = __offset(78); if (o != 0) { bb.putLong(__vector(o) + j * 8, vector_of_weak_references); return true; } else { return false; } }
+ public Referrable vectorOfStrongReferrables(int j) { return vectorOfStrongReferrables(new Referrable(), j); }
+ public Referrable vectorOfStrongReferrables(Referrable obj, int j) { int o = __offset(80); return o != 0 ? obj.__assign(__indirect(__vector(o) + j * 4), bb) : null; }
+ public int vectorOfStrongReferrablesLength() { int o = __offset(80); return o != 0 ? __vector_len(o) : 0; }
+ public Referrable vectorOfStrongReferrablesByKey(long key) { int o = __offset(80); return o != 0 ? Referrable.__lookup_by_key(null, __vector(o), key, bb) : null; }
+ public Referrable vectorOfStrongReferrablesByKey(Referrable obj, long key) { int o = __offset(80); return o != 0 ? Referrable.__lookup_by_key(obj, __vector(o), key, bb) : null; }
+ public long coOwningReference() { int o = __offset(82); return o != 0 ? bb.getLong(o + bb_pos) : 0L; }
+ public boolean mutateCoOwningReference(long co_owning_reference) { int o = __offset(82); if (o != 0) { bb.putLong(o + bb_pos, co_owning_reference); return true; } else { return false; } }
+ public long vectorOfCoOwningReferences(int j) { int o = __offset(84); return o != 0 ? bb.getLong(__vector(o) + j * 8) : 0; }
+ public int vectorOfCoOwningReferencesLength() { int o = __offset(84); return o != 0 ? __vector_len(o) : 0; }
+ public ByteBuffer vectorOfCoOwningReferencesAsByteBuffer() { return __vector_as_bytebuffer(84, 8); }
+ public ByteBuffer vectorOfCoOwningReferencesInByteBuffer(ByteBuffer _bb) { return __vector_in_bytebuffer(_bb, 84, 8); }
+ public boolean mutateVectorOfCoOwningReferences(int j, long vector_of_co_owning_references) { int o = __offset(84); if (o != 0) { bb.putLong(__vector(o) + j * 8, vector_of_co_owning_references); return true; } else { return false; } }
+ public long nonOwningReference() { int o = __offset(86); return o != 0 ? bb.getLong(o + bb_pos) : 0L; }
+ public boolean mutateNonOwningReference(long non_owning_reference) { int o = __offset(86); if (o != 0) { bb.putLong(o + bb_pos, non_owning_reference); return true; } else { return false; } }
+ public long vectorOfNonOwningReferences(int j) { int o = __offset(88); return o != 0 ? bb.getLong(__vector(o) + j * 8) : 0; }
+ public int vectorOfNonOwningReferencesLength() { int o = __offset(88); return o != 0 ? __vector_len(o) : 0; }
+ public ByteBuffer vectorOfNonOwningReferencesAsByteBuffer() { return __vector_as_bytebuffer(88, 8); }
+ public ByteBuffer vectorOfNonOwningReferencesInByteBuffer(ByteBuffer _bb) { return __vector_in_bytebuffer(_bb, 88, 8); }
+ public boolean mutateVectorOfNonOwningReferences(int j, long vector_of_non_owning_references) { int o = __offset(88); if (o != 0) { bb.putLong(__vector(o) + j * 8, vector_of_non_owning_references); return true; } else { return false; } }
- public static void startMonster(FlatBufferBuilder builder) { builder.startObject(35); }
+ public static void startMonster(FlatBufferBuilder builder) { builder.startObject(43); }
public static void addPos(FlatBufferBuilder builder, int posOffset) { builder.addStruct(0, posOffset, 0); }
public static void addMana(FlatBufferBuilder builder, short mana) { builder.addShort(1, mana, 150); }
public static void addHp(FlatBufferBuilder builder, short hp) { builder.addShort(2, hp, 100); }
public static int createVectorOfDoublesVector(FlatBufferBuilder builder, double[] data) { builder.startVector(8, data.length, 8); for (int i = data.length - 1; i >= 0; i--) builder.addDouble(data[i]); return builder.endVector(); }
public static void startVectorOfDoublesVector(FlatBufferBuilder builder, int numElems) { builder.startVector(8, numElems, 8); }
public static void addParentNamespaceTest(FlatBufferBuilder builder, int parentNamespaceTestOffset) { builder.addOffset(34, parentNamespaceTestOffset, 0); }
+ public static void addVectorOfReferrables(FlatBufferBuilder builder, int vectorOfReferrablesOffset) { builder.addOffset(35, vectorOfReferrablesOffset, 0); }
+ public static int createVectorOfReferrablesVector(FlatBufferBuilder builder, int[] data) { builder.startVector(4, data.length, 4); for (int i = data.length - 1; i >= 0; i--) builder.addOffset(data[i]); return builder.endVector(); }
+ public static void startVectorOfReferrablesVector(FlatBufferBuilder builder, int numElems) { builder.startVector(4, numElems, 4); }
+ public static void addSingleWeakReference(FlatBufferBuilder builder, long singleWeakReference) { builder.addLong(36, singleWeakReference, 0L); }
+ public static void addVectorOfWeakReferences(FlatBufferBuilder builder, int vectorOfWeakReferencesOffset) { builder.addOffset(37, vectorOfWeakReferencesOffset, 0); }
+ public static int createVectorOfWeakReferencesVector(FlatBufferBuilder builder, long[] data) { builder.startVector(8, data.length, 8); for (int i = data.length - 1; i >= 0; i--) builder.addLong(data[i]); return builder.endVector(); }
+ public static void startVectorOfWeakReferencesVector(FlatBufferBuilder builder, int numElems) { builder.startVector(8, numElems, 8); }
+ public static void addVectorOfStrongReferrables(FlatBufferBuilder builder, int vectorOfStrongReferrablesOffset) { builder.addOffset(38, vectorOfStrongReferrablesOffset, 0); }
+ public static int createVectorOfStrongReferrablesVector(FlatBufferBuilder builder, int[] data) { builder.startVector(4, data.length, 4); for (int i = data.length - 1; i >= 0; i--) builder.addOffset(data[i]); return builder.endVector(); }
+ public static void startVectorOfStrongReferrablesVector(FlatBufferBuilder builder, int numElems) { builder.startVector(4, numElems, 4); }
+ public static void addCoOwningReference(FlatBufferBuilder builder, long coOwningReference) { builder.addLong(39, coOwningReference, 0L); }
+ public static void addVectorOfCoOwningReferences(FlatBufferBuilder builder, int vectorOfCoOwningReferencesOffset) { builder.addOffset(40, vectorOfCoOwningReferencesOffset, 0); }
+ public static int createVectorOfCoOwningReferencesVector(FlatBufferBuilder builder, long[] data) { builder.startVector(8, data.length, 8); for (int i = data.length - 1; i >= 0; i--) builder.addLong(data[i]); return builder.endVector(); }
+ public static void startVectorOfCoOwningReferencesVector(FlatBufferBuilder builder, int numElems) { builder.startVector(8, numElems, 8); }
+ public static void addNonOwningReference(FlatBufferBuilder builder, long nonOwningReference) { builder.addLong(41, nonOwningReference, 0L); }
+ public static void addVectorOfNonOwningReferences(FlatBufferBuilder builder, int vectorOfNonOwningReferencesOffset) { builder.addOffset(42, vectorOfNonOwningReferencesOffset, 0); }
+ public static int createVectorOfNonOwningReferencesVector(FlatBufferBuilder builder, long[] data) { builder.startVector(8, data.length, 8); for (int i = data.length - 1; i >= 0; i--) builder.addLong(data[i]); return builder.endVector(); }
+ public static void startVectorOfNonOwningReferencesVector(FlatBufferBuilder builder, int numElems) { builder.startVector(8, numElems, 8); }
public static int endMonster(FlatBufferBuilder builder) {
int o = builder.endObject();
builder.required(o, 10); // name
return o;
}
public static void finishMonsterBuffer(FlatBufferBuilder builder, int offset) { builder.finish(offset, "MONS"); }
+ public static void finishSizePrefixedMonsterBuffer(FlatBufferBuilder builder, int offset) { builder.finishSizePrefixed(offset, "MONS"); }
@Override
protected int keysCompare(Integer o1, Integer o2, ByteBuffer _bb) { return compareStrings(__offset(10, o1, _bb), __offset(10, o2, _bb), _bb); }
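+ // The added obj parameter lets callers supply a Monster instance to reuse for the
+ // result; passing null keeps the old behaviour of allocating a fresh one.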
- public static Monster __lookup_by_key(int vectorLocation, String key, ByteBuffer bb) {
+ public static Monster __lookup_by_key(Monster obj, int vectorLocation, String key, ByteBuffer bb) {
byte[] byteKey = key.getBytes(Table.UTF8_CHARSET.get());
int span = bb.getInt(vectorLocation - 4);
int start = 0;
start += middle;
span -= middle;
} else {
- return new Monster().__assign(tableOffset, bb);
+ return (obj == null ? new Monster() : obj).__assign(tableOffset, bb);
}
}
return null;
return $o != 0 ? $obj->init($this->__indirect($o + $this->bb_pos), $this->bb) : 0;
}
+ /**
+ * @return Referrable
+ */
+ public function getVectorOfReferrables($j)
+ {
+ $o = $this->__offset(74);
+ $obj = new Referrable();
+ return $o != 0 ? $obj->init($this->__indirect($this->__vector($o) + $j * 4), $this->bb) : null;
+ }
+
+ /**
+ * @return int
+ */
+ public function getVectorOfReferrablesLength()
+ {
+ $o = $this->__offset(74);
+ return $o != 0 ? $this->__vector_len($o) : 0;
+ }
+
+ /**
+ * @return ulong
+ */
+ public function getSingleWeakReference()
+ {
+ $o = $this->__offset(76);
+ return $o != 0 ? $this->bb->getUlong($o + $this->bb_pos) : 0;
+ }
+
+ /**
+ * @param int $j
+ * @return ulong
+ */
+ public function getVectorOfWeakReferences($j)
+ {
+ $o = $this->__offset(78);
+ return $o != 0 ? $this->bb->getUlong($this->__vector($o) + $j * 8) : 0;
+ }
+
+ /**
+ * @return int
+ */
+ public function getVectorOfWeakReferencesLength()
+ {
+ $o = $this->__offset(78);
+ return $o != 0 ? $this->__vector_len($o) : 0;
+ }
+
+ /**
+ * @return Referrable
+ */
+ public function getVectorOfStrongReferrables($j)
+ {
+ $o = $this->__offset(80);
+ $obj = new Referrable();
+ return $o != 0 ? $obj->init($this->__indirect($this->__vector($o) + $j * 4), $this->bb) : null;
+ }
+
+ /**
+ * @return int
+ */
+ public function getVectorOfStrongReferrablesLength()
+ {
+ $o = $this->__offset(80);
+ return $o != 0 ? $this->__vector_len($o) : 0;
+ }
+
+ /**
+ * @return ulong
+ */
+ public function getCoOwningReference()
+ {
+ $o = $this->__offset(82);
+ return $o != 0 ? $this->bb->getUlong($o + $this->bb_pos) : 0;
+ }
+
+ /**
+ * @param int $j
+ * @return ulong
+ */
+ public function getVectorOfCoOwningReferences($j)
+ {
+ $o = $this->__offset(84);
+ return $o != 0 ? $this->bb->getUlong($this->__vector($o) + $j * 8) : 0;
+ }
+
+ /**
+ * @return int
+ */
+ public function getVectorOfCoOwningReferencesLength()
+ {
+ $o = $this->__offset(84);
+ return $o != 0 ? $this->__vector_len($o) : 0;
+ }
+
+ /**
+ * @return ulong
+ */
+ public function getNonOwningReference()
+ {
+ $o = $this->__offset(86);
+ return $o != 0 ? $this->bb->getUlong($o + $this->bb_pos) : 0;
+ }
+
+ /**
+ * @param int $j
+ * @return ulong
+ */
+ public function getVectorOfNonOwningReferences($j)
+ {
+ $o = $this->__offset(88);
+ return $o != 0 ? $this->bb->getUlong($this->__vector($o) + $j * 8) : 0;
+ }
+
+ /**
+ * @return int
+ */
+ public function getVectorOfNonOwningReferencesLength()
+ {
+ $o = $this->__offset(88);
+ return $o != 0 ? $this->__vector_len($o) : 0;
+ }
+
/**
* @param FlatBufferBuilder $builder
* @return void
*/
public static function startMonster(FlatBufferBuilder $builder)
{
- $builder->StartObject(35);
+ $builder->StartObject(43);
}
/**
* @param FlatBufferBuilder $builder
* @return Monster
*/
- public static function createMonster(FlatBufferBuilder $builder, $pos, $mana, $hp, $name, $inventory, $color, $test_type, $test, $test4, $testarrayofstring, $testarrayoftables, $enemy, $testnestedflatbuffer, $testempty, $testbool, $testhashs32_fnv1, $testhashu32_fnv1, $testhashs64_fnv1, $testhashu64_fnv1, $testhashs32_fnv1a, $testhashu32_fnv1a, $testhashs64_fnv1a, $testhashu64_fnv1a, $testarrayofbools, $testf, $testf2, $testf3, $testarrayofstring2, $testarrayofsortedstruct, $flex, $test5, $vector_of_longs, $vector_of_doubles, $parent_namespace_test)
+ public static function createMonster(FlatBufferBuilder $builder, $pos, $mana, $hp, $name, $inventory, $color, $test_type, $test, $test4, $testarrayofstring, $testarrayoftables, $enemy, $testnestedflatbuffer, $testempty, $testbool, $testhashs32_fnv1, $testhashu32_fnv1, $testhashs64_fnv1, $testhashu64_fnv1, $testhashs32_fnv1a, $testhashu32_fnv1a, $testhashs64_fnv1a, $testhashu64_fnv1a, $testarrayofbools, $testf, $testf2, $testf3, $testarrayofstring2, $testarrayofsortedstruct, $flex, $test5, $vector_of_longs, $vector_of_doubles, $parent_namespace_test, $vector_of_referrables, $single_weak_reference, $vector_of_weak_references, $vector_of_strong_referrables, $co_owning_reference, $vector_of_co_owning_references, $non_owning_reference, $vector_of_non_owning_references)
{
- $builder->startObject(35);
+ $builder->startObject(43);
self::addPos($builder, $pos);
self::addMana($builder, $mana);
self::addHp($builder, $hp);
self::addVectorOfLongs($builder, $vector_of_longs);
self::addVectorOfDoubles($builder, $vector_of_doubles);
self::addParentNamespaceTest($builder, $parent_namespace_test);
+ self::addVectorOfReferrables($builder, $vector_of_referrables);
+ self::addSingleWeakReference($builder, $single_weak_reference);
+ self::addVectorOfWeakReferences($builder, $vector_of_weak_references);
+ self::addVectorOfStrongReferrables($builder, $vector_of_strong_referrables);
+ self::addCoOwningReference($builder, $co_owning_reference);
+ self::addVectorOfCoOwningReferences($builder, $vector_of_co_owning_references);
+ self::addNonOwningReference($builder, $non_owning_reference);
+ self::addVectorOfNonOwningReferences($builder, $vector_of_non_owning_references);
$o = $builder->endObject();
$builder->required($o, 10); // name
return $o;
$builder->addOffsetX(34, $parentNamespaceTest, 0);
}
+ /**
+ * @param FlatBufferBuilder $builder
+ * @param VectorOffset
+ * @return void
+ */
+ public static function addVectorOfReferrables(FlatBufferBuilder $builder, $vectorOfReferrables)
+ {
+ $builder->addOffsetX(35, $vectorOfReferrables, 0);
+ }
+
+ /**
+ * @param FlatBufferBuilder $builder
+ * @param array offset array
+ * @return int vector offset
+ */
+ public static function createVectorOfReferrablesVector(FlatBufferBuilder $builder, array $data)
+ {
+ $builder->startVector(4, count($data), 4);
+ for ($i = count($data) - 1; $i >= 0; $i--) {
+ $builder->addOffset($data[$i]);
+ }
+ return $builder->endVector();
+ }
+
+ /**
+ * @param FlatBufferBuilder $builder
+ * @param int $numElems
+ * @return void
+ */
+ public static function startVectorOfReferrablesVector(FlatBufferBuilder $builder, $numElems)
+ {
+ $builder->startVector(4, $numElems, 4);
+ }
+
+ /**
+ * @param FlatBufferBuilder $builder
+ * @param ulong
+ * @return void
+ */
+ public static function addSingleWeakReference(FlatBufferBuilder $builder, $singleWeakReference)
+ {
+ $builder->addUlongX(36, $singleWeakReference, 0);
+ }
+
+ /**
+ * @param FlatBufferBuilder $builder
+ * @param VectorOffset
+ * @return void
+ */
+ public static function addVectorOfWeakReferences(FlatBufferBuilder $builder, $vectorOfWeakReferences)
+ {
+ $builder->addOffsetX(37, $vectorOfWeakReferences, 0);
+ }
+
+ /**
+ * @param FlatBufferBuilder $builder
+ * @param array offset array
+ * @return int vector offset
+ */
+ public static function createVectorOfWeakReferencesVector(FlatBufferBuilder $builder, array $data)
+ {
+ $builder->startVector(8, count($data), 8);
+ for ($i = count($data) - 1; $i >= 0; $i--) {
+ $builder->addUlong($data[$i]);
+ }
+ return $builder->endVector();
+ }
+
+ /**
+ * @param FlatBufferBuilder $builder
+ * @param int $numElems
+ * @return void
+ */
+ public static function startVectorOfWeakReferencesVector(FlatBufferBuilder $builder, $numElems)
+ {
+ $builder->startVector(8, $numElems, 8);
+ }
+
+ /**
+ * @param FlatBufferBuilder $builder
+ * @param VectorOffset
+ * @return void
+ */
+ public static function addVectorOfStrongReferrables(FlatBufferBuilder $builder, $vectorOfStrongReferrables)
+ {
+ $builder->addOffsetX(38, $vectorOfStrongReferrables, 0);
+ }
+
+ /**
+ * @param FlatBufferBuilder $builder
+ * @param array offset array
+ * @return int vector offset
+ */
+ public static function createVectorOfStrongReferrablesVector(FlatBufferBuilder $builder, array $data)
+ {
+ $builder->startVector(4, count($data), 4);
+ for ($i = count($data) - 1; $i >= 0; $i--) {
+ $builder->addOffset($data[$i]);
+ }
+ return $builder->endVector();
+ }
+
+ /**
+ * @param FlatBufferBuilder $builder
+ * @param int $numElems
+ * @return void
+ */
+ public static function startVectorOfStrongReferrablesVector(FlatBufferBuilder $builder, $numElems)
+ {
+ $builder->startVector(4, $numElems, 4);
+ }
+
+ /**
+ * @param FlatBufferBuilder $builder
+ * @param ulong
+ * @return void
+ */
+ public static function addCoOwningReference(FlatBufferBuilder $builder, $coOwningReference)
+ {
+ $builder->addUlongX(39, $coOwningReference, 0);
+ }
+
+ /**
+ * @param FlatBufferBuilder $builder
+ * @param VectorOffset
+ * @return void
+ */
+ public static function addVectorOfCoOwningReferences(FlatBufferBuilder $builder, $vectorOfCoOwningReferences)
+ {
+ $builder->addOffsetX(40, $vectorOfCoOwningReferences, 0);
+ }
+
+ /**
+ * @param FlatBufferBuilder $builder
+ * @param array offset array
+ * @return int vector offset
+ */
+ public static function createVectorOfCoOwningReferencesVector(FlatBufferBuilder $builder, array $data)
+ {
+ $builder->startVector(8, count($data), 8);
+ for ($i = count($data) - 1; $i >= 0; $i--) {
+ $builder->addUlong($data[$i]);
+ }
+ return $builder->endVector();
+ }
+
+ /**
+ * @param FlatBufferBuilder $builder
+ * @param int $numElems
+ * @return void
+ */
+ public static function startVectorOfCoOwningReferencesVector(FlatBufferBuilder $builder, $numElems)
+ {
+ $builder->startVector(8, $numElems, 8);
+ }
+
+ /**
+ * @param FlatBufferBuilder $builder
+ * @param ulong
+ * @return void
+ */
+ public static function addNonOwningReference(FlatBufferBuilder $builder, $nonOwningReference)
+ {
+ $builder->addUlongX(41, $nonOwningReference, 0);
+ }
+
+ /**
+ * @param FlatBufferBuilder $builder
+ * @param VectorOffset
+ * @return void
+ */
+ public static function addVectorOfNonOwningReferences(FlatBufferBuilder $builder, $vectorOfNonOwningReferences)
+ {
+ $builder->addOffsetX(42, $vectorOfNonOwningReferences, 0);
+ }
+
+ /**
+ * @param FlatBufferBuilder $builder
+ * @param array offset array
+ * @return int vector offset
+ */
+ public static function createVectorOfNonOwningReferencesVector(FlatBufferBuilder $builder, array $data)
+ {
+ $builder->startVector(8, count($data), 8);
+ for ($i = count($data) - 1; $i >= 0; $i--) {
+ $builder->addUlong($data[$i]);
+ }
+ return $builder->endVector();
+ }
+
+ /**
+ * @param FlatBufferBuilder $builder
+ * @param int $numElems
+ * @return void
+ */
+ public static function startVectorOfNonOwningReferencesVector(FlatBufferBuilder $builder, $numElems)
+ {
+ $builder->startVector(8, $numElems, 8);
+ }
+
/**
* @param FlatBufferBuilder $builder
* @return int table offset
return obj
return None
-def MonsterStart(builder): builder.StartObject(35)
+ # Monster
+ def VectorOfReferrables(self, j):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(74))
+ if o != 0:
+ x = self._tab.Vector(o)
+ x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
+ x = self._tab.Indirect(x)
+ from .Referrable import Referrable
+ obj = Referrable()
+ obj.Init(self._tab.Bytes, x)
+ return obj
+ return None
+
+ # Monster
+ def VectorOfReferrablesLength(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(74))
+ if o != 0:
+ return self._tab.VectorLen(o)
+ return 0
+
+ # Monster
+ def SingleWeakReference(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(76))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Uint64Flags, o + self._tab.Pos)
+ return 0
+
+ # Monster
+ def VectorOfWeakReferences(self, j):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(78))
+ if o != 0:
+ a = self._tab.Vector(o)
+ return self._tab.Get(flatbuffers.number_types.Uint64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
+ return 0
+
+ # Monster
+ def VectorOfWeakReferencesAsNumpy(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(78))
+ if o != 0:
+ return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint64Flags, o)
+ return 0
+
+ # Monster
+ def VectorOfWeakReferencesLength(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(78))
+ if o != 0:
+ return self._tab.VectorLen(o)
+ return 0
+
+ # Monster
+ def VectorOfStrongReferrables(self, j):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(80))
+ if o != 0:
+ x = self._tab.Vector(o)
+ x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
+ x = self._tab.Indirect(x)
+ from .Referrable import Referrable
+ obj = Referrable()
+ obj.Init(self._tab.Bytes, x)
+ return obj
+ return None
+
+ # Monster
+ def VectorOfStrongReferrablesLength(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(80))
+ if o != 0:
+ return self._tab.VectorLen(o)
+ return 0
+
+ # Monster
+ def CoOwningReference(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(82))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Uint64Flags, o + self._tab.Pos)
+ return 0
+
+ # Monster
+ def VectorOfCoOwningReferences(self, j):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(84))
+ if o != 0:
+ a = self._tab.Vector(o)
+ return self._tab.Get(flatbuffers.number_types.Uint64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
+ return 0
+
+ # Monster
+ def VectorOfCoOwningReferencesAsNumpy(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(84))
+ if o != 0:
+ return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint64Flags, o)
+ return 0
+
+ # Monster
+ def VectorOfCoOwningReferencesLength(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(84))
+ if o != 0:
+ return self._tab.VectorLen(o)
+ return 0
+
+ # Monster
+ def NonOwningReference(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(86))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Uint64Flags, o + self._tab.Pos)
+ return 0
+
+ # Monster
+ def VectorOfNonOwningReferences(self, j):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(88))
+ if o != 0:
+ a = self._tab.Vector(o)
+ return self._tab.Get(flatbuffers.number_types.Uint64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
+ return 0
+
+ # Monster
+ def VectorOfNonOwningReferencesAsNumpy(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(88))
+ if o != 0:
+ return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint64Flags, o)
+ return 0
+
+ # Monster
+ def VectorOfNonOwningReferencesLength(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(88))
+ if o != 0:
+ return self._tab.VectorLen(o)
+ return 0
+
+def MonsterStart(builder): builder.StartObject(43)
def MonsterAddPos(builder, pos): builder.PrependStructSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(pos), 0)
def MonsterAddMana(builder, mana): builder.PrependInt16Slot(1, mana, 150)
def MonsterAddHp(builder, hp): builder.PrependInt16Slot(2, hp, 100)
def MonsterAddVectorOfDoubles(builder, vectorOfDoubles): builder.PrependUOffsetTRelativeSlot(33, flatbuffers.number_types.UOffsetTFlags.py_type(vectorOfDoubles), 0)
def MonsterStartVectorOfDoublesVector(builder, numElems): return builder.StartVector(8, numElems, 8)
def MonsterAddParentNamespaceTest(builder, parentNamespaceTest): builder.PrependUOffsetTRelativeSlot(34, flatbuffers.number_types.UOffsetTFlags.py_type(parentNamespaceTest), 0)
+def MonsterAddVectorOfReferrables(builder, vectorOfReferrables): builder.PrependUOffsetTRelativeSlot(35, flatbuffers.number_types.UOffsetTFlags.py_type(vectorOfReferrables), 0)
+def MonsterStartVectorOfReferrablesVector(builder, numElems): return builder.StartVector(4, numElems, 4)
+def MonsterAddSingleWeakReference(builder, singleWeakReference): builder.PrependUint64Slot(36, singleWeakReference, 0)
+def MonsterAddVectorOfWeakReferences(builder, vectorOfWeakReferences): builder.PrependUOffsetTRelativeSlot(37, flatbuffers.number_types.UOffsetTFlags.py_type(vectorOfWeakReferences), 0)
+def MonsterStartVectorOfWeakReferencesVector(builder, numElems): return builder.StartVector(8, numElems, 8)
+def MonsterAddVectorOfStrongReferrables(builder, vectorOfStrongReferrables): builder.PrependUOffsetTRelativeSlot(38, flatbuffers.number_types.UOffsetTFlags.py_type(vectorOfStrongReferrables), 0)
+def MonsterStartVectorOfStrongReferrablesVector(builder, numElems): return builder.StartVector(4, numElems, 4)
+def MonsterAddCoOwningReference(builder, coOwningReference): builder.PrependUint64Slot(39, coOwningReference, 0)
+def MonsterAddVectorOfCoOwningReferences(builder, vectorOfCoOwningReferences): builder.PrependUOffsetTRelativeSlot(40, flatbuffers.number_types.UOffsetTFlags.py_type(vectorOfCoOwningReferences), 0)
+def MonsterStartVectorOfCoOwningReferencesVector(builder, numElems): return builder.StartVector(8, numElems, 8)
+def MonsterAddNonOwningReference(builder, nonOwningReference): builder.PrependUint64Slot(41, nonOwningReference, 0)
+def MonsterAddVectorOfNonOwningReferences(builder, vectorOfNonOwningReferences): builder.PrependUOffsetTRelativeSlot(42, flatbuffers.number_types.UOffsetTFlags.py_type(vectorOfNonOwningReferences), 0)
+def MonsterStartVectorOfNonOwningReferencesVector(builder, numElems): return builder.StartVector(8, numElems, 8)
def MonsterEnd(builder): return builder.EndObject()
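A minimal write-side sketch (not part of the generated file; it assumes the MyGame/Example package layout that flatc emits and the flatbuffers 1.9 Python runtime) showing how the new slot 35..42 helpers above compose:

# Illustrative only -- assumes `from MyGame.Example.Monster import *` and
# `from MyGame.Example.Referrable import *` from the generated package.
import flatbuffers

builder = flatbuffers.Builder(0)

# Tables referenced from a vector must be finished before the vector is started.
refs = []
for key in (1, 2):
    ReferrableStart(builder)
    ReferrableAddId(builder, key)
    refs.append(ReferrableEnd(builder))

# Vector of table offsets: element size 4, alignment 4, filled back to front.
MonsterStartVectorOfReferrablesVector(builder, len(refs))
for off in reversed(refs):
    builder.PrependUOffsetTRelative(off)
referrables = builder.EndVector(len(refs))

MonsterStart(builder)
MonsterAddVectorOfReferrables(builder, referrables)
MonsterAddSingleWeakReference(builder, 0xDEADBEEF)  # raw fnv1a_64 hash value
# (a real Monster also needs its required `name` string field)
builder.Finish(MonsterEnd(builder))
buf = builder.Output()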
-//Generated by flatc compiler (version 1.8.0)
+//Generated by flatc compiler (version 1.9.0)
//If you make any local changes, they will be lost
//source: monster_test.fbs
--- /dev/null
+// <auto-generated>
+// automatically generated by the FlatBuffers compiler, do not modify
+// </auto-generated>
+
+namespace MyGame.Example
+{
+
+using global::System;
+using global::FlatBuffers;
+
+public struct Referrable : IFlatbufferObject
+{
+ private Table __p;
+ public ByteBuffer ByteBuffer { get { return __p.bb; } }
+ public static Referrable GetRootAsReferrable(ByteBuffer _bb) { return GetRootAsReferrable(_bb, new Referrable()); }
+ public static Referrable GetRootAsReferrable(ByteBuffer _bb, Referrable obj) { return (obj.__assign(_bb.GetInt(_bb.Position) + _bb.Position, _bb)); }
+ public void __init(int _i, ByteBuffer _bb) { __p.bb_pos = _i; __p.bb = _bb; }
+ public Referrable __assign(int _i, ByteBuffer _bb) { __init(_i, _bb); return this; }
+
+ public ulong Id { get { int o = __p.__offset(4); return o != 0 ? __p.bb.GetUlong(o + __p.bb_pos) : (ulong)0; } }
+ public bool MutateId(ulong id) { int o = __p.__offset(4); if (o != 0) { __p.bb.PutUlong(o + __p.bb_pos, id); return true; } else { return false; } }
+
+ public static Offset<Referrable> CreateReferrable(FlatBufferBuilder builder,
+ ulong id = 0) {
+ builder.StartObject(1);
+ Referrable.AddId(builder, id);
+ return Referrable.EndReferrable(builder);
+ }
+
+ public static void StartReferrable(FlatBufferBuilder builder) { builder.StartObject(1); }
+ public static void AddId(FlatBufferBuilder builder, ulong id) { builder.AddUlong(0, id, 0); }
+ public static Offset<Referrable> EndReferrable(FlatBufferBuilder builder) {
+ int o = builder.EndObject();
+ return new Offset<Referrable>(o);
+ }
+
+ public static VectorOffset CreateSortedVectorOfReferrable(FlatBufferBuilder builder, Offset<Referrable>[] offsets) {
+ Array.Sort(offsets, (Offset<Referrable> o1, Offset<Referrable> o2) => builder.DataBuffer.GetUlong(Table.__offset(4, o1.Value, builder.DataBuffer)).CompareTo(builder.DataBuffer.GetUlong(Table.__offset(4, o2.Value, builder.DataBuffer))));
+ return builder.CreateVectorOfTables(offsets);
+ }
+
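+  // Binary search over a vector of Referrable tables written with
+  // CreateSortedVectorOfReferrable above: `vectorLocation` is the byte position
+  // of the vector's first element (its length lives 4 bytes before it), each
+  // entry is a 4-byte table offset, and the ulong `id` field is the key.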
+ public static Referrable? __lookup_by_key(int vectorLocation, ulong key, ByteBuffer bb) {
+ int span = bb.GetInt(vectorLocation - 4);
+ int start = 0;
+ while (span != 0) {
+ int middle = span / 2;
+ int tableOffset = Table.__indirect(vectorLocation + 4 * (start + middle), bb);
+ int comp = bb.GetUlong(Table.__offset(4, bb.Length - tableOffset, bb)).CompareTo(key);
+ if (comp > 0) {
+ span = middle;
+ } else if (comp < 0) {
+ middle++;
+ start += middle;
+ span -= middle;
+ } else {
+ return new Referrable().__assign(tableOffset, bb);
+ }
+ }
+ return null;
+ }
+};
+
+
+}
--- /dev/null
+// automatically generated by the FlatBuffers compiler, do not modify
+
+package Example
+
+import (
+ flatbuffers "github.com/google/flatbuffers/go"
+)
+
+type Referrable struct {
+ _tab flatbuffers.Table
+}
+
+func GetRootAsReferrable(buf []byte, offset flatbuffers.UOffsetT) *Referrable {
+ n := flatbuffers.GetUOffsetT(buf[offset:])
+ x := &Referrable{}
+ x.Init(buf, n+offset)
+ return x
+}
+
+func (rcv *Referrable) Init(buf []byte, i flatbuffers.UOffsetT) {
+ rcv._tab.Bytes = buf
+ rcv._tab.Pos = i
+}
+
+func (rcv *Referrable) Table() flatbuffers.Table {
+ return rcv._tab
+}
+
+func (rcv *Referrable) Id() uint64 {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
+ if o != 0 {
+ return rcv._tab.GetUint64(o + rcv._tab.Pos)
+ }
+ return 0
+}
+
+func (rcv *Referrable) MutateId(n uint64) bool {
+ return rcv._tab.MutateUint64Slot(4, n)
+}
+
+func ReferrableStart(builder *flatbuffers.Builder) {
+ builder.StartObject(1)
+}
+func ReferrableAddId(builder *flatbuffers.Builder, id uint64) {
+ builder.PrependUint64Slot(0, id, 0)
+}
+func ReferrableEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
+ return builder.EndObject()
+}
--- /dev/null
+// automatically generated by the FlatBuffers compiler, do not modify
+
+package MyGame.Example;
+
+import java.nio.*;
+import java.lang.*;
+import java.util.*;
+import com.google.flatbuffers.*;
+
+@SuppressWarnings("unused")
+public final class Referrable extends Table {
+ public static Referrable getRootAsReferrable(ByteBuffer _bb) { return getRootAsReferrable(_bb, new Referrable()); }
+ public static Referrable getRootAsReferrable(ByteBuffer _bb, Referrable obj) { _bb.order(ByteOrder.LITTLE_ENDIAN); return (obj.__assign(_bb.getInt(_bb.position()) + _bb.position(), _bb)); }
+ public void __init(int _i, ByteBuffer _bb) { bb_pos = _i; bb = _bb; }
+ public Referrable __assign(int _i, ByteBuffer _bb) { __init(_i, _bb); return this; }
+
+ public long id() { int o = __offset(4); return o != 0 ? bb.getLong(o + bb_pos) : 0L; }
+ public boolean mutateId(long id) { int o = __offset(4); if (o != 0) { bb.putLong(o + bb_pos, id); return true; } else { return false; } }
+
+ public static int createReferrable(FlatBufferBuilder builder,
+ long id) {
+ builder.startObject(1);
+ Referrable.addId(builder, id);
+ return Referrable.endReferrable(builder);
+ }
+
+ public static void startReferrable(FlatBufferBuilder builder) { builder.startObject(1); }
+ public static void addId(FlatBufferBuilder builder, long id) { builder.addLong(0, id, 0L); }
+ public static int endReferrable(FlatBufferBuilder builder) {
+ int o = builder.endObject();
+ return o;
+ }
+
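+  // keysCompare is used by FlatBufferBuilder.createSortedVectorOfTables to
+  // order Referrable offsets by their ulong `id`; __lookup_by_key below performs
+  // the matching binary search at read time (the vector length sits 4 bytes
+  // before vectorLocation and each entry is a 4-byte table offset).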
+ @Override
+ protected int keysCompare(Integer o1, Integer o2, ByteBuffer _bb) {
+ long val_1 = _bb.getLong(__offset(4, o1, _bb));
+ long val_2 = _bb.getLong(__offset(4, o2, _bb));
+ return val_1 > val_2 ? 1 : val_1 < val_2 ? -1 : 0;
+ }
+
+ public static Referrable __lookup_by_key(Referrable obj, int vectorLocation, long key, ByteBuffer bb) {
+ int span = bb.getInt(vectorLocation - 4);
+ int start = 0;
+ while (span != 0) {
+ int middle = span / 2;
+ int tableOffset = __indirect(vectorLocation + 4 * (start + middle), bb);
+ long val = bb.getLong(__offset(4, bb.capacity() - tableOffset, bb));
+ int comp = val > key ? 1 : val < key ? -1 : 0;
+ if (comp > 0) {
+ span = middle;
+ } else if (comp < 0) {
+ middle++;
+ start += middle;
+ span -= middle;
+ } else {
+ return (obj == null ? new Referrable() : obj).__assign(tableOffset, bb);
+ }
+ }
+ return null;
+ }
+}
+
--- /dev/null
+<?php
+// automatically generated by the FlatBuffers compiler, do not modify
+
+namespace MyGame\Example;
+
+use \Google\FlatBuffers\Struct;
+use \Google\FlatBuffers\Table;
+use \Google\FlatBuffers\ByteBuffer;
+use \Google\FlatBuffers\FlatBufferBuilder;
+
+class Referrable extends Table
+{
+ /**
+ * @param ByteBuffer $bb
+ * @return Referrable
+ */
+ public static function getRootAsReferrable(ByteBuffer $bb)
+ {
+ $obj = new Referrable();
+ return ($obj->init($bb->getInt($bb->getPosition()) + $bb->getPosition(), $bb));
+ }
+
+ public static function ReferrableIdentifier()
+ {
+ return "MONS";
+ }
+
+ public static function ReferrableBufferHasIdentifier(ByteBuffer $buf)
+ {
+ return self::__has_identifier($buf, self::ReferrableIdentifier());
+ }
+
+ public static function ReferrableExtension()
+ {
+ return "mon";
+ }
+
+ /**
+ * @param int $_i offset
+ * @param ByteBuffer $_bb
+ * @return Referrable
+ **/
+ public function init($_i, ByteBuffer $_bb)
+ {
+ $this->bb_pos = $_i;
+ $this->bb = $_bb;
+ return $this;
+ }
+
+ /**
+ * @return ulong
+ */
+ public function getId()
+ {
+ $o = $this->__offset(4);
+ return $o != 0 ? $this->bb->getUlong($o + $this->bb_pos) : 0;
+ }
+
+ /**
+ * @param FlatBufferBuilder $builder
+ * @return void
+ */
+ public static function startReferrable(FlatBufferBuilder $builder)
+ {
+ $builder->StartObject(1);
+ }
+
+ /**
+ * @param FlatBufferBuilder $builder
+ * @param int $id
+ * @return int table offset
+ */
+ public static function createReferrable(FlatBufferBuilder $builder, $id)
+ {
+ $builder->startObject(1);
+ self::addId($builder, $id);
+ $o = $builder->endObject();
+ return $o;
+ }
+
+ /**
+ * @param FlatBufferBuilder $builder
+ * @param int $id ulong
+ * @return void
+ */
+ public static function addId(FlatBufferBuilder $builder, $id)
+ {
+ $builder->addUlongX(0, $id, 0);
+ }
+
+ /**
+ * @param FlatBufferBuilder $builder
+ * @return int table offset
+ */
+ public static function endReferrable(FlatBufferBuilder $builder)
+ {
+ $o = $builder->endObject();
+ return $o;
+ }
+}
--- /dev/null
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: Example
+
+import flatbuffers
+
+class Referrable(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsReferrable(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = Referrable()
+ x.Init(buf, n + offset)
+ return x
+
+ # Referrable
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+ # Referrable
+ def Id(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Uint64Flags, o + self._tab.Pos)
+ return 0
+
+def ReferrableStart(builder): builder.StartObject(1)
+def ReferrableAddId(builder, id): builder.PrependUint64Slot(0, id, 0)
+def ReferrableEnd(builder): return builder.EndObject()
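Read-side counterpart, again only an illustrative sketch under the same package-layout assumption; outside C++ the hashed reference fields surface as plain uint64 values:

# `buf` is a bytes/bytearray holding a finished Monster buffer, e.g. the
# builder.Output() result from the earlier write-side sketch.
from MyGame.Example.Monster import Monster  # assumed generated module path

monster = Monster.GetRootAsMonster(buf, 0)
for i in range(monster.VectorOfReferrablesLength()):
    ref = monster.VectorOfReferrables(i)  # Referrable table view, or None
    print(ref.Id())                       # the ulong key declared in the schema
print(monster.SingleWeakReference())      # stored as a plain uint64 hash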
public String id() { int o = __offset(4); return o != 0 ? __string(o + bb_pos) : null; }
public ByteBuffer idAsByteBuffer() { return __vector_as_bytebuffer(4, 1); }
+ public ByteBuffer idInByteBuffer(ByteBuffer _bb) { return __vector_in_bytebuffer(_bb, 4, 1); }
public long val() { int o = __offset(6); return o != 0 ? bb.getLong(o + bb_pos) : 0L; }
public boolean mutateVal(long val) { int o = __offset(6); if (o != 0) { bb.putLong(o + bb_pos, val); return true; } else { return false; } }
public int count() { int o = __offset(8); return o != 0 ? bb.getShort(o + bb_pos) & 0xFFFF : 0; }
public byte v8(int j) { int o = __offset(24); return o != 0 ? bb.get(__vector(o) + j * 1) : 0; }
public int v8Length() { int o = __offset(24); return o != 0 ? __vector_len(o) : 0; }
public ByteBuffer v8AsByteBuffer() { return __vector_as_bytebuffer(24, 1); }
+ public ByteBuffer v8InByteBuffer(ByteBuffer _bb) { return __vector_in_bytebuffer(_bb, 24, 1); }
public boolean mutateV8(int j, byte v8) { int o = __offset(24); if (o != 0) { bb.put(__vector(o) + j * 1, v8); return true; } else { return false; } }
public double vf64(int j) { int o = __offset(26); return o != 0 ? bb.getDouble(__vector(o) + j * 8) : 0; }
public int vf64Length() { int o = __offset(26); return o != 0 ? __vector_len(o) : 0; }
public ByteBuffer vf64AsByteBuffer() { return __vector_as_bytebuffer(26, 8); }
+ public ByteBuffer vf64InByteBuffer(ByteBuffer _bb) { return __vector_in_bytebuffer(_bb, 26, 8); }
public boolean mutateVf64(int j, double vf64) { int o = __offset(26); if (o != 0) { bb.putDouble(__vector(o) + j * 8, vf64); return true; } else { return false; } }
public static int createTypeAliases(FlatBufferBuilder builder,
..\%buildtype%\flatc.exe -b --schema --bfbs-comments -I include_test monster_test.fbs
..\%buildtype%\flatc.exe --jsonschema --schema -I include_test monster_test.fbs
cd ../samples
-..\%buildtype%\flatc.exe --cpp --gen-mutable --gen-object-api --cpp-ptr-type flatbuffers::unique_ptr monster.fbs
+..\%buildtype%\flatc.exe --cpp --gen-mutable --reflect-names --gen-object-api --cpp-ptr-type flatbuffers::unique_ptr monster.fbs
cd ../reflection
cd ../tests
\ No newline at end of file
count:ushort;
}
+table Referrable {
+ id:ulong(key, hash:"fnv1a_64");
+}
+
/// an example documentation comment: monster object
table Monster {
pos:Vec3 (id: 0);
vector_of_longs:[long] (id:32);
vector_of_doubles:[double] (id:33);
parent_namespace_test:InParentNamespace (id:34);
+ vector_of_referrables:[Referrable](id:35);
+ single_weak_reference:ulong(id:36, hash:"fnv1a_64", cpp_type:"ReferrableT");
+ vector_of_weak_references:[ulong](id:37, hash:"fnv1a_64", cpp_type:"ReferrableT");
+ vector_of_strong_referrables:[Referrable](id:38, cpp_ptr_type:"std::unique_ptr"); //was shared_ptr
+ co_owning_reference:ulong(id:39, hash:"fnv1a_64", cpp_type:"ReferrableT", cpp_ptr_type:"naked"); //was shared_ptr as well
+ vector_of_co_owning_references:[ulong](id:40, hash:"fnv1a_64", cpp_type:"ReferrableT", cpp_ptr_type:"std::unique_ptr", cpp_ptr_type_get:".get()"); //was shared_ptr
+ non_owning_reference:ulong(id:41, hash:"fnv1a_64", cpp_type:"ReferrableT", cpp_ptr_type:"naked", cpp_ptr_type_get:""); //was weak_ptr
+ vector_of_non_owning_references:[ulong](id:42, hash:"fnv1a_64", cpp_type:"ReferrableT", cpp_ptr_type:"naked", cpp_ptr_type_get:""); //was weak_ptr
}
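+// The new reference fields above exercise the hashed-reference attributes:
+// hash:"fnv1a_64" lets textual/JSON input supply a string that is hashed into
+// the ulong, cpp_type:"ReferrableT" makes the C++ object API hold a pointer to
+// the referenced native table instead of the raw hash, and cpp_ptr_type /
+// cpp_ptr_type_get choose the pointer wrapper (naked, std::unique_ptr) and the
+// expression used to obtain a plain pointer from it (e.g. ".get()").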
table TypeAliases {
},
"additionalProperties" : false
},
+ "MyGame_Example_Referrable" : {
+ "type" : "object",
+ "properties" : {
+ "id" : { "type" : "number" }
+ },
+ "additionalProperties" : false
+ },
"MyGame_Example_Monster" : {
"type" : "object",
"description" : " an example documentation comment: monster object",
"test5" : { "type" : "array", "items" : { "$ref" : "#/definitions/MyGame_Example_Test" } },
"vector_of_longs" : { "type" : "array", "items" : { "type" : "number" } },
"vector_of_doubles" : { "type" : "array", "items" : { "type" : "number" } },
- "parent_namespace_test" : { "$ref" : "#/definitions/MyGame_InParentNamespace" }
+ "parent_namespace_test" : { "$ref" : "#/definitions/MyGame_InParentNamespace" },
+ "vector_of_referrables" : { "type" : "array", "items" : { "$ref" : "#/definitions/MyGame_Example_Referrable" } },
+ "single_weak_reference" : { "type" : "number" },
+ "vector_of_weak_references" : { "type" : "array", "items" : { "type" : "number" } },
+ "vector_of_strong_referrables" : { "type" : "array", "items" : { "$ref" : "#/definitions/MyGame_Example_Referrable" } },
+ "co_owning_reference" : { "type" : "number" },
+ "vector_of_co_owning_references" : { "type" : "array", "items" : { "type" : "number" } },
+ "non_owning_reference" : { "type" : "number" },
+ "vector_of_non_owning_references" : { "type" : "array", "items" : { "type" : "number" } }
},
"required" : ["name"],
"additionalProperties" : false
struct Stat;
struct StatT;
+struct Referrable;
+struct ReferrableT;
+
struct Monster;
struct MonsterT;
struct TypeAliases;
struct TypeAliasesT;
+} // namespace Example
+
+inline const flatbuffers::TypeTable *InParentNamespaceTypeTable();
+
+namespace Example2 {
+
+inline const flatbuffers::TypeTable *MonsterTypeTable();
+
+} // namespace Example2
+
+namespace Example {
+
+inline const flatbuffers::TypeTable *TestTypeTable();
+
+inline const flatbuffers::TypeTable *TestSimpleTableWithEnumTypeTable();
+
+inline const flatbuffers::TypeTable *Vec3TypeTable();
+
+inline const flatbuffers::TypeTable *AbilityTypeTable();
+
+inline const flatbuffers::TypeTable *StatTypeTable();
+
+inline const flatbuffers::TypeTable *ReferrableTypeTable();
+
+inline const flatbuffers::TypeTable *MonsterTypeTable();
+
+inline const flatbuffers::TypeTable *TypeAliasesTypeTable();
+
enum Color {
Color_Red = 1,
Color_Green = 2,
Color_ANY = 11
};
-inline Color (&EnumValuesColor())[3] {
- static Color values[] = {
+inline const Color (&EnumValuesColor())[3] {
+ static const Color values[] = {
Color_Red,
Color_Green,
Color_Blue
return values;
}
-inline const char **EnumNamesColor() {
- static const char *names[] = {
+inline const char * const *EnumNamesColor() {
+ static const char * const names[] = {
"Red",
"Green",
"",
Any_MAX = Any_MyGame_Example2_Monster
};
-inline Any (&EnumValuesAny())[4] {
- static Any values[] = {
+inline const Any (&EnumValuesAny())[4] {
+ static const Any values[] = {
Any_NONE,
Any_Monster,
Any_TestSimpleTableWithEnum,
return values;
}
-inline const char **EnumNamesAny() {
- static const char *names[] = {
+inline const char * const *EnumNamesAny() {
+ static const char * const names[] = {
"NONE",
"Monster",
"TestSimpleTableWithEnum",
bool VerifyAny(flatbuffers::Verifier &verifier, const void *obj, Any type);
bool VerifyAnyVector(flatbuffers::Verifier &verifier, const flatbuffers::Vector<flatbuffers::Offset<void>> *values, const flatbuffers::Vector<uint8_t> *types);
-MANUALLY_ALIGNED_STRUCT(2) Test FLATBUFFERS_FINAL_CLASS {
+FLATBUFFERS_MANUALLY_ALIGNED_STRUCT(2) Test FLATBUFFERS_FINAL_CLASS {
private:
int16_t a_;
int8_t b_;
flatbuffers::WriteScalar(&b_, _b);
}
};
-STRUCT_END(Test, 4);
+FLATBUFFERS_STRUCT_END(Test, 4);
-MANUALLY_ALIGNED_STRUCT(16) Vec3 FLATBUFFERS_FINAL_CLASS {
+FLATBUFFERS_MANUALLY_ALIGNED_STRUCT(16) Vec3 FLATBUFFERS_FINAL_CLASS {
private:
float x_;
float y_;
return test3_;
}
};
-STRUCT_END(Vec3, 32);
+FLATBUFFERS_STRUCT_END(Vec3, 32);
-MANUALLY_ALIGNED_STRUCT(4) Ability FLATBUFFERS_FINAL_CLASS {
+FLATBUFFERS_MANUALLY_ALIGNED_STRUCT(4) Ability FLATBUFFERS_FINAL_CLASS {
private:
uint32_t id_;
uint32_t distance_;
flatbuffers::WriteScalar(&distance_, _distance);
}
};
-STRUCT_END(Ability, 8);
+FLATBUFFERS_STRUCT_END(Ability, 8);
} // namespace Example
struct InParentNamespace FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
typedef InParentNamespaceT NativeTableType;
+ static const flatbuffers::TypeTable *MiniReflectTypeTable() {
+ return InParentNamespaceTypeTable();
+ }
bool Verify(flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
verifier.EndTable();
struct Monster FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
typedef MonsterT NativeTableType;
+ static const flatbuffers::TypeTable *MiniReflectTypeTable() {
+ return MonsterTypeTable();
+ }
bool Verify(flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
verifier.EndTable();
struct TestSimpleTableWithEnum FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
typedef TestSimpleTableWithEnumT NativeTableType;
+ static const flatbuffers::TypeTable *MiniReflectTypeTable() {
+ return TestSimpleTableWithEnumTypeTable();
+ }
enum {
VT_COLOR = 4
};
struct Stat FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
typedef StatT NativeTableType;
+ static const flatbuffers::TypeTable *MiniReflectTypeTable() {
+ return StatTypeTable();
+ }
enum {
VT_ID = 4,
VT_VAL = 6,
flatbuffers::Offset<Stat> CreateStat(flatbuffers::FlatBufferBuilder &_fbb, const StatT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+struct ReferrableT : public flatbuffers::NativeTable {
+ typedef Referrable TableType;
+ uint64_t id;
+ ReferrableT()
+ : id(0) {
+ }
+};
+
+struct Referrable FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+ typedef ReferrableT NativeTableType;
+ static const flatbuffers::TypeTable *MiniReflectTypeTable() {
+ return ReferrableTypeTable();
+ }
+ enum {
+ VT_ID = 4
+ };
+ uint64_t id() const {
+ return GetField<uint64_t>(VT_ID, 0);
+ }
+ bool mutate_id(uint64_t _id) {
+ return SetField<uint64_t>(VT_ID, _id, 0);
+ }
+ bool KeyCompareLessThan(const Referrable *o) const {
+ return id() < o->id();
+ }
+ int KeyCompareWithValue(uint64_t val) const {
+ const auto key = id();
+ if (key < val) {
+ return -1;
+ } else if (key > val) {
+ return 1;
+ } else {
+ return 0;
+ }
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const {
+ return VerifyTableStart(verifier) &&
+ VerifyField<uint64_t>(verifier, VT_ID) &&
+ verifier.EndTable();
+ }
+ ReferrableT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(ReferrableT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<Referrable> Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReferrableT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct ReferrableBuilder {
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_id(uint64_t id) {
+ fbb_.AddElement<uint64_t>(Referrable::VT_ID, id, 0);
+ }
+ explicit ReferrableBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ : fbb_(_fbb) {
+ start_ = fbb_.StartTable();
+ }
+ ReferrableBuilder &operator=(const ReferrableBuilder &);
+ flatbuffers::Offset<Referrable> Finish() {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<Referrable>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<Referrable> CreateReferrable(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ uint64_t id = 0) {
+ ReferrableBuilder builder_(_fbb);
+ builder_.add_id(id);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<Referrable> CreateReferrable(flatbuffers::FlatBufferBuilder &_fbb, const ReferrableT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
struct MonsterT : public flatbuffers::NativeTable {
typedef Monster TableType;
flatbuffers::unique_ptr<Vec3> pos;
std::vector<int64_t> vector_of_longs;
std::vector<double> vector_of_doubles;
flatbuffers::unique_ptr<MyGame::InParentNamespaceT> parent_namespace_test;
+ std::vector<flatbuffers::unique_ptr<ReferrableT>> vector_of_referrables;
+ ReferrableT *single_weak_reference;
+ std::vector<ReferrableT *> vector_of_weak_references;
+ std::vector<std::unique_ptr<ReferrableT>> vector_of_strong_referrables;
+ ReferrableT *co_owning_reference;
+ std::vector<std::unique_ptr<ReferrableT>> vector_of_co_owning_references;
+ ReferrableT *non_owning_reference;
+ std::vector<ReferrableT *> vector_of_non_owning_references;
MonsterT()
: mana(150),
hp(100),
testhashs64_fnv1(0),
testhashu64_fnv1(0),
testhashs32_fnv1a(0),
- testhashu32_fnv1a(0),
+ testhashu32_fnv1a(nullptr),
testhashs64_fnv1a(0),
testhashu64_fnv1a(0),
testf(3.14159f),
testf2(3.0f),
- testf3(0.0f) {
+ testf3(0.0f),
+ single_weak_reference(nullptr),
+ co_owning_reference(nullptr),
+ non_owning_reference(nullptr) {
}
};
/// an example documentation comment: monster object
struct Monster FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
typedef MonsterT NativeTableType;
+ static const flatbuffers::TypeTable *MiniReflectTypeTable() {
+ return MonsterTypeTable();
+ }
enum {
VT_POS = 4,
VT_MANA = 6,
VT_TEST5 = 66,
VT_VECTOR_OF_LONGS = 68,
VT_VECTOR_OF_DOUBLES = 70,
- VT_PARENT_NAMESPACE_TEST = 72
+ VT_PARENT_NAMESPACE_TEST = 72,
+ VT_VECTOR_OF_REFERRABLES = 74,
+ VT_SINGLE_WEAK_REFERENCE = 76,
+ VT_VECTOR_OF_WEAK_REFERENCES = 78,
+ VT_VECTOR_OF_STRONG_REFERRABLES = 80,
+ VT_CO_OWNING_REFERENCE = 82,
+ VT_VECTOR_OF_CO_OWNING_REFERENCES = 84,
+ VT_NON_OWNING_REFERENCE = 86,
+ VT_VECTOR_OF_NON_OWNING_REFERENCES = 88
};
const Vec3 *pos() const {
return GetStruct<const Vec3 *>(VT_POS);
MyGame::InParentNamespace *mutable_parent_namespace_test() {
return GetPointer<MyGame::InParentNamespace *>(VT_PARENT_NAMESPACE_TEST);
}
+ const flatbuffers::Vector<flatbuffers::Offset<Referrable>> *vector_of_referrables() const {
+ return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<Referrable>> *>(VT_VECTOR_OF_REFERRABLES);
+ }
+ flatbuffers::Vector<flatbuffers::Offset<Referrable>> *mutable_vector_of_referrables() {
+ return GetPointer<flatbuffers::Vector<flatbuffers::Offset<Referrable>> *>(VT_VECTOR_OF_REFERRABLES);
+ }
+ uint64_t single_weak_reference() const {
+ return GetField<uint64_t>(VT_SINGLE_WEAK_REFERENCE, 0);
+ }
+ bool mutate_single_weak_reference(uint64_t _single_weak_reference) {
+ return SetField<uint64_t>(VT_SINGLE_WEAK_REFERENCE, _single_weak_reference, 0);
+ }
+ const flatbuffers::Vector<uint64_t> *vector_of_weak_references() const {
+ return GetPointer<const flatbuffers::Vector<uint64_t> *>(VT_VECTOR_OF_WEAK_REFERENCES);
+ }
+ flatbuffers::Vector<uint64_t> *mutable_vector_of_weak_references() {
+ return GetPointer<flatbuffers::Vector<uint64_t> *>(VT_VECTOR_OF_WEAK_REFERENCES);
+ }
+ const flatbuffers::Vector<flatbuffers::Offset<Referrable>> *vector_of_strong_referrables() const {
+ return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<Referrable>> *>(VT_VECTOR_OF_STRONG_REFERRABLES);
+ }
+ flatbuffers::Vector<flatbuffers::Offset<Referrable>> *mutable_vector_of_strong_referrables() {
+ return GetPointer<flatbuffers::Vector<flatbuffers::Offset<Referrable>> *>(VT_VECTOR_OF_STRONG_REFERRABLES);
+ }
+ uint64_t co_owning_reference() const {
+ return GetField<uint64_t>(VT_CO_OWNING_REFERENCE, 0);
+ }
+ bool mutate_co_owning_reference(uint64_t _co_owning_reference) {
+ return SetField<uint64_t>(VT_CO_OWNING_REFERENCE, _co_owning_reference, 0);
+ }
+ const flatbuffers::Vector<uint64_t> *vector_of_co_owning_references() const {
+ return GetPointer<const flatbuffers::Vector<uint64_t> *>(VT_VECTOR_OF_CO_OWNING_REFERENCES);
+ }
+ flatbuffers::Vector<uint64_t> *mutable_vector_of_co_owning_references() {
+ return GetPointer<flatbuffers::Vector<uint64_t> *>(VT_VECTOR_OF_CO_OWNING_REFERENCES);
+ }
+ uint64_t non_owning_reference() const {
+ return GetField<uint64_t>(VT_NON_OWNING_REFERENCE, 0);
+ }
+ bool mutate_non_owning_reference(uint64_t _non_owning_reference) {
+ return SetField<uint64_t>(VT_NON_OWNING_REFERENCE, _non_owning_reference, 0);
+ }
+ const flatbuffers::Vector<uint64_t> *vector_of_non_owning_references() const {
+ return GetPointer<const flatbuffers::Vector<uint64_t> *>(VT_VECTOR_OF_NON_OWNING_REFERENCES);
+ }
+ flatbuffers::Vector<uint64_t> *mutable_vector_of_non_owning_references() {
+ return GetPointer<flatbuffers::Vector<uint64_t> *>(VT_VECTOR_OF_NON_OWNING_REFERENCES);
+ }
bool Verify(flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
VerifyField<Vec3>(verifier, VT_POS) &&
verifier.Verify(vector_of_doubles()) &&
VerifyOffset(verifier, VT_PARENT_NAMESPACE_TEST) &&
verifier.VerifyTable(parent_namespace_test()) &&
+ VerifyOffset(verifier, VT_VECTOR_OF_REFERRABLES) &&
+ verifier.Verify(vector_of_referrables()) &&
+ verifier.VerifyVectorOfTables(vector_of_referrables()) &&
+ VerifyField<uint64_t>(verifier, VT_SINGLE_WEAK_REFERENCE) &&
+ VerifyOffset(verifier, VT_VECTOR_OF_WEAK_REFERENCES) &&
+ verifier.Verify(vector_of_weak_references()) &&
+ VerifyOffset(verifier, VT_VECTOR_OF_STRONG_REFERRABLES) &&
+ verifier.Verify(vector_of_strong_referrables()) &&
+ verifier.VerifyVectorOfTables(vector_of_strong_referrables()) &&
+ VerifyField<uint64_t>(verifier, VT_CO_OWNING_REFERENCE) &&
+ VerifyOffset(verifier, VT_VECTOR_OF_CO_OWNING_REFERENCES) &&
+ verifier.Verify(vector_of_co_owning_references()) &&
+ VerifyField<uint64_t>(verifier, VT_NON_OWNING_REFERENCE) &&
+ VerifyOffset(verifier, VT_VECTOR_OF_NON_OWNING_REFERENCES) &&
+ verifier.Verify(vector_of_non_owning_references()) &&
verifier.EndTable();
}
MonsterT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
void add_parent_namespace_test(flatbuffers::Offset<MyGame::InParentNamespace> parent_namespace_test) {
fbb_.AddOffset(Monster::VT_PARENT_NAMESPACE_TEST, parent_namespace_test);
}
+ void add_vector_of_referrables(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Referrable>>> vector_of_referrables) {
+ fbb_.AddOffset(Monster::VT_VECTOR_OF_REFERRABLES, vector_of_referrables);
+ }
+ void add_single_weak_reference(uint64_t single_weak_reference) {
+ fbb_.AddElement<uint64_t>(Monster::VT_SINGLE_WEAK_REFERENCE, single_weak_reference, 0);
+ }
+ void add_vector_of_weak_references(flatbuffers::Offset<flatbuffers::Vector<uint64_t>> vector_of_weak_references) {
+ fbb_.AddOffset(Monster::VT_VECTOR_OF_WEAK_REFERENCES, vector_of_weak_references);
+ }
+ void add_vector_of_strong_referrables(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Referrable>>> vector_of_strong_referrables) {
+ fbb_.AddOffset(Monster::VT_VECTOR_OF_STRONG_REFERRABLES, vector_of_strong_referrables);
+ }
+ void add_co_owning_reference(uint64_t co_owning_reference) {
+ fbb_.AddElement<uint64_t>(Monster::VT_CO_OWNING_REFERENCE, co_owning_reference, 0);
+ }
+ void add_vector_of_co_owning_references(flatbuffers::Offset<flatbuffers::Vector<uint64_t>> vector_of_co_owning_references) {
+ fbb_.AddOffset(Monster::VT_VECTOR_OF_CO_OWNING_REFERENCES, vector_of_co_owning_references);
+ }
+ void add_non_owning_reference(uint64_t non_owning_reference) {
+ fbb_.AddElement<uint64_t>(Monster::VT_NON_OWNING_REFERENCE, non_owning_reference, 0);
+ }
+ void add_vector_of_non_owning_references(flatbuffers::Offset<flatbuffers::Vector<uint64_t>> vector_of_non_owning_references) {
+ fbb_.AddOffset(Monster::VT_VECTOR_OF_NON_OWNING_REFERENCES, vector_of_non_owning_references);
+ }
explicit MonsterBuilder(flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
flatbuffers::Offset<flatbuffers::Vector<const Test *>> test5 = 0,
flatbuffers::Offset<flatbuffers::Vector<int64_t>> vector_of_longs = 0,
flatbuffers::Offset<flatbuffers::Vector<double>> vector_of_doubles = 0,
- flatbuffers::Offset<MyGame::InParentNamespace> parent_namespace_test = 0) {
+ flatbuffers::Offset<MyGame::InParentNamespace> parent_namespace_test = 0,
+ flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Referrable>>> vector_of_referrables = 0,
+ uint64_t single_weak_reference = 0,
+ flatbuffers::Offset<flatbuffers::Vector<uint64_t>> vector_of_weak_references = 0,
+ flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Referrable>>> vector_of_strong_referrables = 0,
+ uint64_t co_owning_reference = 0,
+ flatbuffers::Offset<flatbuffers::Vector<uint64_t>> vector_of_co_owning_references = 0,
+ uint64_t non_owning_reference = 0,
+ flatbuffers::Offset<flatbuffers::Vector<uint64_t>> vector_of_non_owning_references = 0) {
MonsterBuilder builder_(_fbb);
+ builder_.add_non_owning_reference(non_owning_reference);
+ builder_.add_co_owning_reference(co_owning_reference);
+ builder_.add_single_weak_reference(single_weak_reference);
builder_.add_testhashu64_fnv1a(testhashu64_fnv1a);
builder_.add_testhashs64_fnv1a(testhashs64_fnv1a);
builder_.add_testhashu64_fnv1(testhashu64_fnv1);
builder_.add_testhashs64_fnv1(testhashs64_fnv1);
+ builder_.add_vector_of_non_owning_references(vector_of_non_owning_references);
+ builder_.add_vector_of_co_owning_references(vector_of_co_owning_references);
+ builder_.add_vector_of_strong_referrables(vector_of_strong_referrables);
+ builder_.add_vector_of_weak_references(vector_of_weak_references);
+ builder_.add_vector_of_referrables(vector_of_referrables);
builder_.add_parent_namespace_test(parent_namespace_test);
builder_.add_vector_of_doubles(vector_of_doubles);
builder_.add_vector_of_longs(vector_of_longs);
const std::vector<Test> *test5 = nullptr,
const std::vector<int64_t> *vector_of_longs = nullptr,
const std::vector<double> *vector_of_doubles = nullptr,
- flatbuffers::Offset<MyGame::InParentNamespace> parent_namespace_test = 0) {
+ flatbuffers::Offset<MyGame::InParentNamespace> parent_namespace_test = 0,
+ const std::vector<flatbuffers::Offset<Referrable>> *vector_of_referrables = nullptr,
+ uint64_t single_weak_reference = 0,
+ const std::vector<uint64_t> *vector_of_weak_references = nullptr,
+ const std::vector<flatbuffers::Offset<Referrable>> *vector_of_strong_referrables = nullptr,
+ uint64_t co_owning_reference = 0,
+ const std::vector<uint64_t> *vector_of_co_owning_references = nullptr,
+ uint64_t non_owning_reference = 0,
+ const std::vector<uint64_t> *vector_of_non_owning_references = nullptr) {
return MyGame::Example::CreateMonster(
_fbb,
pos,
test5 ? _fbb.CreateVectorOfStructs<Test>(*test5) : 0,
vector_of_longs ? _fbb.CreateVector<int64_t>(*vector_of_longs) : 0,
vector_of_doubles ? _fbb.CreateVector<double>(*vector_of_doubles) : 0,
- parent_namespace_test);
+ parent_namespace_test,
+ vector_of_referrables ? _fbb.CreateVector<flatbuffers::Offset<Referrable>>(*vector_of_referrables) : 0,
+ single_weak_reference,
+ vector_of_weak_references ? _fbb.CreateVector<uint64_t>(*vector_of_weak_references) : 0,
+ vector_of_strong_referrables ? _fbb.CreateVector<flatbuffers::Offset<Referrable>>(*vector_of_strong_referrables) : 0,
+ co_owning_reference,
+ vector_of_co_owning_references ? _fbb.CreateVector<uint64_t>(*vector_of_co_owning_references) : 0,
+ non_owning_reference,
+ vector_of_non_owning_references ? _fbb.CreateVector<uint64_t>(*vector_of_non_owning_references) : 0);
}
flatbuffers::Offset<Monster> CreateMonster(flatbuffers::FlatBufferBuilder &_fbb, const MonsterT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
struct TypeAliases FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
typedef TypeAliasesT NativeTableType;
+ static const flatbuffers::TypeTable *MiniReflectTypeTable() {
+ return TypeAliasesTypeTable();
+ }
enum {
VT_I8 = 4,
VT_U8 = 6,
_count);
}
+inline ReferrableT *Referrable::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+ auto _o = new ReferrableT();
+ UnPackTo(_o, _resolver);
+ return _o;
+}
+
+inline void Referrable::UnPackTo(ReferrableT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+ (void)_o;
+ (void)_resolver;
+ { auto _e = id(); _o->id = _e; };
+}
+
+inline flatbuffers::Offset<Referrable> Referrable::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReferrableT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+ return CreateReferrable(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<Referrable> CreateReferrable(flatbuffers::FlatBufferBuilder &_fbb, const ReferrableT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ReferrableT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+ auto _id = _o->id;
+ return MyGame::Example::CreateReferrable(
+ _fbb,
+ _id);
+}
+
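+// For fields declared with cpp_type, UnPackTo does not copy the stored ulong:
+// it hands the hash to the optional resolver_function_t so the caller can patch
+// in the live ReferrableT pointer; with no resolver the "naked" pointers are
+// simply reset to nullptr (see the generated blocks below).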
inline MonsterT *Monster::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
auto _o = new MonsterT();
UnPackTo(_o, _resolver);
{ auto _e = testhashs64_fnv1(); _o->testhashs64_fnv1 = _e; };
{ auto _e = testhashu64_fnv1(); _o->testhashu64_fnv1 = _e; };
{ auto _e = testhashs32_fnv1a(); _o->testhashs32_fnv1a = _e; };
- { auto _e = testhashu32_fnv1a(); if (_resolver) (*_resolver)(reinterpret_cast<void **>(&_o->testhashu32_fnv1a), static_cast<flatbuffers::hash_value_t>(_e)); else _o->testhashu32_fnv1a = nullptr; };
+ { auto _e = testhashu32_fnv1a(); //scalar resolver, naked
+if (_resolver) (*_resolver)(reinterpret_cast<void **>(&_o->testhashu32_fnv1a), static_cast<flatbuffers::hash_value_t>(_e)); else _o->testhashu32_fnv1a = nullptr; };
{ auto _e = testhashs64_fnv1a(); _o->testhashs64_fnv1a = _e; };
{ auto _e = testhashu64_fnv1a(); _o->testhashu64_fnv1a = _e; };
{ auto _e = testarrayofbools(); if (_e) { _o->testarrayofbools.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->testarrayofbools[_i] = _e->Get(_i) != 0; } } };
{ auto _e = vector_of_longs(); if (_e) { _o->vector_of_longs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->vector_of_longs[_i] = _e->Get(_i); } } };
{ auto _e = vector_of_doubles(); if (_e) { _o->vector_of_doubles.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->vector_of_doubles[_i] = _e->Get(_i); } } };
{ auto _e = parent_namespace_test(); if (_e) _o->parent_namespace_test = flatbuffers::unique_ptr<MyGame::InParentNamespaceT>(_e->UnPack(_resolver)); };
+ { auto _e = vector_of_referrables(); if (_e) { _o->vector_of_referrables.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->vector_of_referrables[_i] = flatbuffers::unique_ptr<ReferrableT>(_e->Get(_i)->UnPack(_resolver)); } } };
+ { auto _e = single_weak_reference(); //scalar resolver, naked
+if (_resolver) (*_resolver)(reinterpret_cast<void **>(&_o->single_weak_reference), static_cast<flatbuffers::hash_value_t>(_e)); else _o->single_weak_reference = nullptr; };
+ { auto _e = vector_of_weak_references(); if (_e) { _o->vector_of_weak_references.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { //vector resolver, naked
+if (_resolver) (*_resolver)(reinterpret_cast<void **>(&_o->vector_of_weak_references[_i]), static_cast<flatbuffers::hash_value_t>(_e->Get(_i))); else _o->vector_of_weak_references[_i] = nullptr; } } };
+ { auto _e = vector_of_strong_referrables(); if (_e) { _o->vector_of_strong_referrables.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->vector_of_strong_referrables[_i] = std::unique_ptr<ReferrableT>(_e->Get(_i)->UnPack(_resolver)); } } };
+ { auto _e = co_owning_reference(); //scalar resolver, naked
+if (_resolver) (*_resolver)(reinterpret_cast<void **>(&_o->co_owning_reference), static_cast<flatbuffers::hash_value_t>(_e)); else _o->co_owning_reference = nullptr; };
+ { auto _e = vector_of_co_owning_references(); if (_e) { _o->vector_of_co_owning_references.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { //vector resolver, std::unique_ptr
+if (_resolver) (*_resolver)(reinterpret_cast<void **>(&_o->vector_of_co_owning_references[_i]), static_cast<flatbuffers::hash_value_t>(_e->Get(_i)));/* else do nothing */; } } };
+ { auto _e = non_owning_reference(); //scalar resolver, naked
+if (_resolver) (*_resolver)(reinterpret_cast<void **>(&_o->non_owning_reference), static_cast<flatbuffers::hash_value_t>(_e)); else _o->non_owning_reference = nullptr; };
+ { auto _e = vector_of_non_owning_references(); if (_e) { _o->vector_of_non_owning_references.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { //vector resolver, naked
+if (_resolver) (*_resolver)(reinterpret_cast<void **>(&_o->vector_of_non_owning_references[_i]), static_cast<flatbuffers::hash_value_t>(_e->Get(_i))); else _o->vector_of_non_owning_references[_i] = nullptr; } } };
}
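+// Pack is the inverse: the optional rehasher_function_t maps each native
+// pointer back to the uint64 hash that is written into the buffer; without a
+// rehasher the reference fields and vectors are serialized as 0.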
inline flatbuffers::Offset<Monster> Monster::Pack(flatbuffers::FlatBufferBuilder &_fbb, const MonsterT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
auto _vector_of_longs = _o->vector_of_longs.size() ? _fbb.CreateVector(_o->vector_of_longs) : 0;
auto _vector_of_doubles = _o->vector_of_doubles.size() ? _fbb.CreateVector(_o->vector_of_doubles) : 0;
auto _parent_namespace_test = _o->parent_namespace_test ? CreateInParentNamespace(_fbb, _o->parent_namespace_test.get(), _rehasher) : 0;
+ auto _vector_of_referrables = _o->vector_of_referrables.size() ? _fbb.CreateVector<flatbuffers::Offset<Referrable>> (_o->vector_of_referrables.size(), [](size_t i, _VectorArgs *__va) { return CreateReferrable(*__va->__fbb, __va->__o->vector_of_referrables[i].get(), __va->__rehasher); }, &_va ) : 0;
+ auto _single_weak_reference = _rehasher ? static_cast<uint64_t>((*_rehasher)(_o->single_weak_reference)) : 0;
+ auto _vector_of_weak_references = _o->vector_of_weak_references.size() ? _fbb.CreateVector<uint64_t>(_o->vector_of_weak_references.size(), [](size_t i, _VectorArgs *__va) { return __va->__rehasher ? static_cast<uint64_t>((*__va->__rehasher)(__va->__o->vector_of_weak_references[i])) : 0; }, &_va ) : 0;
+ auto _vector_of_strong_referrables = _o->vector_of_strong_referrables.size() ? _fbb.CreateVector<flatbuffers::Offset<Referrable>> (_o->vector_of_strong_referrables.size(), [](size_t i, _VectorArgs *__va) { return CreateReferrable(*__va->__fbb, __va->__o->vector_of_strong_referrables[i].get(), __va->__rehasher); }, &_va ) : 0;
+ auto _co_owning_reference = _rehasher ? static_cast<uint64_t>((*_rehasher)(_o->co_owning_reference)) : 0;
+ auto _vector_of_co_owning_references = _o->vector_of_co_owning_references.size() ? _fbb.CreateVector<uint64_t>(_o->vector_of_co_owning_references.size(), [](size_t i, _VectorArgs *__va) { return __va->__rehasher ? static_cast<uint64_t>((*__va->__rehasher)(__va->__o->vector_of_co_owning_references[i].get())) : 0; }, &_va ) : 0;
+ auto _non_owning_reference = _rehasher ? static_cast<uint64_t>((*_rehasher)(_o->non_owning_reference)) : 0;
+ auto _vector_of_non_owning_references = _o->vector_of_non_owning_references.size() ? _fbb.CreateVector<uint64_t>(_o->vector_of_non_owning_references.size(), [](size_t i, _VectorArgs *__va) { return __va->__rehasher ? static_cast<uint64_t>((*__va->__rehasher)(__va->__o->vector_of_non_owning_references[i])) : 0; }, &_va ) : 0;
return MyGame::Example::CreateMonster(
_fbb,
_pos,
_test5,
_vector_of_longs,
_vector_of_doubles,
- _parent_namespace_test);
+ _parent_namespace_test,
+ _vector_of_referrables,
+ _single_weak_reference,
+ _vector_of_weak_references,
+ _vector_of_strong_referrables,
+ _co_owning_reference,
+ _vector_of_co_owning_references,
+ _non_owning_reference,
+ _vector_of_non_owning_references);
}
inline TypeAliasesT *TypeAliases::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
inline AnyUnion::AnyUnion(const AnyUnion &u) FLATBUFFERS_NOEXCEPT : type(u.type), value(nullptr) {
switch (type) {
case Any_Monster: {
- assert(false); // MonsterT not copyable.
+ FLATBUFFERS_ASSERT(false); // MonsterT not copyable.
break;
}
case Any_TestSimpleTableWithEnum: {
type = Any_NONE;
}
-} // namespace Example
-
-inline flatbuffers::TypeTable *InParentNamespaceTypeTable();
-
-namespace Example2 {
-
-inline flatbuffers::TypeTable *MonsterTypeTable();
-
-} // namespace Example2
-
-namespace Example {
-
-inline flatbuffers::TypeTable *TestTypeTable();
-
-inline flatbuffers::TypeTable *TestSimpleTableWithEnumTypeTable();
-
-inline flatbuffers::TypeTable *Vec3TypeTable();
-
-inline flatbuffers::TypeTable *AbilityTypeTable();
-
-inline flatbuffers::TypeTable *StatTypeTable();
-
-inline flatbuffers::TypeTable *MonsterTypeTable();
-
-inline flatbuffers::TypeTable *TypeAliasesTypeTable();
-
-inline flatbuffers::TypeTable *ColorTypeTable() {
- static flatbuffers::TypeCode type_codes[] = {
+inline const flatbuffers::TypeTable *ColorTypeTable() {
+ static const flatbuffers::TypeCode type_codes[] = {
{ flatbuffers::ET_CHAR, 0, 0 },
{ flatbuffers::ET_CHAR, 0, 0 },
{ flatbuffers::ET_CHAR, 0, 0 }
};
- static flatbuffers::TypeFunction type_refs[] = {
+ static const flatbuffers::TypeFunction type_refs[] = {
ColorTypeTable
};
static const int32_t values[] = { 1, 2, 8 };
- static const char *names[] = {
+ static const char * const names[] = {
"Red",
"Green",
"Blue"
};
- static flatbuffers::TypeTable tt = {
+ static const flatbuffers::TypeTable tt = {
flatbuffers::ST_ENUM, 3, type_codes, type_refs, values, names
};
return &tt;
}
-inline flatbuffers::TypeTable *AnyTypeTable() {
- static flatbuffers::TypeCode type_codes[] = {
+inline const flatbuffers::TypeTable *AnyTypeTable() {
+ static const flatbuffers::TypeCode type_codes[] = {
{ flatbuffers::ET_SEQUENCE, 0, -1 },
{ flatbuffers::ET_SEQUENCE, 0, 0 },
{ flatbuffers::ET_SEQUENCE, 0, 1 },
{ flatbuffers::ET_SEQUENCE, 0, 2 }
};
- static flatbuffers::TypeFunction type_refs[] = {
+ static const flatbuffers::TypeFunction type_refs[] = {
MonsterTypeTable,
TestSimpleTableWithEnumTypeTable,
MyGame::Example2::MonsterTypeTable
};
- static const char *names[] = {
+ static const char * const names[] = {
"NONE",
"Monster",
"TestSimpleTableWithEnum",
"MyGame_Example2_Monster"
};
- static flatbuffers::TypeTable tt = {
+ static const flatbuffers::TypeTable tt = {
flatbuffers::ST_UNION, 4, type_codes, type_refs, nullptr, names
};
return &tt;
} // namespace Example
-inline flatbuffers::TypeTable *InParentNamespaceTypeTable() {
- static flatbuffers::TypeTable tt = {
+inline const flatbuffers::TypeTable *InParentNamespaceTypeTable() {
+ static const flatbuffers::TypeTable tt = {
flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr
};
return &tt;
namespace Example2 {
-inline flatbuffers::TypeTable *MonsterTypeTable() {
- static flatbuffers::TypeTable tt = {
+inline const flatbuffers::TypeTable *MonsterTypeTable() {
+ static const flatbuffers::TypeTable tt = {
flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr
};
return &tt;
namespace Example {
-inline flatbuffers::TypeTable *TestTypeTable() {
- static flatbuffers::TypeCode type_codes[] = {
+inline const flatbuffers::TypeTable *TestTypeTable() {
+ static const flatbuffers::TypeCode type_codes[] = {
{ flatbuffers::ET_SHORT, 0, -1 },
{ flatbuffers::ET_CHAR, 0, -1 }
};
static const int32_t values[] = { 0, 2, 4 };
- static const char *names[] = {
+ static const char * const names[] = {
"a",
"b"
};
- static flatbuffers::TypeTable tt = {
+ static const flatbuffers::TypeTable tt = {
flatbuffers::ST_STRUCT, 2, type_codes, nullptr, values, names
};
return &tt;
}
-inline flatbuffers::TypeTable *TestSimpleTableWithEnumTypeTable() {
- static flatbuffers::TypeCode type_codes[] = {
+inline const flatbuffers::TypeTable *TestSimpleTableWithEnumTypeTable() {
+ static const flatbuffers::TypeCode type_codes[] = {
{ flatbuffers::ET_CHAR, 0, 0 }
};
- static flatbuffers::TypeFunction type_refs[] = {
+ static const flatbuffers::TypeFunction type_refs[] = {
ColorTypeTable
};
- static const char *names[] = {
+ static const char * const names[] = {
"color"
};
- static flatbuffers::TypeTable tt = {
+ static const flatbuffers::TypeTable tt = {
flatbuffers::ST_TABLE, 1, type_codes, type_refs, nullptr, names
};
return &tt;
}
-inline flatbuffers::TypeTable *Vec3TypeTable() {
- static flatbuffers::TypeCode type_codes[] = {
+inline const flatbuffers::TypeTable *Vec3TypeTable() {
+ static const flatbuffers::TypeCode type_codes[] = {
{ flatbuffers::ET_FLOAT, 0, -1 },
{ flatbuffers::ET_FLOAT, 0, -1 },
{ flatbuffers::ET_FLOAT, 0, -1 },
{ flatbuffers::ET_CHAR, 0, 0 },
{ flatbuffers::ET_SEQUENCE, 0, 1 }
};
- static flatbuffers::TypeFunction type_refs[] = {
+ static const flatbuffers::TypeFunction type_refs[] = {
ColorTypeTable,
TestTypeTable
};
static const int32_t values[] = { 0, 4, 8, 16, 24, 26, 32 };
- static const char *names[] = {
+ static const char * const names[] = {
"x",
"y",
"z",
"test2",
"test3"
};
- static flatbuffers::TypeTable tt = {
+ static const flatbuffers::TypeTable tt = {
flatbuffers::ST_STRUCT, 6, type_codes, type_refs, values, names
};
return &tt;
}
-inline flatbuffers::TypeTable *AbilityTypeTable() {
- static flatbuffers::TypeCode type_codes[] = {
+inline const flatbuffers::TypeTable *AbilityTypeTable() {
+ static const flatbuffers::TypeCode type_codes[] = {
{ flatbuffers::ET_UINT, 0, -1 },
{ flatbuffers::ET_UINT, 0, -1 }
};
static const int32_t values[] = { 0, 4, 8 };
- static const char *names[] = {
+ static const char * const names[] = {
"id",
"distance"
};
- static flatbuffers::TypeTable tt = {
+ static const flatbuffers::TypeTable tt = {
flatbuffers::ST_STRUCT, 2, type_codes, nullptr, values, names
};
return &tt;
}
-inline flatbuffers::TypeTable *StatTypeTable() {
- static flatbuffers::TypeCode type_codes[] = {
+inline const flatbuffers::TypeTable *StatTypeTable() {
+ static const flatbuffers::TypeCode type_codes[] = {
{ flatbuffers::ET_STRING, 0, -1 },
{ flatbuffers::ET_LONG, 0, -1 },
{ flatbuffers::ET_USHORT, 0, -1 }
};
- static const char *names[] = {
+ static const char * const names[] = {
"id",
"val",
"count"
};
- static flatbuffers::TypeTable tt = {
+ static const flatbuffers::TypeTable tt = {
flatbuffers::ST_TABLE, 3, type_codes, nullptr, nullptr, names
};
return &tt;
}
-inline flatbuffers::TypeTable *MonsterTypeTable() {
- static flatbuffers::TypeCode type_codes[] = {
+inline const flatbuffers::TypeTable *ReferrableTypeTable() {
+ static const flatbuffers::TypeCode type_codes[] = {
+ { flatbuffers::ET_ULONG, 0, -1 }
+ };
+ static const char * const names[] = {
+ "id"
+ };
+ static const flatbuffers::TypeTable tt = {
+ flatbuffers::ST_TABLE, 1, type_codes, nullptr, nullptr, names
+ };
+ return &tt;
+}
+
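+// Each TypeCode triple is { element type, is-vector flag, index into type_refs
+// (-1 = none) }, so the ET_SEQUENCE entries added to MonsterTypeTable below
+// resolve to the newly appended ReferrableTypeTable slot in type_refs.
+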
+inline const flatbuffers::TypeTable *MonsterTypeTable() {
+ static const flatbuffers::TypeCode type_codes[] = {
{ flatbuffers::ET_SEQUENCE, 0, 0 },
{ flatbuffers::ET_SHORT, 0, -1 },
{ flatbuffers::ET_SHORT, 0, -1 },
{ flatbuffers::ET_SEQUENCE, 1, 3 },
{ flatbuffers::ET_LONG, 1, -1 },
{ flatbuffers::ET_DOUBLE, 1, -1 },
- { flatbuffers::ET_SEQUENCE, 0, 7 }
+ { flatbuffers::ET_SEQUENCE, 0, 7 },
+ { flatbuffers::ET_SEQUENCE, 1, 8 },
+ { flatbuffers::ET_ULONG, 0, -1 },
+ { flatbuffers::ET_ULONG, 1, -1 },
+ { flatbuffers::ET_SEQUENCE, 1, 8 },
+ { flatbuffers::ET_ULONG, 0, -1 },
+ { flatbuffers::ET_ULONG, 1, -1 },
+ { flatbuffers::ET_ULONG, 0, -1 },
+ { flatbuffers::ET_ULONG, 1, -1 }
};
- static flatbuffers::TypeFunction type_refs[] = {
+ static const flatbuffers::TypeFunction type_refs[] = {
Vec3TypeTable,
ColorTypeTable,
AnyTypeTable,
MonsterTypeTable,
StatTypeTable,
AbilityTypeTable,
- MyGame::InParentNamespaceTypeTable
+ MyGame::InParentNamespaceTypeTable,
+ ReferrableTypeTable
};
- static const char *names[] = {
+ static const char * const names[] = {
"pos",
"mana",
"hp",
"test5",
"vector_of_longs",
"vector_of_doubles",
- "parent_namespace_test"
+ "parent_namespace_test",
+ "vector_of_referrables",
+ "single_weak_reference",
+ "vector_of_weak_references",
+ "vector_of_strong_referrables",
+ "co_owning_reference",
+ "vector_of_co_owning_references",
+ "non_owning_reference",
+ "vector_of_non_owning_references"
};
- static flatbuffers::TypeTable tt = {
- flatbuffers::ST_TABLE, 35, type_codes, type_refs, nullptr, names
+ static const flatbuffers::TypeTable tt = {
+ flatbuffers::ST_TABLE, 43, type_codes, type_refs, nullptr, names
};
return &tt;
}
-inline flatbuffers::TypeTable *TypeAliasesTypeTable() {
- static flatbuffers::TypeCode type_codes[] = {
+inline const flatbuffers::TypeTable *TypeAliasesTypeTable() {
+ static const flatbuffers::TypeCode type_codes[] = {
{ flatbuffers::ET_CHAR, 0, -1 },
{ flatbuffers::ET_UCHAR, 0, -1 },
{ flatbuffers::ET_SHORT, 0, -1 },
{ flatbuffers::ET_CHAR, 1, -1 },
{ flatbuffers::ET_DOUBLE, 1, -1 }
};
- static const char *names[] = {
+ static const char * const names[] = {
"i8",
"u8",
"i16",
"v8",
"vf64"
};
- static flatbuffers::TypeTable tt = {
+ static const flatbuffers::TypeTable tt = {
flatbuffers::ST_TABLE, 12, type_codes, nullptr, nullptr, names
};
return &tt;
return flatbuffers::GetRoot<MyGame::Example::Monster>(buf);
}
+inline const MyGame::Example::Monster *GetSizePrefixedMonster(const void *buf) {
+ return flatbuffers::GetSizePrefixedRoot<MyGame::Example::Monster>(buf);
+}
+
inline Monster *GetMutableMonster(void *buf) {
return flatbuffers::GetMutableRoot<Monster>(buf);
}
return verifier.VerifyBuffer<MyGame::Example::Monster>(MonsterIdentifier());
}
+inline bool VerifySizePrefixedMonsterBuffer(
+ flatbuffers::Verifier &verifier) {
+ return verifier.VerifySizePrefixedBuffer<MyGame::Example::Monster>(MonsterIdentifier());
+}
+
inline const char *MonsterExtension() {
return "mon";
}
fbb.Finish(root, MonsterIdentifier());
}
+inline void FinishSizePrefixedMonsterBuffer(
+ flatbuffers::FlatBufferBuilder &fbb,
+ flatbuffers::Offset<MyGame::Example::Monster> root) {
+ fbb.FinishSizePrefixed(root, MonsterIdentifier());
+}
+
inline flatbuffers::unique_ptr<MonsterT> UnPackMonster(
const void *buf,
const flatbuffers::resolver_function_t *res = nullptr) {
return offset;
};
+/**
+ * @constructor
+ */
+MyGame.Example.Referrable = function() {
+ /**
+ * @type {flatbuffers.ByteBuffer}
+ */
+ this.bb = null;
+
+ /**
+ * @type {number}
+ */
+ this.bb_pos = 0;
+};
+
+/**
+ * @param {number} i
+ * @param {flatbuffers.ByteBuffer} bb
+ * @returns {MyGame.Example.Referrable}
+ */
+MyGame.Example.Referrable.prototype.__init = function(i, bb) {
+ this.bb_pos = i;
+ this.bb = bb;
+ return this;
+};
+
+/**
+ * @param {flatbuffers.ByteBuffer} bb
+ * @param {MyGame.Example.Referrable=} obj
+ * @returns {MyGame.Example.Referrable}
+ */
+MyGame.Example.Referrable.getRootAsReferrable = function(bb, obj) {
+ return (obj || new MyGame.Example.Referrable).__init(bb.readInt32(bb.position()) + bb.position(), bb);
+};
+
+/**
+ * @returns {flatbuffers.Long}
+ */
+MyGame.Example.Referrable.prototype.id = function() {
+ var offset = this.bb.__offset(this.bb_pos, 4);
+ return offset ? this.bb.readUint64(this.bb_pos + offset) : this.bb.createLong(0, 0);
+};
+
+/**
+ * @param {flatbuffers.Long} value
+ * @returns {boolean}
+ */
+MyGame.Example.Referrable.prototype.mutate_id = function(value) {
+ var offset = this.bb.__offset(this.bb_pos, 4);
+
+ if (offset === 0) {
+ return false;
+ }
+
+ this.bb.writeUint64(this.bb_pos + offset, value);
+ return true;
+};
+
+/**
+ * @param {flatbuffers.Builder} builder
+ */
+MyGame.Example.Referrable.startReferrable = function(builder) {
+ builder.startObject(1);
+};
+
+/**
+ * @param {flatbuffers.Builder} builder
+ * @param {flatbuffers.Long} id
+ */
+MyGame.Example.Referrable.addId = function(builder, id) {
+ builder.addFieldInt64(0, id, builder.createLong(0, 0));
+};
+
+/**
+ * @param {flatbuffers.Builder} builder
+ * @returns {flatbuffers.Offset}
+ */
+MyGame.Example.Referrable.endReferrable = function(builder) {
+ var offset = builder.endObject();
+ return offset;
+};
+
/**
* an example documentation comment: monster object
*
return offset ? (obj || new MyGame.InParentNamespace).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null;
};
+/**
+ * @param {number} index
+ * @param {MyGame.Example.Referrable=} obj
+ * @returns {MyGame.Example.Referrable}
+ */
+MyGame.Example.Monster.prototype.vectorOfReferrables = function(index, obj) {
+ var offset = this.bb.__offset(this.bb_pos, 74);
+ return offset ? (obj || new MyGame.Example.Referrable).__init(this.bb.__indirect(this.bb.__vector(this.bb_pos + offset) + index * 4), this.bb) : null;
+};
+
+/**
+ * @returns {number}
+ */
+MyGame.Example.Monster.prototype.vectorOfReferrablesLength = function() {
+ var offset = this.bb.__offset(this.bb_pos, 74);
+ return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0;
+};
+
+/**
+ * @returns {flatbuffers.Long}
+ */
+MyGame.Example.Monster.prototype.singleWeakReference = function() {
+ var offset = this.bb.__offset(this.bb_pos, 76);
+ return offset ? this.bb.readUint64(this.bb_pos + offset) : this.bb.createLong(0, 0);
+};
+
+/**
+ * @param {flatbuffers.Long} value
+ * @returns {boolean}
+ */
+MyGame.Example.Monster.prototype.mutate_single_weak_reference = function(value) {
+ var offset = this.bb.__offset(this.bb_pos, 76);
+
+ if (offset === 0) {
+ return false;
+ }
+
+ this.bb.writeUint64(this.bb_pos + offset, value);
+ return true;
+};
+
+/**
+ * @param {number} index
+ * @returns {flatbuffers.Long}
+ */
+MyGame.Example.Monster.prototype.vectorOfWeakReferences = function(index) {
+ var offset = this.bb.__offset(this.bb_pos, 78);
+ return offset ? this.bb.readUint64(this.bb.__vector(this.bb_pos + offset) + index * 8) : this.bb.createLong(0, 0);
+};
+
+/**
+ * @returns {number}
+ */
+MyGame.Example.Monster.prototype.vectorOfWeakReferencesLength = function() {
+ var offset = this.bb.__offset(this.bb_pos, 78);
+ return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0;
+};
+
+/**
+ * @param {number} index
+ * @param {MyGame.Example.Referrable=} obj
+ * @returns {MyGame.Example.Referrable}
+ */
+MyGame.Example.Monster.prototype.vectorOfStrongReferrables = function(index, obj) {
+ var offset = this.bb.__offset(this.bb_pos, 80);
+ return offset ? (obj || new MyGame.Example.Referrable).__init(this.bb.__indirect(this.bb.__vector(this.bb_pos + offset) + index * 4), this.bb) : null;
+};
+
+/**
+ * @returns {number}
+ */
+MyGame.Example.Monster.prototype.vectorOfStrongReferrablesLength = function() {
+ var offset = this.bb.__offset(this.bb_pos, 80);
+ return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0;
+};
+
+/**
+ * @returns {flatbuffers.Long}
+ */
+MyGame.Example.Monster.prototype.coOwningReference = function() {
+ var offset = this.bb.__offset(this.bb_pos, 82);
+ return offset ? this.bb.readUint64(this.bb_pos + offset) : this.bb.createLong(0, 0);
+};
+
+/**
+ * @param {flatbuffers.Long} value
+ * @returns {boolean}
+ */
+MyGame.Example.Monster.prototype.mutate_co_owning_reference = function(value) {
+ var offset = this.bb.__offset(this.bb_pos, 82);
+
+ if (offset === 0) {
+ return false;
+ }
+
+ this.bb.writeUint64(this.bb_pos + offset, value);
+ return true;
+};
+
+/**
+ * @param {number} index
+ * @returns {flatbuffers.Long}
+ */
+MyGame.Example.Monster.prototype.vectorOfCoOwningReferences = function(index) {
+ var offset = this.bb.__offset(this.bb_pos, 84);
+ return offset ? this.bb.readUint64(this.bb.__vector(this.bb_pos + offset) + index * 8) : this.bb.createLong(0, 0);
+};
+
+/**
+ * @returns {number}
+ */
+MyGame.Example.Monster.prototype.vectorOfCoOwningReferencesLength = function() {
+ var offset = this.bb.__offset(this.bb_pos, 84);
+ return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0;
+};
+
+/**
+ * @returns {flatbuffers.Long}
+ */
+MyGame.Example.Monster.prototype.nonOwningReference = function() {
+ var offset = this.bb.__offset(this.bb_pos, 86);
+ return offset ? this.bb.readUint64(this.bb_pos + offset) : this.bb.createLong(0, 0);
+};
+
+/**
+ * @param {flatbuffers.Long} value
+ * @returns {boolean}
+ */
+MyGame.Example.Monster.prototype.mutate_non_owning_reference = function(value) {
+ var offset = this.bb.__offset(this.bb_pos, 86);
+
+ if (offset === 0) {
+ return false;
+ }
+
+ this.bb.writeUint64(this.bb_pos + offset, value);
+ return true;
+};
+
+/**
+ * @param {number} index
+ * @returns {flatbuffers.Long}
+ */
+MyGame.Example.Monster.prototype.vectorOfNonOwningReferences = function(index) {
+ var offset = this.bb.__offset(this.bb_pos, 88);
+ return offset ? this.bb.readUint64(this.bb.__vector(this.bb_pos + offset) + index * 8) : this.bb.createLong(0, 0);
+};
+
+/**
+ * @returns {number}
+ */
+MyGame.Example.Monster.prototype.vectorOfNonOwningReferencesLength = function() {
+ var offset = this.bb.__offset(this.bb_pos, 88);
+ return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0;
+};
+
/**
* @param {flatbuffers.Builder} builder
*/
MyGame.Example.Monster.startMonster = function(builder) {
- builder.startObject(35);
+ builder.startObject(43);
};
/**
builder.addFieldOffset(34, parentNamespaceTestOffset, 0);
};
+/**
+ * @param {flatbuffers.Builder} builder
+ * @param {flatbuffers.Offset} vectorOfReferrablesOffset
+ */
+MyGame.Example.Monster.addVectorOfReferrables = function(builder, vectorOfReferrablesOffset) {
+ builder.addFieldOffset(35, vectorOfReferrablesOffset, 0);
+};
+
+/**
+ * @param {flatbuffers.Builder} builder
+ * @param {Array.<flatbuffers.Offset>} data
+ * @returns {flatbuffers.Offset}
+ */
+MyGame.Example.Monster.createVectorOfReferrablesVector = function(builder, data) {
+ builder.startVector(4, data.length, 4);
+ for (var i = data.length - 1; i >= 0; i--) {
+ builder.addOffset(data[i]);
+ }
+ return builder.endVector();
+};
+
+/**
+ * @param {flatbuffers.Builder} builder
+ * @param {number} numElems
+ */
+MyGame.Example.Monster.startVectorOfReferrablesVector = function(builder, numElems) {
+ builder.startVector(4, numElems, 4);
+};
+
+/**
+ * @param {flatbuffers.Builder} builder
+ * @param {flatbuffers.Long} singleWeakReference
+ */
+MyGame.Example.Monster.addSingleWeakReference = function(builder, singleWeakReference) {
+ builder.addFieldInt64(36, singleWeakReference, builder.createLong(0, 0));
+};
+
+/**
+ * @param {flatbuffers.Builder} builder
+ * @param {flatbuffers.Offset} vectorOfWeakReferencesOffset
+ */
+MyGame.Example.Monster.addVectorOfWeakReferences = function(builder, vectorOfWeakReferencesOffset) {
+ builder.addFieldOffset(37, vectorOfWeakReferencesOffset, 0);
+};
+
+/**
+ * @param {flatbuffers.Builder} builder
+ * @param {Array.<flatbuffers.Long>} data
+ * @returns {flatbuffers.Offset}
+ */
+MyGame.Example.Monster.createVectorOfWeakReferencesVector = function(builder, data) {
+ builder.startVector(8, data.length, 8);
+ for (var i = data.length - 1; i >= 0; i--) {
+ builder.addInt64(data[i]);
+ }
+ return builder.endVector();
+};
+
+/**
+ * @param {flatbuffers.Builder} builder
+ * @param {number} numElems
+ */
+MyGame.Example.Monster.startVectorOfWeakReferencesVector = function(builder, numElems) {
+ builder.startVector(8, numElems, 8);
+};
+
+/**
+ * @param {flatbuffers.Builder} builder
+ * @param {flatbuffers.Offset} vectorOfStrongReferrablesOffset
+ */
+MyGame.Example.Monster.addVectorOfStrongReferrables = function(builder, vectorOfStrongReferrablesOffset) {
+ builder.addFieldOffset(38, vectorOfStrongReferrablesOffset, 0);
+};
+
+/**
+ * @param {flatbuffers.Builder} builder
+ * @param {Array.<flatbuffers.Offset>} data
+ * @returns {flatbuffers.Offset}
+ */
+MyGame.Example.Monster.createVectorOfStrongReferrablesVector = function(builder, data) {
+ builder.startVector(4, data.length, 4);
+ for (var i = data.length - 1; i >= 0; i--) {
+ builder.addOffset(data[i]);
+ }
+ return builder.endVector();
+};
+
+/**
+ * @param {flatbuffers.Builder} builder
+ * @param {number} numElems
+ */
+MyGame.Example.Monster.startVectorOfStrongReferrablesVector = function(builder, numElems) {
+ builder.startVector(4, numElems, 4);
+};
+
+/**
+ * @param {flatbuffers.Builder} builder
+ * @param {flatbuffers.Long} coOwningReference
+ */
+MyGame.Example.Monster.addCoOwningReference = function(builder, coOwningReference) {
+ builder.addFieldInt64(39, coOwningReference, builder.createLong(0, 0));
+};
+
+/**
+ * @param {flatbuffers.Builder} builder
+ * @param {flatbuffers.Offset} vectorOfCoOwningReferencesOffset
+ */
+MyGame.Example.Monster.addVectorOfCoOwningReferences = function(builder, vectorOfCoOwningReferencesOffset) {
+ builder.addFieldOffset(40, vectorOfCoOwningReferencesOffset, 0);
+};
+
+/**
+ * @param {flatbuffers.Builder} builder
+ * @param {Array.<flatbuffers.Long>} data
+ * @returns {flatbuffers.Offset}
+ */
+MyGame.Example.Monster.createVectorOfCoOwningReferencesVector = function(builder, data) {
+ builder.startVector(8, data.length, 8);
+ for (var i = data.length - 1; i >= 0; i--) {
+ builder.addInt64(data[i]);
+ }
+ return builder.endVector();
+};
+
+/**
+ * @param {flatbuffers.Builder} builder
+ * @param {number} numElems
+ */
+MyGame.Example.Monster.startVectorOfCoOwningReferencesVector = function(builder, numElems) {
+ builder.startVector(8, numElems, 8);
+};
+
+/**
+ * @param {flatbuffers.Builder} builder
+ * @param {flatbuffers.Long} nonOwningReference
+ */
+MyGame.Example.Monster.addNonOwningReference = function(builder, nonOwningReference) {
+ builder.addFieldInt64(41, nonOwningReference, builder.createLong(0, 0));
+};
+
+/**
+ * @param {flatbuffers.Builder} builder
+ * @param {flatbuffers.Offset} vectorOfNonOwningReferencesOffset
+ */
+MyGame.Example.Monster.addVectorOfNonOwningReferences = function(builder, vectorOfNonOwningReferencesOffset) {
+ builder.addFieldOffset(42, vectorOfNonOwningReferencesOffset, 0);
+};
+
+/**
+ * @param {flatbuffers.Builder} builder
+ * @param {Array.<flatbuffers.Long>} data
+ * @returns {flatbuffers.Offset}
+ */
+MyGame.Example.Monster.createVectorOfNonOwningReferencesVector = function(builder, data) {
+ builder.startVector(8, data.length, 8);
+ for (var i = data.length - 1; i >= 0; i--) {
+ builder.addInt64(data[i]);
+ }
+ return builder.endVector();
+};
+
+/**
+ * @param {flatbuffers.Builder} builder
+ * @param {number} numElems
+ */
+MyGame.Example.Monster.startVectorOfNonOwningReferencesVector = function(builder, numElems) {
+ builder.startVector(8, numElems, 8);
+};
+
/**
* @param {flatbuffers.Builder} builder
* @returns {flatbuffers.Offset}
return offset;
};
+}
+}
+/**
+ * @constructor
+ */
+export namespace MyGame.Example{
+export class Referrable {
+ /**
+ * @type {flatbuffers.ByteBuffer}
+ */
+ bb: flatbuffers.ByteBuffer|null = null;
+
+ /**
+ * @type {number}
+ */
+ bb_pos:number = 0;
+/**
+ * @param {number} i
+ * @param {flatbuffers.ByteBuffer} bb
+ * @returns {Referrable}
+ */
+__init(i:number, bb:flatbuffers.ByteBuffer):Referrable {
+ this.bb_pos = i;
+ this.bb = bb;
+ return this;
+};
+
+/**
+ * @param {flatbuffers.ByteBuffer} bb
+ * @param {Referrable=} obj
+ * @returns {Referrable}
+ */
+static getRootAsReferrable(bb:flatbuffers.ByteBuffer, obj?:Referrable):Referrable {
+ return (obj || new Referrable).__init(bb.readInt32(bb.position()) + bb.position(), bb);
+};
+
+/**
+ * @returns {flatbuffers.Long}
+ */
+id():flatbuffers.Long {
+ var offset = this.bb!.__offset(this.bb_pos, 4);
+ return offset ? this.bb!.readUint64(this.bb_pos + offset) : this.bb!.createLong(0, 0);
+};
+
+/**
+ * @param {flatbuffers.Long} value
+ * @returns {boolean}
+ */
+mutate_id(value:flatbuffers.Long):boolean {
+ var offset = this.bb!.__offset(this.bb_pos, 4);
+
+ if (offset === 0) {
+ return false;
+ }
+
+ this.bb!.writeUint64(this.bb_pos + offset, value);
+ return true;
+};
+
+/**
+ * @param {flatbuffers.Builder} builder
+ */
+static startReferrable(builder:flatbuffers.Builder) {
+ builder.startObject(1);
+};
+
+/**
+ * @param {flatbuffers.Builder} builder
+ * @param {flatbuffers.Long} id
+ */
+static addId(builder:flatbuffers.Builder, id:flatbuffers.Long) {
+ builder.addFieldInt64(0, id, builder.createLong(0, 0));
+};
+
+/**
+ * @param {flatbuffers.Builder} builder
+ * @returns {flatbuffers.Offset}
+ */
+static endReferrable(builder:flatbuffers.Builder):flatbuffers.Offset {
+ var offset = builder.endObject();
+ return offset;
+};
+
}
}
/**
return offset ? (obj || new MyGame.InParentNamespace).__init(this.bb!.__indirect(this.bb_pos + offset), this.bb!) : null;
};
+/**
+ * @param {number} index
+ * @param {MyGame.Example.Referrable=} obj
+ * @returns {MyGame.Example.Referrable}
+ */
+vectorOfReferrables(index: number, obj?:MyGame.Example.Referrable):MyGame.Example.Referrable|null {
+ var offset = this.bb!.__offset(this.bb_pos, 74);
+ return offset ? (obj || new MyGame.Example.Referrable).__init(this.bb!.__indirect(this.bb!.__vector(this.bb_pos + offset) + index * 4), this.bb!) : null;
+};
+
+/**
+ * @returns {number}
+ */
+vectorOfReferrablesLength():number {
+ var offset = this.bb!.__offset(this.bb_pos, 74);
+ return offset ? this.bb!.__vector_len(this.bb_pos + offset) : 0;
+};
+
+/**
+ * @returns {flatbuffers.Long}
+ */
+singleWeakReference():flatbuffers.Long {
+ var offset = this.bb!.__offset(this.bb_pos, 76);
+ return offset ? this.bb!.readUint64(this.bb_pos + offset) : this.bb!.createLong(0, 0);
+};
+
+/**
+ * @param {flatbuffers.Long} value
+ * @returns {boolean}
+ */
+mutate_single_weak_reference(value:flatbuffers.Long):boolean {
+ var offset = this.bb!.__offset(this.bb_pos, 76);
+
+ if (offset === 0) {
+ return false;
+ }
+
+ this.bb!.writeUint64(this.bb_pos + offset, value);
+ return true;
+};
+
+/**
+ * @param {number} index
+ * @returns {flatbuffers.Long}
+ */
+vectorOfWeakReferences(index: number):flatbuffers.Long|null {
+ var offset = this.bb!.__offset(this.bb_pos, 78);
+ return offset ? this.bb!.readUint64(this.bb!.__vector(this.bb_pos + offset) + index * 8) : this.bb!.createLong(0, 0);
+};
+
+/**
+ * @returns {number}
+ */
+vectorOfWeakReferencesLength():number {
+ var offset = this.bb!.__offset(this.bb_pos, 78);
+ return offset ? this.bb!.__vector_len(this.bb_pos + offset) : 0;
+};
+
+/**
+ * @param {number} index
+ * @param {MyGame.Example.Referrable=} obj
+ * @returns {MyGame.Example.Referrable}
+ */
+vectorOfStrongReferrables(index: number, obj?:MyGame.Example.Referrable):MyGame.Example.Referrable|null {
+ var offset = this.bb!.__offset(this.bb_pos, 80);
+ return offset ? (obj || new MyGame.Example.Referrable).__init(this.bb!.__indirect(this.bb!.__vector(this.bb_pos + offset) + index * 4), this.bb!) : null;
+};
+
+/**
+ * @returns {number}
+ */
+vectorOfStrongReferrablesLength():number {
+ var offset = this.bb!.__offset(this.bb_pos, 80);
+ return offset ? this.bb!.__vector_len(this.bb_pos + offset) : 0;
+};
+
+/**
+ * @returns {flatbuffers.Long}
+ */
+coOwningReference():flatbuffers.Long {
+ var offset = this.bb!.__offset(this.bb_pos, 82);
+ return offset ? this.bb!.readUint64(this.bb_pos + offset) : this.bb!.createLong(0, 0);
+};
+
+/**
+ * @param {flatbuffers.Long} value
+ * @returns {boolean}
+ */
+mutate_co_owning_reference(value:flatbuffers.Long):boolean {
+ var offset = this.bb!.__offset(this.bb_pos, 82);
+
+ if (offset === 0) {
+ return false;
+ }
+
+ this.bb!.writeUint64(this.bb_pos + offset, value);
+ return true;
+};
+
+/**
+ * @param {number} index
+ * @returns {flatbuffers.Long}
+ */
+vectorOfCoOwningReferences(index: number):flatbuffers.Long|null {
+ var offset = this.bb!.__offset(this.bb_pos, 84);
+ return offset ? this.bb!.readUint64(this.bb!.__vector(this.bb_pos + offset) + index * 8) : this.bb!.createLong(0, 0);
+};
+
+/**
+ * @returns {number}
+ */
+vectorOfCoOwningReferencesLength():number {
+ var offset = this.bb!.__offset(this.bb_pos, 84);
+ return offset ? this.bb!.__vector_len(this.bb_pos + offset) : 0;
+};
+
+/**
+ * @returns {flatbuffers.Long}
+ */
+nonOwningReference():flatbuffers.Long {
+ var offset = this.bb!.__offset(this.bb_pos, 86);
+ return offset ? this.bb!.readUint64(this.bb_pos + offset) : this.bb!.createLong(0, 0);
+};
+
+/**
+ * @param {flatbuffers.Long} value
+ * @returns {boolean}
+ */
+mutate_non_owning_reference(value:flatbuffers.Long):boolean {
+ var offset = this.bb!.__offset(this.bb_pos, 86);
+
+ if (offset === 0) {
+ return false;
+ }
+
+ this.bb!.writeUint64(this.bb_pos + offset, value);
+ return true;
+};
+
+/**
+ * @param {number} index
+ * @returns {flatbuffers.Long}
+ */
+vectorOfNonOwningReferences(index: number):flatbuffers.Long|null {
+ var offset = this.bb!.__offset(this.bb_pos, 88);
+ return offset ? this.bb!.readUint64(this.bb!.__vector(this.bb_pos + offset) + index * 8) : this.bb!.createLong(0, 0);
+};
+
+/**
+ * @returns {number}
+ */
+vectorOfNonOwningReferencesLength():number {
+ var offset = this.bb!.__offset(this.bb_pos, 88);
+ return offset ? this.bb!.__vector_len(this.bb_pos + offset) : 0;
+};
+
/**
* @param {flatbuffers.Builder} builder
*/
static startMonster(builder:flatbuffers.Builder) {
- builder.startObject(35);
+ builder.startObject(43);
};
/**
builder.addFieldOffset(34, parentNamespaceTestOffset, 0);
};
+/**
+ * @param {flatbuffers.Builder} builder
+ * @param {flatbuffers.Offset} vectorOfReferrablesOffset
+ */
+static addVectorOfReferrables(builder:flatbuffers.Builder, vectorOfReferrablesOffset:flatbuffers.Offset) {
+ builder.addFieldOffset(35, vectorOfReferrablesOffset, 0);
+};
+
+/**
+ * @param {flatbuffers.Builder} builder
+ * @param {Array.<flatbuffers.Offset>} data
+ * @returns {flatbuffers.Offset}
+ */
+static createVectorOfReferrablesVector(builder:flatbuffers.Builder, data:flatbuffers.Offset[]):flatbuffers.Offset {
+ builder.startVector(4, data.length, 4);
+ for (var i = data.length - 1; i >= 0; i--) {
+ builder.addOffset(data[i]);
+ }
+ return builder.endVector();
+};
+
+/**
+ * @param {flatbuffers.Builder} builder
+ * @param {number} numElems
+ */
+static startVectorOfReferrablesVector(builder:flatbuffers.Builder, numElems:number) {
+ builder.startVector(4, numElems, 4);
+};
+
+/**
+ * @param {flatbuffers.Builder} builder
+ * @param {flatbuffers.Long} singleWeakReference
+ */
+static addSingleWeakReference(builder:flatbuffers.Builder, singleWeakReference:flatbuffers.Long) {
+ builder.addFieldInt64(36, singleWeakReference, builder.createLong(0, 0));
+};
+
+/**
+ * @param {flatbuffers.Builder} builder
+ * @param {flatbuffers.Offset} vectorOfWeakReferencesOffset
+ */
+static addVectorOfWeakReferences(builder:flatbuffers.Builder, vectorOfWeakReferencesOffset:flatbuffers.Offset) {
+ builder.addFieldOffset(37, vectorOfWeakReferencesOffset, 0);
+};
+
+/**
+ * @param {flatbuffers.Builder} builder
+ * @param {Array.<flatbuffers.Long>} data
+ * @returns {flatbuffers.Offset}
+ */
+static createVectorOfWeakReferencesVector(builder:flatbuffers.Builder, data:flatbuffers.Long[]):flatbuffers.Offset {
+ builder.startVector(8, data.length, 8);
+ for (var i = data.length - 1; i >= 0; i--) {
+ builder.addInt64(data[i]);
+ }
+ return builder.endVector();
+};
+
+/**
+ * @param {flatbuffers.Builder} builder
+ * @param {number} numElems
+ */
+static startVectorOfWeakReferencesVector(builder:flatbuffers.Builder, numElems:number) {
+ builder.startVector(8, numElems, 8);
+};
+
+/**
+ * @param {flatbuffers.Builder} builder
+ * @param {flatbuffers.Offset} vectorOfStrongReferrablesOffset
+ */
+static addVectorOfStrongReferrables(builder:flatbuffers.Builder, vectorOfStrongReferrablesOffset:flatbuffers.Offset) {
+ builder.addFieldOffset(38, vectorOfStrongReferrablesOffset, 0);
+};
+
+/**
+ * @param {flatbuffers.Builder} builder
+ * @param {Array.<flatbuffers.Offset>} data
+ * @returns {flatbuffers.Offset}
+ */
+static createVectorOfStrongReferrablesVector(builder:flatbuffers.Builder, data:flatbuffers.Offset[]):flatbuffers.Offset {
+ builder.startVector(4, data.length, 4);
+ for (var i = data.length - 1; i >= 0; i--) {
+ builder.addOffset(data[i]);
+ }
+ return builder.endVector();
+};
+
+/**
+ * @param {flatbuffers.Builder} builder
+ * @param {number} numElems
+ */
+static startVectorOfStrongReferrablesVector(builder:flatbuffers.Builder, numElems:number) {
+ builder.startVector(4, numElems, 4);
+};
+
+/**
+ * @param {flatbuffers.Builder} builder
+ * @param {flatbuffers.Long} coOwningReference
+ */
+static addCoOwningReference(builder:flatbuffers.Builder, coOwningReference:flatbuffers.Long) {
+ builder.addFieldInt64(39, coOwningReference, builder.createLong(0, 0));
+};
+
+/**
+ * @param {flatbuffers.Builder} builder
+ * @param {flatbuffers.Offset} vectorOfCoOwningReferencesOffset
+ */
+static addVectorOfCoOwningReferences(builder:flatbuffers.Builder, vectorOfCoOwningReferencesOffset:flatbuffers.Offset) {
+ builder.addFieldOffset(40, vectorOfCoOwningReferencesOffset, 0);
+};
+
+/**
+ * @param {flatbuffers.Builder} builder
+ * @param {Array.<flatbuffers.Long>} data
+ * @returns {flatbuffers.Offset}
+ */
+static createVectorOfCoOwningReferencesVector(builder:flatbuffers.Builder, data:flatbuffers.Long[]):flatbuffers.Offset {
+ builder.startVector(8, data.length, 8);
+ for (var i = data.length - 1; i >= 0; i--) {
+ builder.addInt64(data[i]);
+ }
+ return builder.endVector();
+};
+
+/**
+ * @param {flatbuffers.Builder} builder
+ * @param {number} numElems
+ */
+static startVectorOfCoOwningReferencesVector(builder:flatbuffers.Builder, numElems:number) {
+ builder.startVector(8, numElems, 8);
+};
+
+/**
+ * @param {flatbuffers.Builder} builder
+ * @param {flatbuffers.Long} nonOwningReference
+ */
+static addNonOwningReference(builder:flatbuffers.Builder, nonOwningReference:flatbuffers.Long) {
+ builder.addFieldInt64(41, nonOwningReference, builder.createLong(0, 0));
+};
+
+/**
+ * @param {flatbuffers.Builder} builder
+ * @param {flatbuffers.Offset} vectorOfNonOwningReferencesOffset
+ */
+static addVectorOfNonOwningReferences(builder:flatbuffers.Builder, vectorOfNonOwningReferencesOffset:flatbuffers.Offset) {
+ builder.addFieldOffset(42, vectorOfNonOwningReferencesOffset, 0);
+};
+
+/**
+ * @param {flatbuffers.Builder} builder
+ * @param {Array.<flatbuffers.Long>} data
+ * @returns {flatbuffers.Offset}
+ */
+static createVectorOfNonOwningReferencesVector(builder:flatbuffers.Builder, data:flatbuffers.Long[]):flatbuffers.Offset {
+ builder.startVector(8, data.length, 8);
+ for (var i = data.length - 1; i >= 0; i--) {
+ builder.addInt64(data[i]);
+ }
+ return builder.endVector();
+};
+
+/**
+ * @param {flatbuffers.Builder} builder
+ * @param {number} numElems
+ */
+static startVectorOfNonOwningReferencesVector(builder:flatbuffers.Builder, numElems:number) {
+ builder.startVector(8, numElems, 8);
+};
+
/**
* @param {flatbuffers.Builder} builder
* @returns {flatbuffers.Offset}
struct StructInNestedNS;
+inline const flatbuffers::TypeTable *TableInNestedNSTypeTable();
+
+inline const flatbuffers::TypeTable *StructInNestedNSTypeTable();
+
enum EnumInNestedNS {
EnumInNestedNS_A = 0,
EnumInNestedNS_B = 1,
EnumInNestedNS_MAX = EnumInNestedNS_C
};
-inline EnumInNestedNS (&EnumValuesEnumInNestedNS())[3] {
- static EnumInNestedNS values[] = {
+inline const EnumInNestedNS (&EnumValuesEnumInNestedNS())[3] {
+ static const EnumInNestedNS values[] = {
EnumInNestedNS_A,
EnumInNestedNS_B,
EnumInNestedNS_C
return values;
}
-inline const char **EnumNamesEnumInNestedNS() {
- static const char *names[] = {
+inline const char * const *EnumNamesEnumInNestedNS() {
+ static const char * const names[] = {
"A",
"B",
"C",
return EnumNamesEnumInNestedNS()[index];
}
-MANUALLY_ALIGNED_STRUCT(4) StructInNestedNS FLATBUFFERS_FINAL_CLASS {
+FLATBUFFERS_MANUALLY_ALIGNED_STRUCT(4) StructInNestedNS FLATBUFFERS_FINAL_CLASS {
private:
int32_t a_;
int32_t b_;
flatbuffers::WriteScalar(&b_, _b);
}
};
-STRUCT_END(StructInNestedNS, 8);
+FLATBUFFERS_STRUCT_END(StructInNestedNS, 8);
struct TableInNestedNS FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+ static const flatbuffers::TypeTable *MiniReflectTypeTable() {
+ return TableInNestedNSTypeTable();
+ }
enum {
VT_FOO = 4
};
return builder_.Finish();
}
-inline flatbuffers::TypeTable *TableInNestedNSTypeTable();
-
-inline flatbuffers::TypeTable *StructInNestedNSTypeTable();
-
-inline flatbuffers::TypeTable *EnumInNestedNSTypeTable() {
- static flatbuffers::TypeCode type_codes[] = {
+inline const flatbuffers::TypeTable *EnumInNestedNSTypeTable() {
+ static const flatbuffers::TypeCode type_codes[] = {
{ flatbuffers::ET_CHAR, 0, 0 },
{ flatbuffers::ET_CHAR, 0, 0 },
{ flatbuffers::ET_CHAR, 0, 0 }
};
- static flatbuffers::TypeFunction type_refs[] = {
+ static const flatbuffers::TypeFunction type_refs[] = {
EnumInNestedNSTypeTable
};
- static const char *names[] = {
+ static const char * const names[] = {
"A",
"B",
"C"
};
- static flatbuffers::TypeTable tt = {
+ static const flatbuffers::TypeTable tt = {
flatbuffers::ST_ENUM, 3, type_codes, type_refs, nullptr, names
};
return &tt;
}
-inline flatbuffers::TypeTable *TableInNestedNSTypeTable() {
- static flatbuffers::TypeCode type_codes[] = {
+inline const flatbuffers::TypeTable *TableInNestedNSTypeTable() {
+ static const flatbuffers::TypeCode type_codes[] = {
{ flatbuffers::ET_INT, 0, -1 }
};
- static const char *names[] = {
+ static const char * const names[] = {
"foo"
};
- static flatbuffers::TypeTable tt = {
+ static const flatbuffers::TypeTable tt = {
flatbuffers::ST_TABLE, 1, type_codes, nullptr, nullptr, names
};
return &tt;
}
-inline flatbuffers::TypeTable *StructInNestedNSTypeTable() {
- static flatbuffers::TypeCode type_codes[] = {
+inline const flatbuffers::TypeTable *StructInNestedNSTypeTable() {
+ static const flatbuffers::TypeCode type_codes[] = {
{ flatbuffers::ET_INT, 0, -1 },
{ flatbuffers::ET_INT, 0, -1 }
};
static const int32_t values[] = { 0, 4, 8 };
- static const char *names[] = {
+ static const char * const names[] = {
"a",
"b"
};
- static flatbuffers::TypeTable tt = {
+ static const flatbuffers::TypeTable tt = {
flatbuffers::ST_STRUCT, 2, type_codes, nullptr, values, names
};
return &tt;
struct SecondTableInA;
+inline const flatbuffers::TypeTable *TableInFirstNSTypeTable();
+
+} // namespace NamespaceA
+
+namespace NamespaceC {
+
+inline const flatbuffers::TypeTable *TableInCTypeTable();
+
+} // namespace NamespaceC
+
+namespace NamespaceA {
+
+inline const flatbuffers::TypeTable *SecondTableInATypeTable();
+
struct TableInFirstNS FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+ static const flatbuffers::TypeTable *MiniReflectTypeTable() {
+ return TableInFirstNSTypeTable();
+ }
enum {
VT_FOO_TABLE = 4,
VT_FOO_ENUM = 6,
namespace NamespaceC {
struct TableInC FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+ static const flatbuffers::TypeTable *MiniReflectTypeTable() {
+ return TableInCTypeTable();
+ }
enum {
VT_REFER_TO_A1 = 4,
VT_REFER_TO_A2 = 6
namespace NamespaceA {
struct SecondTableInA FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+ static const flatbuffers::TypeTable *MiniReflectTypeTable() {
+ return SecondTableInATypeTable();
+ }
enum {
VT_REFER_TO_C = 4
};
namespace NamespaceA {
-inline flatbuffers::TypeTable *TableInFirstNSTypeTable();
-
-} // namespace NamespaceA
-
-namespace NamespaceC {
-
-inline flatbuffers::TypeTable *TableInCTypeTable();
-
-} // namespace NamespaceC
-
-namespace NamespaceA {
-
-inline flatbuffers::TypeTable *SecondTableInATypeTable();
-
-inline flatbuffers::TypeTable *TableInFirstNSTypeTable() {
- static flatbuffers::TypeCode type_codes[] = {
+inline const flatbuffers::TypeTable *TableInFirstNSTypeTable() {
+ static const flatbuffers::TypeCode type_codes[] = {
{ flatbuffers::ET_SEQUENCE, 0, 0 },
{ flatbuffers::ET_CHAR, 0, 1 },
{ flatbuffers::ET_SEQUENCE, 0, 2 }
};
- static flatbuffers::TypeFunction type_refs[] = {
+ static const flatbuffers::TypeFunction type_refs[] = {
NamespaceA::NamespaceB::TableInNestedNSTypeTable,
NamespaceA::NamespaceB::EnumInNestedNSTypeTable,
NamespaceA::NamespaceB::StructInNestedNSTypeTable
};
- static const char *names[] = {
+ static const char * const names[] = {
"foo_table",
"foo_enum",
"foo_struct"
};
- static flatbuffers::TypeTable tt = {
+ static const flatbuffers::TypeTable tt = {
flatbuffers::ST_TABLE, 3, type_codes, type_refs, nullptr, names
};
return &tt;
namespace NamespaceC {
-inline flatbuffers::TypeTable *TableInCTypeTable() {
- static flatbuffers::TypeCode type_codes[] = {
+inline const flatbuffers::TypeTable *TableInCTypeTable() {
+ static const flatbuffers::TypeCode type_codes[] = {
{ flatbuffers::ET_SEQUENCE, 0, 0 },
{ flatbuffers::ET_SEQUENCE, 0, 1 }
};
- static flatbuffers::TypeFunction type_refs[] = {
+ static const flatbuffers::TypeFunction type_refs[] = {
NamespaceA::TableInFirstNSTypeTable,
NamespaceA::SecondTableInATypeTable
};
- static const char *names[] = {
+ static const char * const names[] = {
"refer_to_a1",
"refer_to_a2"
};
- static flatbuffers::TypeTable tt = {
+ static const flatbuffers::TypeTable tt = {
flatbuffers::ST_TABLE, 2, type_codes, type_refs, nullptr, names
};
return &tt;
namespace NamespaceA {
-inline flatbuffers::TypeTable *SecondTableInATypeTable() {
- static flatbuffers::TypeCode type_codes[] = {
+inline const flatbuffers::TypeTable *SecondTableInATypeTable() {
+ static const flatbuffers::TypeCode type_codes[] = {
{ flatbuffers::ET_SEQUENCE, 0, 0 }
};
- static flatbuffers::TypeFunction type_refs[] = {
+ static const flatbuffers::TypeFunction type_refs[] = {
NamespaceC::TableInCTypeTable
};
- static const char *names[] = {
+ static const char * const names[] = {
"refer_to_c"
};
- static flatbuffers::TypeTable tt = {
+ static const flatbuffers::TypeTable tt = {
flatbuffers::ST_TABLE, 1, type_codes, type_refs, nullptr, names
};
return &tt;
// automatically generated by the FlatBuffers compiler, do not modify
-import * as NS9459827973991502386 from "./namespace_test1_generated";
+import * as NS11563891686210618450 from "./namespace_test1_generated";
/**
* @constructor
*/
* @param {NamespaceA.NamespaceB.TableInNestedNS=} obj
* @returns {NamespaceA.NamespaceB.TableInNestedNS|null}
*/
-fooTable(obj?:NS9459827973991502386.NamespaceA.NamespaceB.TableInNestedNS):NS9459827973991502386.NamespaceA.NamespaceB.TableInNestedNS|null {
+fooTable(obj?:NS11563891686210618450.NamespaceA.NamespaceB.TableInNestedNS):NS11563891686210618450.NamespaceA.NamespaceB.TableInNestedNS|null {
var offset = this.bb!.__offset(this.bb_pos, 4);
- return offset ? (obj || new NS9459827973991502386.NamespaceA.NamespaceB.TableInNestedNS).__init(this.bb!.__indirect(this.bb_pos + offset), this.bb!) : null;
+ return offset ? (obj || new NS11563891686210618450.NamespaceA.NamespaceB.TableInNestedNS).__init(this.bb!.__indirect(this.bb_pos + offset), this.bb!) : null;
};
/**
* @returns {NamespaceA.NamespaceB.EnumInNestedNS}
*/
-fooEnum():NS9459827973991502386.NamespaceA.NamespaceB.EnumInNestedNS {
+fooEnum():NS11563891686210618450.NamespaceA.NamespaceB.EnumInNestedNS {
var offset = this.bb!.__offset(this.bb_pos, 6);
- return offset ? /** @type {NamespaceA.NamespaceB.EnumInNestedNS} */ (this.bb!.readInt8(this.bb_pos + offset)) : NS9459827973991502386.NamespaceA.NamespaceB.EnumInNestedNS.A;
+ return offset ? /** @type {NamespaceA.NamespaceB.EnumInNestedNS} */ (this.bb!.readInt8(this.bb_pos + offset)) : NS11563891686210618450.NamespaceA.NamespaceB.EnumInNestedNS.A;
};
/**
* @param {NamespaceA.NamespaceB.EnumInNestedNS} value
* @returns {boolean}
*/
-mutate_foo_enum(value:NS9459827973991502386.NamespaceA.NamespaceB.EnumInNestedNS):boolean {
+mutate_foo_enum(value:NS11563891686210618450.NamespaceA.NamespaceB.EnumInNestedNS):boolean {
var offset = this.bb!.__offset(this.bb_pos, 6);
if (offset === 0) {
* @param {NamespaceA.NamespaceB.StructInNestedNS=} obj
* @returns {NamespaceA.NamespaceB.StructInNestedNS|null}
*/
-fooStruct(obj?:NS9459827973991502386.NamespaceA.NamespaceB.StructInNestedNS):NS9459827973991502386.NamespaceA.NamespaceB.StructInNestedNS|null {
+fooStruct(obj?:NS11563891686210618450.NamespaceA.NamespaceB.StructInNestedNS):NS11563891686210618450.NamespaceA.NamespaceB.StructInNestedNS|null {
var offset = this.bb!.__offset(this.bb_pos, 8);
- return offset ? (obj || new NS9459827973991502386.NamespaceA.NamespaceB.StructInNestedNS).__init(this.bb_pos + offset, this.bb!) : null;
+ return offset ? (obj || new NS11563891686210618450.NamespaceA.NamespaceB.StructInNestedNS).__init(this.bb_pos + offset, this.bb!) : null;
};
/**
* @param {flatbuffers.Builder} builder
* @param {NamespaceA.NamespaceB.EnumInNestedNS} fooEnum
*/
-static addFooEnum(builder:flatbuffers.Builder, fooEnum:NS9459827973991502386.NamespaceA.NamespaceB.EnumInNestedNS) {
- builder.addFieldInt8(1, fooEnum, NS9459827973991502386.NamespaceA.NamespaceB.EnumInNestedNS.A);
+static addFooEnum(builder:flatbuffers.Builder, fooEnum:NS11563891686210618450.NamespaceA.NamespaceB.EnumInNestedNS) {
+ builder.addFieldInt8(1, fooEnum, NS11563891686210618450.NamespaceA.NamespaceB.EnumInNestedNS.A);
};
/**
n:proto.test.ProtoMessage_.OtherMessage;
o:[string];
z:proto.test.ImportedMessage;
+ /// doc comment for r.
+ r:proto.test.ProtoMessage_.Anonymous0;
}
namespace proto.test.ProtoMessage_;
b:float = 3.14149;
}
+table Anonymous0 {
+ /// doc comment for s.
+ s:proto.test.ImportedMessage;
+ /// doc comment for t on 2
+ /// lines.
+ t:proto.test.ProtoMessage_.OtherMessage;
+}
+
optional OtherMessage n = 12;
repeated string o = 14;
optional ImportedMessage z = 16;
+ /// doc comment for r.
+ oneof r {
+ /// doc comment for s.
+ ImportedMessage s = 17;
+ /// doc comment for t on 2
+ /// lines.
+ OtherMessage t = 18;
+ }
}
--- /dev/null
+// Generated from test.proto
+
+namespace proto.test;
+
+/// Enum doc comment.
+enum ProtoEnum : int {
+ FOO = 1,
+ /// Enum 2nd value doc comment misaligned.
+ BAR = 5,
+}
+
+namespace proto.test.ProtoMessage_;
+
+union RUnion {
+ /// doc comment for s.
+ proto.test.ImportedMessage,
+ /// doc comment for t on 2
+ /// lines.
+ proto.test.ProtoMessage_.OtherMessage,
+}
+
+namespace proto.test;
+
+table ImportedMessage {
+ a:int;
+}
+
+/// 2nd table doc comment with
+/// many lines.
+table ProtoMessage {
+ c:int = 16;
+ d:long;
+ p:uint;
+ e:ulong;
+ /// doc comment for f.
+ f:int = -1;
+ g:long;
+ h:uint;
+ q:ulong;
+ i:int;
+ j:long;
+ /// doc comment for k.
+ k:bool;
+ /// doc comment for l on 2
+ /// lines
+ l:string (required);
+ m:[ubyte];
+ n:proto.test.ProtoMessage_.OtherMessage;
+ o:[string];
+ z:proto.test.ImportedMessage;
+ /// doc comment for r.
+ r:proto.test.ProtoMessage_.RUnion;
+}
+
+namespace proto.test.ProtoMessage_;
+
+table OtherMessage {
+ a:double;
+ /// doc comment for b.
+ b:float = 3.14149;
+}
+
from flatbuffers import compat
+from flatbuffers import util
from flatbuffers.compat import range_func as compat_range
from flatbuffers.compat import NumpyRequiredForThisFeature
class TestWireFormat(unittest.TestCase):
def test_wire_format(self):
# Verify that using the generated Python code builds a buffer without
- # returning errors, and is interpreted correctly:
- gen_buf, gen_off = make_monster_from_generated_code()
- CheckReadBuffer(gen_buf, gen_off)
+ # returning errors, and is interpreted correctly, for size prefixed
+ # representation and regular:
+ for sizePrefix in [True, False]:
+ gen_buf, gen_off = make_monster_from_generated_code(sizePrefix = sizePrefix)
+ CheckReadBuffer(gen_buf, gen_off, sizePrefix = sizePrefix)
# Verify that the canonical flatbuffer file is readable by the
# generated Python code. Note that context managers are not part of
f.close()
-def CheckReadBuffer(buf, offset):
+def CheckReadBuffer(buf, offset, sizePrefix = False):
''' CheckReadBuffer checks that the given buffer is evaluated correctly
as the example Monster. '''
if not stmt:
raise AssertionError('CheckReadBuffer case failed')
+ if sizePrefix:
+ size = util.GetSizePrefix(buf, offset)
+ # taken from the size of monsterdata_python_wire.mon, minus 4
+ asserter(size == 348)
+ buf, offset = util.RemoveSizePrefix(buf, offset)
monster = MyGame.Example.Monster.Monster.GetRootAsMonster(buf, offset)
asserter(monster.Hp() == 80)
])
-def make_monster_from_generated_code():
+def make_monster_from_generated_code(sizePrefix = False):
''' Use generated code to build the example Monster. '''
b = flatbuffers.Builder(0)
MyGame.Example.Monster.MonsterAddVectorOfDoubles(b, VectorOfDoubles)
mon = MyGame.Example.Monster.MonsterEnd(b)
- b.Finish(mon)
+ if sizePrefix:
+ b.FinishSizePrefixed(mon)
+ else:
+ b.Finish(mon)
return b.Bytes, b.Head()
#include "flatbuffers/registry.h"
#include "flatbuffers/util.h"
+// clang-format off
+#ifdef FLATBUFFERS_CPP98_STL
+ #include "flatbuffers/stl_emulation.h"
+ namespace std {
+ using flatbuffers::unique_ptr;
+ }
+#endif
+// clang-format on
+
#include "monster_test_generated.h"
#include "namespace_test/namespace_test1_generated.h"
#include "namespace_test/namespace_test2_generated.h"
void SizePrefixedTest() {
// Create size prefixed buffer.
flatbuffers::FlatBufferBuilder fbb;
- fbb.FinishSizePrefixed(
+ FinishSizePrefixedMonsterBuffer(
+ fbb,
CreateMonster(fbb, 0, 200, 300, fbb.CreateString("bob")));
// Verify it.
flatbuffers::Verifier verifier(fbb.GetBufferPointer(), fbb.GetSize());
- TEST_EQ(verifier.VerifySizePrefixedBuffer<Monster>(nullptr), true);
+ TEST_EQ(VerifySizePrefixedMonsterBuffer(verifier), true);
// Access it.
- auto m = flatbuffers::GetSizePrefixedRoot<MyGame::Example::Monster>(
- fbb.GetBufferPointer());
+ auto m = GetSizePrefixedMonster(fbb.GetBufferPointer());
TEST_EQ(m->mana(), 200);
TEST_EQ(m->hp(), 300);
TEST_EQ_STR(m->name()->c_str(), "bob");
}
void MiniReflectFlatBuffersTest(uint8_t *flatbuf) {
- auto s = flatbuffers::FlatBufferToString(flatbuf, MonsterTypeTable());
+ auto s = flatbuffers::FlatBufferToString(flatbuf, Monster::MiniReflectTypeTable());
TEST_EQ_STR(
s.c_str(),
"{ "
// load the .proto and the golden file from disk
std::string protofile;
std::string goldenfile;
+ std::string goldenunionfile;
TEST_EQ(
flatbuffers::LoadFile((test_data_path + "prototest/test.proto").c_str(),
false, &protofile),
flatbuffers::LoadFile((test_data_path + "prototest/test.golden").c_str(),
false, &goldenfile),
true);
+ TEST_EQ(
+ flatbuffers::LoadFile((test_data_path +
+ "prototest/test_union.golden").c_str(),
+ false, &goldenunionfile),
+ true);
flatbuffers::IDLOptions opts;
opts.include_dependence_headers = false;
flatbuffers::Parser parser2;
TEST_EQ(parser2.Parse(fbs.c_str(), nullptr), true);
TEST_EQ_STR(fbs.c_str(), goldenfile.c_str());
+
+ // Parse proto with --oneof-union option.
+ opts.proto_oneof_union = true;
+ flatbuffers::Parser parser3(opts);
+ TEST_EQ(parser3.Parse(protofile.c_str(), include_directories), true);
+
+ // Generate fbs.
+ auto fbs_union = flatbuffers::GenerateFBS(parser3, "test");
+
+ // Ensure generated file is parsable.
+ flatbuffers::Parser parser4;
+ TEST_EQ(parser4.Parse(fbs_union.c_str(), nullptr), true);
+ TEST_EQ_STR(fbs_union.c_str(), goldenunionfile.c_str());
}
template<typename T>
struct Movie;
struct MovieT;
+inline const flatbuffers::TypeTable *AttackerTypeTable();
+
+inline const flatbuffers::TypeTable *RapunzelTypeTable();
+
+inline const flatbuffers::TypeTable *BookReaderTypeTable();
+
+inline const flatbuffers::TypeTable *MovieTypeTable();
+
enum Character {
Character_NONE = 0,
Character_MuLan = 1,
Character_MAX = Character_Unused
};
-inline Character (&EnumValuesCharacter())[7] {
- static Character values[] = {
+inline const Character (&EnumValuesCharacter())[7] {
+ static const Character values[] = {
Character_NONE,
Character_MuLan,
Character_Rapunzel,
return values;
}
-inline const char **EnumNamesCharacter() {
- static const char *names[] = {
+inline const char * const *EnumNamesCharacter() {
+ static const char * const names[] = {
"NONE",
"MuLan",
"Rapunzel",
bool VerifyCharacter(flatbuffers::Verifier &verifier, const void *obj, Character type);
bool VerifyCharacterVector(flatbuffers::Verifier &verifier, const flatbuffers::Vector<flatbuffers::Offset<void>> *values, const flatbuffers::Vector<uint8_t> *types);
-MANUALLY_ALIGNED_STRUCT(4) Rapunzel FLATBUFFERS_FINAL_CLASS {
+FLATBUFFERS_MANUALLY_ALIGNED_STRUCT(4) Rapunzel FLATBUFFERS_FINAL_CLASS {
private:
int32_t hair_length_;
flatbuffers::WriteScalar(&hair_length_, _hair_length);
}
};
-STRUCT_END(Rapunzel, 4);
+FLATBUFFERS_STRUCT_END(Rapunzel, 4);
-MANUALLY_ALIGNED_STRUCT(4) BookReader FLATBUFFERS_FINAL_CLASS {
+FLATBUFFERS_MANUALLY_ALIGNED_STRUCT(4) BookReader FLATBUFFERS_FINAL_CLASS {
private:
int32_t books_read_;
flatbuffers::WriteScalar(&books_read_, _books_read);
}
};
-STRUCT_END(BookReader, 4);
+FLATBUFFERS_STRUCT_END(BookReader, 4);
struct AttackerT : public flatbuffers::NativeTable {
typedef Attacker TableType;
struct Attacker FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
typedef AttackerT NativeTableType;
+ static const flatbuffers::TypeTable *MiniReflectTypeTable() {
+ return AttackerTypeTable();
+ }
enum {
VT_SWORD_ATTACK_DAMAGE = 4
};
struct Movie FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
typedef MovieT NativeTableType;
+ static const flatbuffers::TypeTable *MiniReflectTypeTable() {
+ return MovieTypeTable();
+ }
enum {
VT_MAIN_CHARACTER_TYPE = 4,
VT_MAIN_CHARACTER = 6,
type = Character_NONE;
}
-inline flatbuffers::TypeTable *AttackerTypeTable();
-
-inline flatbuffers::TypeTable *RapunzelTypeTable();
-
-inline flatbuffers::TypeTable *BookReaderTypeTable();
-
-inline flatbuffers::TypeTable *MovieTypeTable();
-
-inline flatbuffers::TypeTable *CharacterTypeTable() {
- static flatbuffers::TypeCode type_codes[] = {
+inline const flatbuffers::TypeTable *CharacterTypeTable() {
+ static const flatbuffers::TypeCode type_codes[] = {
{ flatbuffers::ET_SEQUENCE, 0, -1 },
{ flatbuffers::ET_SEQUENCE, 0, 0 },
{ flatbuffers::ET_SEQUENCE, 0, 1 },
{ flatbuffers::ET_STRING, 0, -1 },
{ flatbuffers::ET_STRING, 0, -1 }
};
- static flatbuffers::TypeFunction type_refs[] = {
+ static const flatbuffers::TypeFunction type_refs[] = {
AttackerTypeTable,
RapunzelTypeTable,
BookReaderTypeTable
};
- static const char *names[] = {
+ static const char * const names[] = {
"NONE",
"MuLan",
"Rapunzel",
"Other",
"Unused"
};
- static flatbuffers::TypeTable tt = {
+ static const flatbuffers::TypeTable tt = {
flatbuffers::ST_UNION, 7, type_codes, type_refs, nullptr, names
};
return &tt;
}
-inline flatbuffers::TypeTable *AttackerTypeTable() {
- static flatbuffers::TypeCode type_codes[] = {
+inline const flatbuffers::TypeTable *AttackerTypeTable() {
+ static const flatbuffers::TypeCode type_codes[] = {
{ flatbuffers::ET_INT, 0, -1 }
};
- static const char *names[] = {
+ static const char * const names[] = {
"sword_attack_damage"
};
- static flatbuffers::TypeTable tt = {
+ static const flatbuffers::TypeTable tt = {
flatbuffers::ST_TABLE, 1, type_codes, nullptr, nullptr, names
};
return &tt;
}
-inline flatbuffers::TypeTable *RapunzelTypeTable() {
- static flatbuffers::TypeCode type_codes[] = {
+inline const flatbuffers::TypeTable *RapunzelTypeTable() {
+ static const flatbuffers::TypeCode type_codes[] = {
{ flatbuffers::ET_INT, 0, -1 }
};
static const int32_t values[] = { 0, 4 };
- static const char *names[] = {
+ static const char * const names[] = {
"hair_length"
};
- static flatbuffers::TypeTable tt = {
+ static const flatbuffers::TypeTable tt = {
flatbuffers::ST_STRUCT, 1, type_codes, nullptr, values, names
};
return &tt;
}
-inline flatbuffers::TypeTable *BookReaderTypeTable() {
- static flatbuffers::TypeCode type_codes[] = {
+inline const flatbuffers::TypeTable *BookReaderTypeTable() {
+ static const flatbuffers::TypeCode type_codes[] = {
{ flatbuffers::ET_INT, 0, -1 }
};
static const int32_t values[] = { 0, 4 };
- static const char *names[] = {
+ static const char * const names[] = {
"books_read"
};
- static flatbuffers::TypeTable tt = {
+ static const flatbuffers::TypeTable tt = {
flatbuffers::ST_STRUCT, 1, type_codes, nullptr, values, names
};
return &tt;
}
-inline flatbuffers::TypeTable *MovieTypeTable() {
- static flatbuffers::TypeCode type_codes[] = {
+inline const flatbuffers::TypeTable *MovieTypeTable() {
+ static const flatbuffers::TypeCode type_codes[] = {
{ flatbuffers::ET_UTYPE, 0, 0 },
{ flatbuffers::ET_SEQUENCE, 0, 0 },
{ flatbuffers::ET_UTYPE, 1, 0 },
{ flatbuffers::ET_SEQUENCE, 1, 0 }
};
- static flatbuffers::TypeFunction type_refs[] = {
+ static const flatbuffers::TypeFunction type_refs[] = {
CharacterTypeTable
};
- static const char *names[] = {
+ static const char * const names[] = {
"main_character_type",
"main_character",
"characters_type",
"characters"
};
- static flatbuffers::TypeTable tt = {
+ static const flatbuffers::TypeTable tt = {
flatbuffers::ST_TABLE, 4, type_codes, type_refs, nullptr, names
};
return &tt;
return flatbuffers::GetRoot<Movie>(buf);
}
+inline const Movie *GetSizePrefixedMovie(const void *buf) {
+ return flatbuffers::GetSizePrefixedRoot<Movie>(buf);
+}
+
inline Movie *GetMutableMovie(void *buf) {
return flatbuffers::GetMutableRoot<Movie>(buf);
}
return verifier.VerifyBuffer<Movie>(MovieIdentifier());
}
+inline bool VerifySizePrefixedMovieBuffer(
+ flatbuffers::Verifier &verifier) {
+ return verifier.VerifySizePrefixedBuffer<Movie>(MovieIdentifier());
+}
+
inline void FinishMovieBuffer(
flatbuffers::FlatBufferBuilder &fbb,
flatbuffers::Offset<Movie> root) {
fbb.Finish(root, MovieIdentifier());
}
+inline void FinishSizePrefixedMovieBuffer(
+ flatbuffers::FlatBufferBuilder &fbb,
+ flatbuffers::Offset<Movie> root) {
+ fbb.FinishSizePrefixed(root, MovieIdentifier());
+}
+
inline flatbuffers::unique_ptr<MovieT> UnPackMovie(
const void *buf,
const flatbuffers::resolver_function_t *res = nullptr) {
Intel Corporation
ARM Ltd.
Silk Labs Inc.
+MIPS Tech LLC
Silk Labs:
Andreas Gal <andreas@silklabs.com>
+
+MIPS Tech LLC:
+Alexey Frunze <Alexey.Frunze@mips.com>
Some of the general design is explained in [doc/design.md](doc/design.md).
+**Warning:** This library runs very slowly if compiled incorrectly; see below.
+
## Disclaimer
This is not an official Google product (experimental or otherwise), it is just
* ARM with NEON (both 32bit and 64bit).
* Intel x86 with SSE 4.1 (both 32bit and 64bit).
-If you are building for x86, it's important that you pass in the `-msse4.1`
-compiler flag when building, or you'll end up using slow reference code. If
-you're building with Bazel, you can do this by running `bazel build gemmlowp:all
---copt=-msse4.1`. If you're building for a machine with no SIMD support in
-gemmlowp then by default you'll see an error. If you want to run with the
-reference implementations anyway, you can override the error by specifying
-`GEMMLOWP_ALLOW_SLOW_SCALAR_FALLBACK` as a build define.
+When building for x86, it's very important to pass `-msse4.1` to the compiler,
+otherwise gemmlowp will use slow reference code. Bazel users can compile by
+running `bazel build --copt=-msse4.1 //gemmlowp:all`. The compiled binary should
+work on all Intel CPUs since 2008 (including low power microarchitectures) as
+well as AMD CPUs since 2011.
+
+Please note that when compiling binaries that don't need to be distributed, it's
+generally a better idea to pass `-march=native` to the compiler. That flag
+implies the `-msse4.1` flag, along with others that might be helpful. This of course
+assumes the host machine supports those instructions. Bazel users should prefer
+to run `bazel build --config=opt //gemmlowp:all` instead.
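As a complementary, hedged sketch, a project depending on gemmlowp can also catch a forgotten flag at compile time rather than at benchmark time; this assumes GCC or Clang, which predefine `__SSE4_1__` only when SSE 4.1 code generation is enabled (via `-msse4.1` or a sufficiently capable `-march=native`):

// Hypothetical guard, not part of gemmlowp itself: fail the build early on
// x86 when SSE 4.1 code generation is disabled, instead of silently falling
// back to the slow reference kernels.
#if (defined(__x86_64__) || defined(__i386__)) && !defined(__SSE4_1__)
#error "Compile with -msse4.1 (or -march=native) to get gemmlowp's SIMD kernels."
#endif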
Details of what it takes to make an efficient port of gemmlowp, namely writing a
suitable GEMM kernel and accompanying packing code, are explained in this file:
--- /dev/null
+# Gemmlowp CMake file written for Debian.
+# Copyright © 2016 Zhou Mo <cdluminate@gmail.com>
+# Licence Apache-2.0
+
+cmake_minimum_required(VERSION 3.7)
+
+# Project
+project(gemmlowp C CXX)
+
+# Set C++11 as default standard
+set(CMAKE_CXX_STANDARD 11)
+
+get_filename_component(gemmlowp_src ${gemmlowp_SOURCE_DIR} PATH)
+
+if(WIN32)
+ # one can enable simd from the cmake command line, ie -DCMAKE_CXX_FLAGS="/arch:AVX2
+ add_definitions(-DNOMINMAX -DWIN64 -DWIN32_LEAN_AND_MEAN -DNOGDI)
+ add_definitions(/bigobj /nologo /EHsc /GF /MP /Gm- /wd4800 /wd4805 /wd4244)
+ if (CMAKE_CXX_COMPILER_ID MATCHES "Clang")
+ # if we compile for windows with clang, allow inline asm
+ add_definitions(-DGEMMLOWP_ALLOW_INLINE_ASM)
+ endif()
+else()
+ set(EXTERNAL_LIBRARIES "pthread")
+endif()
+
+# Glob header files
+file(GLOB gemmlowp_private_headers "${gemmlowp_src}/fixedpoint/*.h" "${gemmlowp_src}/internal/*.h")
+file(GLOB gemmlowp_public_headers "${gemmlowp_src}/meta/*.h" "${gemmlowp_src}/public/*.h" "${gemmlowp_src}/profiling/*.h")
+list(APPEND gemmlowp_headers ${gemmlowp_private_headers} ${gemmlowp_public_headers})
+
+file(GLOB eight_bit_int_gemm_headers "${gemmlowp_src}/eight_bit_int_gemm/*.h")
+list(APPEND eight_bit_int_gemm_public_headers ${eight_bit_int_gemm_headers} ${gemmlowp_public_headers})
+file(GLOB eight_bit_int_gemm_sources_with_no_headers "${gemmlowp_src}/eight_bit_int_gemm/*.cc")
+
+list(APPEND eight_bit_int_gemm_sources
+ ${eight_bit_int_gemm_headers}
+ ${eight_bit_int_gemm_sources_with_no_headers}
+ ${gemmlowp_headers})
+
+file(GLOB gemmlowp_test_headers "${gemmlowp_src}/test/*.h")
+list(APPEND gemmlowp_test_headers ${gemmlowp_headers})
+
+file(GLOB fixedpoint_private_headers "${gemmlowp_src}/fixedpoint/*.h")
+list(APPEND fixedpoint_private_headers "${gemmlowp_src}/internal/common.h")
+
+# Eight bit int gemm library
+if(WIN32)
+ add_library(eight_bit_int_gemm STATIC ${eight_bit_int_gemm_sources_with_no_headers})
+else()
+ add_library(eight_bit_int_gemm SHARED ${eight_bit_int_gemm_sources_with_no_headers})
+endif()
+target_link_libraries(eight_bit_int_gemm ${EXTERNAL_LIBRARIES})
+
+# Benchmarks
+add_executable(benchmark
+ "${gemmlowp_src}/test/benchmark.cc" ${gemmlowp_test_headers})
+target_link_libraries(benchmark ${EXTERNAL_LIBRARIES})
+
+add_executable(benchmark_all_sizes
+ "${gemmlowp_src}/test/benchmark_all_sizes.cc" ${gemmlowp_test_headers})
+target_compile_options(benchmark_all_sizes PRIVATE -DBENCHMARK_8bit -DBENCHMARK_QUICK)
+target_link_libraries(benchmark_all_sizes ${EXTERNAL_LIBRARIES})
+
+# Gemmlowp test
+add_executable(test_gemmlowp
+ "${gemmlowp_src}/test/test.cc" "${gemmlowp_src}/test/test_data.cc" ${gemmlowp_test_headers})
+target_link_libraries(test_gemmlowp eight_bit_int_gemm)
+
+# Math helpers test
+add_executable(test_math_helpers
+ "${gemmlowp_src}/test/test_math_helpers.cc" ${gemmlowp_test_headers})
+
+# BlockingCounter test
+add_executable(test_blocking_counter
+ "${gemmlowp_src}/test/test_blocking_counter.cc" ${gemmlowp_test_headers})
+target_link_libraries(test_blocking_counter ${EXTERNAL_LIBRARIES})
+
+# Allocator test
+add_executable(test_allocator
+ "${gemmlowp_src}/test/test_allocator.cc" ${gemmlowp_test_headers})
+
+# FixedPoint test
+add_executable(test_fixedpoint
+ "${gemmlowp_src}/test/test_fixedpoint.cc" ${gemmlowp_test_headers})
+
+# Add tests
+enable_testing()
+foreach(testname "test_math_helpers" "test_blocking_counter" "test_allocator" "test_fixedpoint" "test_gemmlowp")
+ add_test(NAME ${testname} COMMAND "${testname}")
+endforeach(testname)
to be performed on internal 32bit accumulators to obtain the final outputs.
The public entry point in [public/gemmlowp.h](../public/gemmlowp.h) allowing to
-set un an arbitrary output pipeline is `GemmWithOutputPipeline`.
+set up an arbitrary output pipeline is `GemmWithOutputPipeline`.
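As a rough sketch of what a caller assembles and hands to `GemmWithOutputPipeline`: the `GemmContext`, `MatrixMap`, `MapOrder`, `DefaultL8R8BitDepthParams` and `OutputStageSaturatingCastToUint8` names are assumed from gemmlowp's public headers, the quantize-down stage and its fields appear verbatim further down in this patch, and `ExampleGemm` is only an illustrative wrapper:

#include <cstdint>
#include <tuple>

#include "public/gemmlowp.h"
#include "public/output_stages.h"

// Sketch only: multiply two uint8 matrices, requantize the int32 accumulators
// with the fixed-point quantize-down stage, then saturate-cast back to uint8.
void ExampleGemm(gemmlowp::GemmContext* context,
                 const std::uint8_t* lhs_data, const std::uint8_t* rhs_data,
                 std::uint8_t* result_data, int rows, int depth, int cols,
                 int lhs_offset, int rhs_offset,
                 std::int32_t result_fixedpoint_multiplier, int result_shift,
                 std::int32_t result_offset_after_shift) {
  gemmlowp::MatrixMap<const std::uint8_t, gemmlowp::MapOrder::RowMajor>
      lhs(lhs_data, rows, depth);
  gemmlowp::MatrixMap<const std::uint8_t, gemmlowp::MapOrder::ColMajor>
      rhs(rhs_data, depth, cols);
  gemmlowp::MatrixMap<std::uint8_t, gemmlowp::MapOrder::ColMajor>
      result(result_data, rows, cols);

  gemmlowp::OutputStageQuantizeDownInt32ByFixedPoint quantize_down_stage;
  quantize_down_stage.result_fixedpoint_multiplier = result_fixedpoint_multiplier;
  quantize_down_stage.result_shift = result_shift;
  quantize_down_stage.result_offset_after_shift = result_offset_after_shift;
  gemmlowp::OutputStageSaturatingCastToUint8 saturating_cast_stage;
  const auto output_pipeline =
      std::make_tuple(quantize_down_stage, saturating_cast_stage);

  gemmlowp::GemmWithOutputPipeline<std::uint8_t, std::uint8_t,
                                   gemmlowp::DefaultL8R8BitDepthParams>(
      context, lhs, rhs, &result, lhs_offset, rhs_offset, output_pipeline);
}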
Refer to [quantization.md](quantization.md) for details of how one gets from
first principles to the actual output pipelines to assemble for successful
quantization paradigms. See [low-precision.md](low-precision.md) and
[quantization.md](quantization.md).
-Besides implementing a quantization paradigms, the other thing that output
-pipelines are good for, is implementing fused operations where a matrix
+Besides implementing a quantization paradigm, the other thing that output
+pipelines are good for is implementing fused operations where a matrix
multiplication feeds into other operations applied to its result, without
additional array traversals. For instance, when implementing neural network
inference, one might have a Convolutional layer with a bias-addition and an
The specific output pipeline stage implementing the present quantization
paradigm, i.e. implementing the precise computation detailed in the previous
section (equation (5)), is
-`OutputStageQuantizeDownInt32ToUint8ScaleByFixedPoint`.
+`OutputStageQuantizeDownInt32ByFixedPoint`.
Please refer to the comment explaining it in
[public/output_stages.h](../public/output_stages.h).
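Spelled out for a single accumulator entry, that computation amounts to roughly the following sketch; `SaturatingRoundingDoublingHighMul` and `RoundingDivideByPOT` are the `fixedpoint/fixedpoint.h` helpers that appear further down in this patch, and `QuantizeDownOneValue` is only an illustrative name:

#include <cstdint>

#include "fixedpoint/fixedpoint.h"

// Sketch of the per-entry arithmetic behind
// OutputStageQuantizeDownInt32ByFixedPoint: a saturating fixed-point multiply
// (which cannot overflow), a correctly-rounded right shift, then an offset.
std::int32_t QuantizeDownOneValue(std::int32_t accumulator,
                                  std::int32_t result_fixedpoint_multiplier,
                                  int result_shift,
                                  std::int32_t result_offset_after_shift) {
  const std::int32_t scaled = gemmlowp::SaturatingRoundingDoublingHighMul(
      accumulator, result_fixedpoint_multiplier);
  return gemmlowp::RoundingDivideByPOT(scaled, result_shift) +
         result_offset_after_shift;
}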
document boils down to the difference between the legacy output stage
implementing it, `OutputStageQuantizeDownInt32ToUint8Scale`, and the new output
stage implementing the new paradigm,
-`OutputStageQuantizeDownInt32ToUint8ScaleByFixedPoint`.
+`OutputStageQuantizeDownInt32ByFixedPoint`.
Please refer to the comments in
[public/output_stages.h](../public/output_stages.h) for details about these two
1. The int32 accumulators (inputs to the output stage) undergo a plain int32
multiplication with a int32 multiplier, which may overflow. By contrast, in
- the newer `OutputStageQuantizeDownInt32ToUint8ScaleByFixedPoint`, this
+ the newer `OutputStageQuantizeDownInt32ByFixedPoint`, this
integer multiplication becomes a fixed-point multiplication and cannot
overflow.
//
// This is how to obtain the fixed-point multiplier and right shift
// parameters to pass to
-// OutputStageQuantizeDownInt32ToUint8ScaleByFixedPoint.
+// OutputStageQuantizeDownInt32ByFixedPoint.
//
// Note: all this code only needs to run offline to generate the quantized
// neural network workload, not at runtime on the
<< "use quantized arithmetic.\n"
<< std::endl;
- gemmlowp::OutputStageQuantizeDownInt32ToUint8ScaleByFixedPoint
+ gemmlowp::OutputStageQuantizeDownInt32ByFixedPoint
quantize_down_stage;
quantize_down_stage.result_offset_after_shift = result_offset;
quantize_down_stage.result_fixedpoint_multiplier = quantized_multiplier;
// See the License for the specific language governing permissions and
// limitations under the License.
-#ifndef GEMMLOWP_ALLOW_SLOW_SCALAR_FALLBACK
-#define GEMMLOWP_ALLOW_SLOW_SCALAR_FALLBACK
-#endif
#include "eight_bit_int_gemm.h"
#include <memory>
static const int kLanes = 1;
};
+template <>
+struct FixedPointRawTypeTraits<std::int16_t> {
+ typedef std::int16_t ScalarRawType;
+ static const int kLanes = 1;
+};
+
// Returns a SIMD value duplicating a scalar value across all lanes.
template <typename tRawType>
tRawType Dup(typename FixedPointRawTypeTraits<tRawType>::ScalarRawType x) {
return static_cast<std::int32_t>((sum + sign) / 2);
}
+template <>
+inline std::int16_t RoundingHalfSum(std::int16_t a, std::int16_t b) {
+ std::int32_t a32 = a;
+ std::int32_t b32 = b;
+ std::int32_t sum = a32 + b32;
+ std::int32_t sign = sum >= 0 ? 1 : -1;
+ return static_cast<std::int16_t>((sum + sign) / 2);
+}
+
+template <typename IntegerType>
+IntegerType SaturatingAdd(IntegerType a, IntegerType b) {
+ static_assert(std::is_same<IntegerType, void>::value, "unimplemented");
+ return a;
+}
+
+// So far this is only needed for int16.
+template <>
+inline std::int16_t SaturatingAdd(std::int16_t a, std::int16_t b) {
+ std::int32_t a32 = a;
+ std::int32_t b32 = b;
+ std::int32_t sum = a32 + b32;
+ return static_cast<std::int16_t>(std::min(32767, std::max(-32768, sum)));
+}
+
+// Returns a+b, saturating if the integers are 16bit or narrower,
+// otherwise just a plain addition.
+template <typename IntegerType, bool Is16Bit>
+struct AddSaturatingIf16BitImpl {
+ static IntegerType Run(IntegerType a, IntegerType b) { return Add(a, b); }
+};
+template <typename IntegerType>
+struct AddSaturatingIf16BitImpl<IntegerType, true> {
+ static IntegerType Run(IntegerType a, IntegerType b) {
+ return SaturatingAdd(a, b);
+ }
+};
+template <typename IntegerType>
+IntegerType AddSaturatingIf16Bit(IntegerType a, IntegerType b) {
+ using ScalarType =
+ typename FixedPointRawTypeTraits<IntegerType>::ScalarRawType;
+ return AddSaturatingIf16BitImpl<IntegerType, sizeof(ScalarType) == 2>::Run(a,
+ b);
+}
+
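
As a quick illustration of the scalar 16-bit helpers added above (a sketch,
assuming the gemmlowp fixedpoint header is included; not part of this patch):

#include <cassert>
#include <cstdint>

void SaturatingAddInt16Sketch() {
  // int16 additions saturate at the type bounds instead of wrapping around.
  assert(gemmlowp::SaturatingAdd<std::int16_t>(30000, 10000) == 32767);
  assert(gemmlowp::SaturatingAdd<std::int16_t>(-30000, -10000) == -32768);
  // For 32-bit types, AddSaturatingIf16Bit falls back to the plain Add()
  // defined elsewhere in fixedpoint.h.
  assert(gemmlowp::AddSaturatingIf16Bit<std::int32_t>(30000, 10000) == 40000);
}
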
// Returns the integer that represents the product of two fixed-point
// numbers, interpreting all integers as fixed-point values in the
// interval [-1, 1), rounding to the nearest value, and saturating
return overflow ? std::numeric_limits<std::int32_t>::max() : ab_x2_high32;
}
+template <>
+inline std::int16_t SaturatingRoundingDoublingHighMul(std::int16_t a,
+ std::int16_t b) {
+ bool overflow = a == b && a == std::numeric_limits<std::int16_t>::min();
+ std::int32_t a_32(a);
+ std::int32_t b_32(b);
+ std::int32_t ab_32 = a_32 * b_32;
+ std::int16_t nudge = ab_32 >= 0 ? (1 << 14) : (1 - (1 << 14));
+ std::int16_t ab_x2_high16 =
+ static_cast<std::int16_t>((ab_32 + nudge) / (1 << 15));
+ return overflow ? std::numeric_limits<std::int16_t>::max() : ab_x2_high16;
+}
+
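
For intuition, the int16 specialization above treats its operands as Q0.15
fixed-point values; a brief sketch (again assuming the gemmlowp header is
included; not part of this patch):

#include <cassert>
#include <cstdint>

void DoublingHighMulInt16Sketch() {
  // 0.5 * 0.5 in Q0.15: 16384 * 16384 yields 8192, i.e. 0.25.
  assert(gemmlowp::SaturatingRoundingDoublingHighMul<std::int16_t>(
             1 << 14, 1 << 14) == (1 << 13));
  // The single overflow case, -1 * -1, saturates to the largest Q0.15 value.
  assert(gemmlowp::SaturatingRoundingDoublingHighMul<std::int16_t>(
             -32768, -32768) == 32767);
}
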
// Correctly-rounded-to-nearest division by a power-of-two.
// Also known as a rounding arithmetic right shift.
template <typename IntegerType>
inline IntegerType RoundingDivideByPOT(IntegerType x, int exponent) {
- using ScalarIntegerType =
- typename FixedPointRawTypeTraits<IntegerType>::ScalarRawType;
- static_assert(std::is_same<ScalarIntegerType, std::int32_t>::value,
- "Currently only supporting int32 scalar and SIMD types");
assert(exponent >= 0);
assert(exponent <= 31);
const IntegerType mask = Dup<IntegerType>((1ll << exponent) - 1);
static IntegerType eval(IntegerType x) {
using ScalarIntegerType =
typename FixedPointRawTypeTraits<IntegerType>::ScalarRawType;
- static_assert(std::is_same<ScalarIntegerType, std::int32_t>::value,
- "Currently only supporting int32 scalar and SIMD types");
const IntegerType min =
- Dup<IntegerType>(std::numeric_limits<std::int32_t>::min());
+ Dup<IntegerType>(std::numeric_limits<ScalarIntegerType>::min());
const IntegerType max =
- Dup<IntegerType>(std::numeric_limits<std::int32_t>::max());
+ Dup<IntegerType>(std::numeric_limits<ScalarIntegerType>::max());
+ const int ScalarIntegerTypeBits = 8 * sizeof(ScalarIntegerType);
- const std::int32_t threshold = ((1 << (31 - Exponent)) - 1);
+ const std::int32_t threshold =
+ ((1 << (ScalarIntegerTypeBits - 1 - Exponent)) - 1);
const IntegerType positive_mask =
MaskIfGreaterThan(x, Dup<IntegerType>(threshold));
const IntegerType negative_mask =
static FixedPoint Zero() { return FromScalarRaw(0); }
static FixedPoint One() {
- return FromScalarRaw(kIntegerBits == 0
- ? ScalarRawMax()
- : (ScalarRawType(1) << kFractionalBits));
+ return FromScalarRaw(
+ kIntegerBits == 0
+ ? ScalarRawMax()
+ : (ScalarRawType(1) << (kIntegerBits == 0 ? 0 : kFractionalBits)));
}
static FixedPoint FromDouble(double x) {
const double min_bound = static_cast<double>(ScalarRawMin());
const double max_bound = static_cast<double>(ScalarRawMax());
- return FromScalarRaw(static_cast<std::int32_t>(std::min(
+ return FromScalarRaw(static_cast<ScalarRawType>(std::min(
std::max(round(x * static_cast<double>(1ll << kFractionalBits)),
min_bound),
max_bound)));
return !(a == b);
}
+template <typename tRawType, int tIntegerBits>
+FixedPoint<tRawType, tIntegerBits> SaturatingAdd(
+ FixedPoint<tRawType, tIntegerBits> a,
+ FixedPoint<tRawType, tIntegerBits> b) {
+ return FixedPoint<tRawType, tIntegerBits>::FromRaw(
+ SaturatingAdd(a.raw(), b.raw()));
+}
+
+template <typename tRawType, int tIntegerBits>
+FixedPoint<tRawType, tIntegerBits> AddSaturatingIf16Bit(
+ FixedPoint<tRawType, tIntegerBits> a,
+ FixedPoint<tRawType, tIntegerBits> b) {
+ return FixedPoint<tRawType, tIntegerBits>::FromRaw(
+ AddSaturatingIf16Bit(a.raw(), b.raw()));
+}
+
// Conversion to floating-point.
template <typename tRawType, int tIntegerBits>
double ToDouble(FixedPoint<tRawType, tIntegerBits> x) {
// initialized as real numbers, in a way that does not compile floating-point
// arithmetic in production code, yet still checks agreement with the
// floating-point expressions when asserts are enabled.
+//
+// The raw integer value provided is always an int32, encoding a 32-bit
+// fixed-point value, regardless of the actual Scalar type. This allows
+// writing generic code that applies just as well to the 32-bit and 16-bit
+// cases. In the 16-bit case, the raw integer value is internally
+// rounding-shifted by 16 bits to the right.
+template <typename FixedPointType>
+inline typename FixedPointType::ScalarRawType RescaleConstantInitializer(
+ std::int32_t int32_value) {
+ typedef typename FixedPointType::ScalarRawType ScalarRawType;
+ static constexpr int ScalarTypeBits = 8 * sizeof(ScalarRawType);
+ return static_cast<ScalarRawType>(
+ RoundingDivideByPOT<std::int32_t>(int32_value, 32 - ScalarTypeBits));
+}
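
To illustrate the rescaling (a sketch under the assumption that the 16-bit
FixedPoint specialization added above is in scope; not part of this patch):

#include <cstdint>

void RescaleConstantSketch() {
  // A 16-bit fixed-point type with 0 integer bits (Q0.15).
  typedef gemmlowp::FixedPoint<std::int16_t, 0> Q0_15;
  // 0.5 encoded as a 32-bit raw value (Q0.31)...
  const std::int32_t half_as_int32_raw = 1 << 30;
  // ...becomes 0.5 encoded as a 16-bit raw value (Q0.15), i.e. 1 << 14,
  // after the rounding shift right by 32 - 16 = 16 bits.
  const std::int16_t half_as_int16_raw =
      gemmlowp::RescaleConstantInitializer<Q0_15>(half_as_int32_raw);
  (void)half_as_int16_raw;
}
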
#ifdef GEMMLOWP_ENABLE_FIXEDPOINT_CONSTANTS_CHECKS
template <typename FixedPointType>
-FixedPointType CheckedFixedPointConstant(
- typename FixedPointType::ScalarRawType raw_value, double double_value) {
- typedef typename FixedPointType::RawType RawType;
+FixedPointType CheckedFixedPointConstant(std::int32_t raw_value,
+ double double_value) {
const FixedPointType result = FixedPointType::FromScalarRaw(raw_value);
assert(result == FixedPointType::FromDouble(double_value));
return result;
}
-#define GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT(FixedPointType, ScalarRawValue, \
- DoubleValue) \
- (CheckedFixedPointConstant<FixedPointType>(ScalarRawValue, DoubleValue))
+#define GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT(FixedPointType, \
+ ScalarRawInt32Value, DoubleValue) \
+ (gemmlowp::CheckedFixedPointConstant<FixedPointType>( \
+ gemmlowp::RescaleConstantInitializer<FixedPointType>( \
+ ScalarRawInt32Value), \
+ DoubleValue))
#else
-#define GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT(FixedPointType, ScalarRawValue, \
- DoubleValue) \
- (FixedPointType::FromScalarRaw(ScalarRawValue))
+#define GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT(FixedPointType, \
+ ScalarRawInt32Value, DoubleValue) \
+ (FixedPointType::FromScalarRaw( \
+ gemmlowp::RescaleConstantInitializer<FixedPointType>( \
+ ScalarRawInt32Value)))
#endif
// Implementation of exponential function.
F x4_over_24_plus_x3_over_6_plus_x2_over_2 =
SaturatingRoundingMultiplyByPOT<-1>(
((x4_over_4 + x3) * constant_1_over_3) + x2);
- return constant_term +
- constant_term * (x + x4_over_24_plus_x3_over_6_plus_x2_over_2);
+ return AddSaturatingIf16Bit(
+ constant_term,
+ constant_term * (x + x4_over_24_plus_x3_over_6_plus_x2_over_2));
}
// Returns exp(x) for x < 0.
#undef GEMMLOWP_EXP_BARREL_SHIFTER
if (kIntegerBits > 5) {
- static const int b = kIntegerBits > 5 ? kFractionalBits + 5 : 0;
+ static const int b = kIntegerBits > 5 ? 36 - kIntegerBits : 0;
const InputF clamp =
GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT(InputF, -(1 << b), -32.0);
result = SelectUsingMask(MaskIfLessThan(a, clamp), ResultF::Zero(), result);
#include "./fixedpoint_neon.h"
#elif defined(GEMMLOWP_SSE4)
#include "./fixedpoint_sse.h"
+#elif defined(GEMMLOWP_MSA)
+#include "./fixedpoint_msa.h"
#endif
#endif // GEMMLOWP_INTERNAL_FIXEDPOINT_H_
--- /dev/null
+// Copyright 2018 The Gemmlowp Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// fixedpoint_msa.h: optimized MSA specializations of the templates
+// in fixedpoint.h.
+
+#ifndef GEMMLOWP_INTERNAL_FIXEDPOINT_MSA_H_
+#define GEMMLOWP_INTERNAL_FIXEDPOINT_MSA_H_
+
+#include <msa.h>
+
+namespace gemmlowp {
+
+template <>
+struct FixedPointRawTypeTraits<v4i32> {
+ typedef std::int32_t ScalarRawType;
+ static const int kLanes = 4;
+};
+
+template <>
+struct FixedPointRawTypeTraits<v8i16> {
+ typedef std::int16_t ScalarRawType;
+ static const int kLanes = 8;
+};
+
+template <>
+inline v4i32 BitAnd(v4i32 a, v4i32 b) {
+ return reinterpret_cast<v4i32>(__builtin_msa_and_v(reinterpret_cast<v16u8>(a),
+ reinterpret_cast<v16u8>(b)));
+}
+
+template <>
+inline v8i16 BitAnd(v8i16 a, v8i16 b) {
+ return reinterpret_cast<v8i16>(__builtin_msa_and_v(reinterpret_cast<v16u8>(a),
+ reinterpret_cast<v16u8>(b)));
+}
+
+template <>
+inline v4i32 BitOr(v4i32 a, v4i32 b) {
+ return reinterpret_cast<v4i32>(__builtin_msa_or_v(reinterpret_cast<v16u8>(a),
+ reinterpret_cast<v16u8>(b)));
+}
+
+template <>
+inline v8i16 BitOr(v8i16 a, v8i16 b) {
+ return reinterpret_cast<v8i16>(__builtin_msa_or_v(reinterpret_cast<v16u8>(a),
+ reinterpret_cast<v16u8>(b)));
+}
+
+template <>
+inline v4i32 BitXor(v4i32 a, v4i32 b) {
+ return reinterpret_cast<v4i32>(__builtin_msa_xor_v(reinterpret_cast<v16u8>(a),
+ reinterpret_cast<v16u8>(b)));
+}
+
+template <>
+inline v8i16 BitXor(v8i16 a, v8i16 b) {
+ return reinterpret_cast<v8i16>(__builtin_msa_xor_v(reinterpret_cast<v16u8>(a),
+ reinterpret_cast<v16u8>(b)));
+}
+
+template <>
+inline v4i32 BitNot(v4i32 a) {
+ return reinterpret_cast<v4i32>(__builtin_msa_nor_v(reinterpret_cast<v16u8>(a),
+ reinterpret_cast<v16u8>(a)));
+}
+
+template <>
+inline v8i16 BitNot(v8i16 a) {
+ return reinterpret_cast<v8i16>(__builtin_msa_nor_v(reinterpret_cast<v16u8>(a),
+ reinterpret_cast<v16u8>(a)));
+}
+
+template <>
+inline v4i32 Add(v4i32 a, v4i32 b) {
+ return __builtin_msa_addv_w(a, b);
+}
+
+template <>
+inline v8i16 Add(v8i16 a, v8i16 b) {
+ return __builtin_msa_addv_h(a, b);
+}
+
+template <>
+inline v4i32 Sub(v4i32 a, v4i32 b) {
+ return __builtin_msa_subv_w(a, b);
+}
+
+template <>
+inline v8i16 Sub(v8i16 a, v8i16 b) {
+ return __builtin_msa_subv_h(a, b);
+}
+
+template <>
+inline v4i32 Neg(v4i32 a) {
+ v4i32 zeroes = __builtin_msa_ldi_w(0);
+ return __builtin_msa_subv_w(zeroes, a);
+}
+
+template <>
+inline v8i16 Neg(v8i16 a) {
+ v8i16 zeroes = __builtin_msa_ldi_h(0);
+ return __builtin_msa_subv_h(zeroes, a);
+}
+
+template <>
+inline v4i32 ShiftLeft(v4i32 a, int offset) {
+ return __builtin_msa_sll_w(a, __builtin_msa_fill_w(offset));
+}
+
+template <>
+inline v8i16 ShiftLeft(v8i16 a, int offset) {
+ return __builtin_msa_sll_h(a, __builtin_msa_fill_h(offset));
+}
+
+template <>
+inline v4i32 ShiftRight(v4i32 a, int offset) {
+ return __builtin_msa_sra_w(a, __builtin_msa_fill_w(offset));
+}
+
+template <>
+inline v8i16 ShiftRight(v8i16 a, int offset) {
+ return __builtin_msa_sra_h(a, __builtin_msa_fill_h(offset));
+}
+
+template <>
+inline v4i32 SelectUsingMask(v4i32 if_mask, v4i32 then_val, v4i32 else_val) {
+ if_mask = reinterpret_cast<v4i32>(__builtin_msa_bsel_v(reinterpret_cast<v16u8>(if_mask),
+ reinterpret_cast<v16u8>(else_val),
+ reinterpret_cast<v16u8>(then_val)));
+ return if_mask;
+}
+
+template <>
+inline v8i16 SelectUsingMask(v8i16 if_mask, v8i16 then_val, v8i16 else_val) {
+ if_mask = reinterpret_cast<v8i16>(__builtin_msa_bsel_v(reinterpret_cast<v16u8>(if_mask),
+ reinterpret_cast<v16u8>(else_val),
+ reinterpret_cast<v16u8>(then_val)));
+ return if_mask;
+}
+
+template <>
+inline v4i32 MaskIfEqual(v4i32 a, v4i32 b) {
+ return __builtin_msa_ceq_w(a, b);
+}
+
+template <>
+inline v8i16 MaskIfEqual(v8i16 a, v8i16 b) {
+ return __builtin_msa_ceq_h(a, b);
+}
+
+template <>
+inline v4i32 MaskIfNotEqual(v4i32 a, v4i32 b) {
+ return BitNot(MaskIfEqual(a, b));
+}
+
+template <>
+inline v8i16 MaskIfNotEqual(v8i16 a, v8i16 b) {
+ return BitNot(MaskIfEqual(a, b));
+}
+
+template <>
+inline v4i32 MaskIfZero(v4i32 a) {
+ return __builtin_msa_ceqi_w(a, 0);
+}
+
+template <>
+inline v8i16 MaskIfZero(v8i16 a) {
+ return __builtin_msa_ceqi_h(a, 0);
+}
+
+template <>
+inline v4i32 MaskIfNonZero(v4i32 a) {
+ return BitNot(MaskIfZero(a));
+}
+
+template <>
+inline v8i16 MaskIfNonZero(v8i16 a) {
+ return BitNot(MaskIfZero(a));
+}
+
+template <>
+inline v4i32 MaskIfGreaterThan(v4i32 a, v4i32 b) {
+ return __builtin_msa_clt_s_w(b, a);
+}
+
+template <>
+inline v8i16 MaskIfGreaterThan(v8i16 a, v8i16 b) {
+ return __builtin_msa_clt_s_h(b, a);
+}
+
+template <>
+inline v4i32 MaskIfGreaterThanOrEqual(v4i32 a, v4i32 b) {
+ return __builtin_msa_cle_s_w(b, a);
+}
+
+template <>
+inline v8i16 MaskIfGreaterThanOrEqual(v8i16 a, v8i16 b) {
+ return __builtin_msa_cle_s_h(b, a);
+}
+
+template <>
+inline v4i32 MaskIfLessThan(v4i32 a, v4i32 b) {
+ return __builtin_msa_clt_s_w(a, b);
+}
+
+template <>
+inline v8i16 MaskIfLessThan(v8i16 a, v8i16 b) {
+ return __builtin_msa_clt_s_h(a, b);
+}
+
+template <>
+inline v4i32 MaskIfLessThanOrEqual(v4i32 a, v4i32 b) {
+ return __builtin_msa_cle_s_w(a, b);
+}
+
+template <>
+inline v8i16 MaskIfLessThanOrEqual(v8i16 a, v8i16 b) {
+ return __builtin_msa_cle_s_h(a, b);
+}
+
+template <>
+inline bool All(v4i32 a) {
+ return __builtin_msa_bz_v(reinterpret_cast<v16u8>(BitNot(a)));
+}
+
+template <>
+inline bool All(v8i16 a) {
+ return __builtin_msa_bz_v(reinterpret_cast<v16u8>(BitNot(a)));
+}
+
+template <>
+inline bool Any(v4i32 a) {
+ return __builtin_msa_bnz_v(reinterpret_cast<v16u8>(a));
+}
+
+template <>
+inline bool Any(v8i16 a) {
+ return __builtin_msa_bnz_v(reinterpret_cast<v16u8>(a));
+}
+
+template <>
+inline v4i32 RoundingHalfSum(v4i32 a, v4i32 b) {
+ return __builtin_msa_aver_s_w(a, b);
+}
+
+template <>
+inline v8i16 RoundingHalfSum(v8i16 a, v8i16 b) {
+ return __builtin_msa_aver_s_h(a, b);
+}
+
+template <>
+inline v4i32 SaturatingRoundingDoublingHighMul(v4i32 a, v4i32 b) {
+ return __builtin_msa_mulr_q_w(a, b);
+}
+
+template <>
+inline v8i16 SaturatingRoundingDoublingHighMul(v8i16 a, v8i16 b) {
+ return __builtin_msa_mulr_q_h(a, b);
+}
+
+template <int Exponent>
+struct ImplSaturatingRoundingMultiplyByPOT<Exponent, v4i32, 1> {
+ static v4i32 eval(v4i32 x) {
+ static_assert(Exponent >= 0 && Exponent < 32, "");
+ if (Exponent < 5) {
+ for (int i = 0; i < Exponent; i++) {
+ x = __builtin_msa_adds_s_w(x, x);
+ }
+ return x;
+ } else {
+ // Saturate each signed 32-bit element to (32 - Exponent)
+ // bits (this takes full care of negative elements).
+ v4i32 res = __builtin_msa_sat_s_w(x, 31 - Exponent);
+      // Set tmp to 0x7FFFFFFF for those elements which saturated
+ // to smaller (positive) values and 0 for all others.
+ v4i32 tmp = __builtin_msa_srli_w(__builtin_msa_clt_s_w(res, x), 1);
+ // Shift the saturated elements. The positive saturated elements
+ // will have Exponent trailing zero bits after the shift. Those
+ // need to be ones, not zeroes.
+ res = __builtin_msa_slli_w(res, Exponent);
+ // Finally, set those trailing zero bits to ones.
+ res = reinterpret_cast<v4i32>(__builtin_msa_or_v(reinterpret_cast<v16u8>(res),
+ reinterpret_cast<v16u8>(tmp)));
+ return res;
+ }
+ }
+};
+
+template <int Exponent>
+struct ImplSaturatingRoundingMultiplyByPOT<Exponent, v8i16, 1> {
+ static v8i16 eval(v8i16 x) {
+ static_assert(Exponent >= 0 && Exponent < 16, "");
+ if (Exponent < 5) {
+ for (int i = 0; i < Exponent; i++) {
+ x = __builtin_msa_adds_s_h(x, x);
+ }
+ return x;
+ } else {
+ // Saturate each signed 16-bit element to (16 - Exponent)
+ // bits (this takes full care of negative elements).
+ v8i16 res = __builtin_msa_sat_s_h(x, 15 - Exponent);
+      // Set tmp to 0x7FFF for those elements which saturated
+ // to smaller (positive) values and 0 for all others.
+ v8i16 tmp = __builtin_msa_srli_h(__builtin_msa_clt_s_h(res, x), 1);
+ // Shift the saturated elements. The positive saturated elements
+ // will have Exponent trailing zero bits after the shift. Those
+ // need to be ones, not zeroes.
+ res = __builtin_msa_slli_h(res, Exponent);
+ // Finally, set those trailing zero bits to ones.
+ res = reinterpret_cast<v8i16>(__builtin_msa_or_v(reinterpret_cast<v16u8>(res),
+ reinterpret_cast<v16u8>(tmp)));
+ return res;
+ }
+ }
+};
+
+// TODO: possibly implement:
+// template <> v4i32 RoundingDivideByPOT(v4i32, int)
+// template <> v8i16 RoundingDivideByPOT(v8i16, int)
+// template <int Exponent> struct ImplSaturatingRoundingMultiplyByPOT<Exponent, v4i32, -1>
+// template <int Exponent> struct ImplSaturatingRoundingMultiplyByPOT<Exponent, v8i16, -1>
+
+template <>
+inline v4i32 Dup<v4i32>(std::int32_t x) {
+ return __builtin_msa_fill_w(x);
+}
+
+template <>
+inline v8i16 Dup<v8i16>(std::int16_t x) {
+ return __builtin_msa_fill_h(x);
+}
+
+// So far this is only needed for int16.
+template <>
+inline v8i16 SaturatingAdd(v8i16 a, v8i16 b) {
+ return __builtin_msa_adds_s_h(a, b);
+}
+
+} // end namespace gemmlowp
+
+#endif // GEMMLOWP_INTERNAL_FIXEDPOINT_MSA_H_
static const int kLanes = 4;
};
+template <>
+struct FixedPointRawTypeTraits<int16x8_t> {
+ typedef std::int16_t ScalarRawType;
+ static const int kLanes = 8;
+};
+
template <>
inline int32x4_t BitAnd(int32x4_t a, int32x4_t b) {
return vandq_s32(a, b);
}
+template <>
+inline int16x8_t BitAnd(int16x8_t a, int16x8_t b) {
+ return vandq_s16(a, b);
+}
+
template <>
inline int32x4_t BitOr(int32x4_t a, int32x4_t b) {
return vorrq_s32(a, b);
}
+template <>
+inline int16x8_t BitOr(int16x8_t a, int16x8_t b) {
+ return vorrq_s16(a, b);
+}
+
template <>
inline int32x4_t BitXor(int32x4_t a, int32x4_t b) {
return veorq_s32(a, b);
}
+template <>
+inline int16x8_t BitXor(int16x8_t a, int16x8_t b) {
+ return veorq_s16(a, b);
+}
+
template <>
inline int32x4_t BitNot(int32x4_t a) {
return veorq_s32(a, vdupq_n_s32(-1));
}
+template <>
+inline int16x8_t BitNot(int16x8_t a) {
+ return veorq_s16(a, vdupq_n_s16(-1));
+}
+
template <>
inline int32x4_t Add(int32x4_t a, int32x4_t b) {
return vaddq_s32(a, b);
}
+template <>
+inline int16x8_t Add(int16x8_t a, int16x8_t b) {
+ return vaddq_s16(a, b);
+}
+
template <>
inline int32x4_t Sub(int32x4_t a, int32x4_t b) {
return vsubq_s32(a, b);
}
+template <>
+inline int16x8_t Sub(int16x8_t a, int16x8_t b) {
+ return vsubq_s16(a, b);
+}
+
template <>
inline int32x4_t Neg(int32x4_t a) {
return vnegq_s32(a);
}
+template <>
+inline int16x8_t Neg(int16x8_t a) {
+ return vnegq_s16(a);
+}
+
template <>
inline int32x4_t ShiftLeft(int32x4_t a, int offset) {
return vshlq_s32(a, vdupq_n_s32(offset));
}
+template <>
+inline int16x8_t ShiftLeft(int16x8_t a, int offset) {
+ return vshlq_s16(a, vdupq_n_s16(offset));
+}
+
template <>
inline int32x4_t ShiftRight(int32x4_t a, int offset) {
return vshlq_s32(a, vdupq_n_s32(-offset));
}
+template <>
+inline int16x8_t ShiftRight(int16x8_t a, int offset) {
+ return vshlq_s16(a, vdupq_n_s16(-offset));
+}
+
template <>
inline int32x4_t SelectUsingMask(int32x4_t if_mask, int32x4_t then_val,
int32x4_t else_val) {
return vbslq_s32(vreinterpretq_u32_s32(if_mask), then_val, else_val);
}
+template <>
+inline int16x8_t SelectUsingMask(int16x8_t if_mask, int16x8_t then_val,
+ int16x8_t else_val) {
+ return vbslq_s16(vreinterpretq_u16_s16(if_mask), then_val, else_val);
+}
+
template <>
inline int32x4_t MaskIfEqual(int32x4_t a, int32x4_t b) {
return vreinterpretq_s32_u32(vceqq_s32(a, b));
}
+template <>
+inline int16x8_t MaskIfEqual(int16x8_t a, int16x8_t b) {
+ return vreinterpretq_s16_u16(vceqq_s16(a, b));
+}
+
template <>
inline int32x4_t MaskIfNotEqual(int32x4_t a, int32x4_t b) {
return BitNot(MaskIfEqual(a, b));
}
+template <>
+inline int16x8_t MaskIfNotEqual(int16x8_t a, int16x8_t b) {
+ return BitNot(MaskIfEqual(a, b));
+}
+
template <>
inline int32x4_t MaskIfZero(int32x4_t a) {
return MaskIfEqual(a, vdupq_n_s32(0));
}
+template <>
+inline int16x8_t MaskIfZero(int16x8_t a) {
+ return MaskIfEqual(a, vdupq_n_s16(0));
+}
+
template <>
inline int32x4_t MaskIfNonZero(int32x4_t a) {
return vreinterpretq_s32_u32(vtstq_s32(a, a));
}
+template <>
+inline int16x8_t MaskIfNonZero(int16x8_t a) {
+ return vreinterpretq_s16_u16(vtstq_s16(a, a));
+}
+
template <>
inline int32x4_t MaskIfGreaterThan(int32x4_t a, int32x4_t b) {
return vreinterpretq_s32_u32(vcgtq_s32(a, b));
}
+template <>
+inline int16x8_t MaskIfGreaterThan(int16x8_t a, int16x8_t b) {
+ return vreinterpretq_s16_u16(vcgtq_s16(a, b));
+}
+
template <>
inline int32x4_t MaskIfGreaterThanOrEqual(int32x4_t a, int32x4_t b) {
return vreinterpretq_s32_u32(vcgeq_s32(a, b));
}
+template <>
+inline int16x8_t MaskIfGreaterThanOrEqual(int16x8_t a, int16x8_t b) {
+ return vreinterpretq_s16_u16(vcgeq_s16(a, b));
+}
+
template <>
inline int32x4_t MaskIfLessThan(int32x4_t a, int32x4_t b) {
return vreinterpretq_s32_u32(vcltq_s32(a, b));
}
+template <>
+inline int16x8_t MaskIfLessThan(int16x8_t a, int16x8_t b) {
+ return vreinterpretq_s16_u16(vcltq_s16(a, b));
+}
+
template <>
inline int32x4_t MaskIfLessThanOrEqual(int32x4_t a, int32x4_t b) {
return vreinterpretq_s32_u32(vcleq_s32(a, b));
}
+template <>
+inline int16x8_t MaskIfLessThanOrEqual(int16x8_t a, int16x8_t b) {
+ return vreinterpretq_s16_u16(vcleq_s16(a, b));
+}
+
template <>
inline bool All(int32x4_t a) {
a = vandq_s32(a, vextq_s32(a, a, 1));
return vgetq_lane_s32(a, 0);
}
+template <>
+inline bool All(int16x8_t a) {
+ a = vandq_s16(a, vextq_s16(a, a, 1));
+ a = vandq_s16(a, vextq_s16(a, a, 2));
+ a = vandq_s16(a, vextq_s16(a, a, 4));
+ return vgetq_lane_s16(a, 0);
+}
+
template <>
inline bool Any(int32x4_t a) {
a = vorrq_s32(a, vextq_s32(a, a, 1));
return vgetq_lane_s32(a, 0);
}
+template <>
+inline bool Any(int16x8_t a) {
+ a = vorrq_s16(a, vextq_s16(a, a, 1));
+ a = vorrq_s16(a, vextq_s16(a, a, 2));
+ a = vorrq_s16(a, vextq_s16(a, a, 4));
+ return vgetq_lane_s16(a, 0);
+}
+
template <>
inline int32x4_t RoundingHalfSum(int32x4_t a, int32x4_t b) {
return vrhaddq_s32(a, b);
}
+template <>
+inline int16x8_t RoundingHalfSum(int16x8_t a, int16x8_t b) {
+ return vrhaddq_s16(a, b);
+}
+
template <>
inline int32x4_t SaturatingRoundingDoublingHighMul(int32x4_t a, int32x4_t b) {
return vqrdmulhq_s32(a, b);
}
+template <>
+inline int16x8_t SaturatingRoundingDoublingHighMul(int16x8_t a, int16x8_t b) {
+ return vqrdmulhq_s16(a, b);
+}
+
template <>
inline int32x4_t RoundingDivideByPOT(int32x4_t x, int exponent) {
const int32x4_t shift_vec = vdupq_n_s32(-exponent);
return vrshlq_s32(fixed_up_x, shift_vec);
}
+template <>
+inline int16x8_t RoundingDivideByPOT(int16x8_t x, int exponent) {
+ const int16x8_t shift_vec = vdupq_n_s16(-exponent);
+ const int16x8_t fixup = vshrq_n_s16(vandq_s16(x, shift_vec), 15);
+ const int16x8_t fixed_up_x = vqaddq_s16(x, fixup);
+ return vrshlq_s16(fixed_up_x, shift_vec);
+}
+
template <int Exponent>
struct ImplSaturatingRoundingMultiplyByPOT<Exponent, int32x4_t, 1> {
static int32x4_t eval(int32x4_t x) { return vqshlq_n_s32(x, Exponent); }
}
};
+template <int Exponent>
+struct ImplSaturatingRoundingMultiplyByPOT<Exponent, int16x8_t, 1> {
+ static int16x8_t eval(int16x8_t x) { return vqshlq_n_s16(x, Exponent); }
+};
+
+template <int Exponent>
+struct ImplSaturatingRoundingMultiplyByPOT<Exponent, int16x8_t, -1> {
+ static int16x8_t eval(int16x8_t x) {
+ const int16x8_t fixup = vshrq_n_s16(x, 15);
+ const int16x8_t fixed_up_x = vqaddq_s16(x, fixup);
+ return vrshrq_n_s16(fixed_up_x, -Exponent);
+ }
+};
+
template <>
inline int32x4_t Dup<int32x4_t>(std::int32_t x) {
return vdupq_n_s32(x);
}
+template <>
+inline int16x8_t Dup<int16x8_t>(std::int16_t x) {
+ return vdupq_n_s16(x);
+}
+
+// So far this is only needed for int16.
+template <>
+inline int16x8_t SaturatingAdd(int16x8_t a, int16x8_t b) {
+ return vqaddq_s16(a, b);
+}
+
} // end namespace gemmlowp
#endif // GEMMLOWP_INTERNAL_FIXEDPOINT_NEON_H_
namespace gemmlowp {
+// SSE intrinsics are not finely typed: there is a single __m128i vector
+// type that does not distinguish between "int32x4" and "int16x8" use
+// cases, unlike the NEON equivalents. Because we had initially focused
+// on int32x4, we did not pay attention and specialized these fixedpoint
+// templates directly for __m128i hardcoding the int32x4 semantics,
+// not leaving room for int16x8 semantics. Amending that by adding a separate
+// data type, int16x8_m128i, that wraps __m128i while being a separate
+// type.
+struct int16x8_m128i {
+ int16x8_m128i() {}
+ explicit int16x8_m128i(__m128i w) : v(w) {}
+ ~int16x8_m128i() {}
+
+ __m128i v;
+};
+
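
A small usage sketch of the wrapper (assuming SSE4.1 and the gemmlowp header;
not part of this patch):

#include <smmintrin.h>

void Int16x8WrapperSketch() {
  // Because int16x8_m128i is a distinct C++ type, overload resolution picks
  // the int16x8 specializations below rather than the int32x4 (__m128i) ones.
  gemmlowp::int16x8_m128i a(_mm_set1_epi16(3));
  gemmlowp::int16x8_m128i b(_mm_set1_epi16(4));
  gemmlowp::int16x8_m128i sum = gemmlowp::Add(a, b);  // lane-wise 16-bit add
  __m128i raw = sum.v;  // the wrapped __m128i remains directly accessible
  (void)raw;
}
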
template <>
struct FixedPointRawTypeTraits<__m128i> {
typedef std::int32_t ScalarRawType;
static const int kLanes = 4;
};
+template <>
+struct FixedPointRawTypeTraits<int16x8_m128i> {
+ typedef std::int16_t ScalarRawType;
+ static const int kLanes = 8;
+};
+
template <>
inline __m128i BitAnd(__m128i a, __m128i b) {
return _mm_and_si128(a, b);
}
+template <>
+inline int16x8_m128i BitAnd(int16x8_m128i a, int16x8_m128i b) {
+ return int16x8_m128i(_mm_and_si128(a.v, b.v));
+}
+
template <>
inline __m128i BitOr(__m128i a, __m128i b) {
return _mm_or_si128(a, b);
}
+template <>
+inline int16x8_m128i BitOr(int16x8_m128i a, int16x8_m128i b) {
+ return int16x8_m128i(_mm_or_si128(a.v, b.v));
+}
+
template <>
inline __m128i BitXor(__m128i a, __m128i b) {
return _mm_xor_si128(a, b);
}
+template <>
+inline int16x8_m128i BitXor(int16x8_m128i a, int16x8_m128i b) {
+ return int16x8_m128i(_mm_xor_si128(a.v, b.v));
+}
+
template <>
inline __m128i BitNot(__m128i a) {
return _mm_andnot_si128(a, _mm_set1_epi32(-1));
}
+template <>
+inline int16x8_m128i BitNot(int16x8_m128i a) {
+ return int16x8_m128i(_mm_andnot_si128(a.v, _mm_set1_epi16(-1)));
+}
+
template <>
inline __m128i Add(__m128i a, __m128i b) {
return _mm_add_epi32(a, b);
}
+template <>
+inline int16x8_m128i Add(int16x8_m128i a, int16x8_m128i b) {
+ return int16x8_m128i(_mm_add_epi16(a.v, b.v));
+}
+
template <>
inline __m128i Mul(__m128i a, __m128i b) {
return _mm_mullo_epi32(a, b);
}
+template <>
+inline int16x8_m128i Mul(int16x8_m128i a, int16x8_m128i b) {
+ return int16x8_m128i(_mm_mullo_epi16(a.v, b.v));
+}
+
template <>
inline __m128i Sub(__m128i a, __m128i b) {
return _mm_sub_epi32(a, b);
}
+template <>
+inline int16x8_m128i Sub(int16x8_m128i a, int16x8_m128i b) {
+ return int16x8_m128i(_mm_sub_epi16(a.v, b.v));
+}
+
template <>
inline __m128i Neg(__m128i a) {
return _mm_sign_epi32(a, _mm_set1_epi32(-1));
}
+template <>
+inline int16x8_m128i Neg(int16x8_m128i a) {
+ return int16x8_m128i(_mm_sign_epi16(a.v, _mm_set1_epi16(-1)));
+}
+
template <>
inline __m128i ShiftLeft(__m128i a, int offset) {
return _mm_slli_epi32(a, offset);
}
+template <>
+inline int16x8_m128i ShiftLeft(int16x8_m128i a, int offset) {
+ return int16x8_m128i(_mm_slli_epi16(a.v, offset));
+}
+
template <>
inline __m128i ShiftRight(__m128i a, int offset) {
return _mm_srai_epi32(a, offset);
}
+template <>
+inline int16x8_m128i ShiftRight(int16x8_m128i a, int offset) {
+ return int16x8_m128i(_mm_srai_epi16(a.v, offset));
+}
+
template <>
inline __m128i SelectUsingMask(__m128i if_mask, __m128i then_val,
__m128i else_val) {
- return _mm_castps_si128(_mm_blendv_ps(_mm_castsi128_ps(else_val),
- _mm_castsi128_ps(then_val),
- _mm_castsi128_ps(if_mask)));
+ // borrowed from Intel's arm_neon_sse.h header.
+ return _mm_or_si128(_mm_and_si128(if_mask, then_val),
+ _mm_andnot_si128(if_mask, else_val));
+}
+
+template <>
+inline int16x8_m128i SelectUsingMask(int16x8_m128i if_mask,
+ int16x8_m128i then_val,
+ int16x8_m128i else_val) {
+ // borrowed from Intel's arm_neon_sse.h header.
+ return int16x8_m128i(SelectUsingMask(if_mask.v, then_val.v, else_val.v));
}
template <>
return _mm_cmpeq_epi32(a, b);
}
+template <>
+inline int16x8_m128i MaskIfEqual(int16x8_m128i a, int16x8_m128i b) {
+ return int16x8_m128i(_mm_cmpeq_epi16(a.v, b.v));
+}
+
template <>
inline __m128i MaskIfNotEqual(__m128i a, __m128i b) {
return BitNot(MaskIfEqual(a, b));
}
+template <>
+inline int16x8_m128i MaskIfNotEqual(int16x8_m128i a, int16x8_m128i b) {
+ return BitNot(MaskIfEqual(a, b));
+}
+
template <>
inline __m128i MaskIfZero(__m128i a) {
return MaskIfEqual(a, _mm_set1_epi32(0));
}
+template <>
+inline int16x8_m128i MaskIfZero(int16x8_m128i a) {
+ return MaskIfEqual(a, int16x8_m128i(_mm_set1_epi16(0)));
+}
+
template <>
inline __m128i MaskIfNonZero(__m128i a) {
return MaskIfNotEqual(a, _mm_set1_epi32(0));
}
+template <>
+inline int16x8_m128i MaskIfNonZero(int16x8_m128i a) {
+ return MaskIfNotEqual(a, int16x8_m128i(_mm_set1_epi16(0)));
+}
+
template <>
inline __m128i MaskIfGreaterThan(__m128i a, __m128i b) {
return _mm_cmpgt_epi32(a, b);
}
+template <>
+inline int16x8_m128i MaskIfGreaterThan(int16x8_m128i a, int16x8_m128i b) {
+ return int16x8_m128i(_mm_cmpgt_epi16(a.v, b.v));
+}
+
template <>
inline __m128i MaskIfLessThan(__m128i a, __m128i b) {
return _mm_cmplt_epi32(a, b);
}
+template <>
+inline int16x8_m128i MaskIfLessThan(int16x8_m128i a, int16x8_m128i b) {
+ return int16x8_m128i(_mm_cmplt_epi16(a.v, b.v));
+}
+
template <>
inline __m128i MaskIfGreaterThanOrEqual(__m128i a, __m128i b) {
return BitNot(MaskIfLessThan(a, b));
}
+template <>
+inline int16x8_m128i MaskIfGreaterThanOrEqual(int16x8_m128i a,
+ int16x8_m128i b) {
+ return BitNot(MaskIfLessThan(a, b));
+}
+
template <>
inline __m128i MaskIfLessThanOrEqual(__m128i a, __m128i b) {
return BitNot(MaskIfGreaterThan(a, b));
}
+template <>
+inline int16x8_m128i MaskIfLessThanOrEqual(int16x8_m128i a, int16x8_m128i b) {
+ return BitNot(MaskIfGreaterThan(a, b));
+}
+
/* Assumptions:
- All and Any are used on masks.
- masks are all_ones for true lanes, all_zeroes otherwise.
return _mm_testc_si128(a, a);
}
+template <>
+inline bool All(int16x8_m128i a) {
+ return _mm_testc_si128(a.v, a.v);
+}
+
template <>
inline bool Any(__m128i a) {
- return BitNot(_mm_testz_si128(a, a));
+ return !_mm_testz_si128(a, a);
+}
+
+template <>
+inline bool Any(int16x8_m128i a) {
+ return !_mm_testz_si128(a.v, a.v);
}
template <>
return result;
}
+template <>
+inline int16x8_m128i RoundingHalfSum(int16x8_m128i a, int16x8_m128i b) {
+ // Idea: go to unsigned to use _mm_avg_epu16,
+ // borrowed from Intel's arm_neon_sse.h header.
+ __m128i constant_neg_32768 = _mm_set1_epi16(-32768);
+ __m128i a_unsigned = _mm_sub_epi16(a.v, constant_neg_32768);
+ __m128i b_unsigned = _mm_sub_epi16(b.v, constant_neg_32768);
+ __m128i avg_unsigned = _mm_avg_epu16(a_unsigned, b_unsigned);
+ __m128i avg = _mm_add_epi16(avg_unsigned, constant_neg_32768);
+ return int16x8_m128i(avg);
+}
+
template <>
inline __m128i SaturatingRoundingDoublingHighMul(__m128i a, __m128i b) {
__m128i min, saturation_mask, a0_a2, a1_a3, b0_b2, b1_b3;
return SelectUsingMask(saturation_mask, min, result);
}
+template <>
+inline int16x8_m128i SaturatingRoundingDoublingHighMul(int16x8_m128i a,
+ int16x8_m128i b) {
+ // Idea: use _mm_mulhrs_epi16 then saturate with a bit-operation,
+ // borrowed from Intel's arm_neon_sse.h header.
+ __m128i result_unsaturated = _mm_mulhrs_epi16(a.v, b.v);
+ __m128i saturation_mask =
+ _mm_cmpeq_epi16(result_unsaturated, _mm_set1_epi16(0x8000));
+ __m128i result = _mm_xor_si128(result_unsaturated, saturation_mask);
+ return int16x8_m128i(result);
+}
+
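
A note on the saturation fix-up above: with the usual _mm_mulhrs_epi16
semantics, the only input pair that can produce the unrepresentable value
+1.0 is -32768 * -32768 (that is, -1 * -1 in Q0.15), and for exactly that
pair mulhrs returns 0x8000. The comparison therefore sets the mask to
all-ones in precisely those lanes, and XOR-ing 0x8000 with 0xFFFF flips it
to 0x7FFF, the saturated result; in every other lane the mask is zero and
the XOR is a no-op.
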
template <>
inline __m128i Dup<__m128i>(std::int32_t x) {
return _mm_set1_epi32(x);
}
+template <>
+inline int16x8_m128i Dup<int16x8_m128i>(std::int16_t x) {
+ return int16x8_m128i(_mm_set1_epi16(x));
+}
+
+// So far this is only needed for int16.
+template <>
+inline int16x8_m128i SaturatingAdd(int16x8_m128i a, int16x8_m128i b) {
+ return int16x8_m128i(_mm_adds_epi16(a.v, b.v));
+}
+
} // end namespace gemmlowp
#endif // GEMMLOWP_INTERNAL_FIXEDPOINT_SSE_H_
#include "common.h"
-#if defined ANDROID || defined __ANDROID__
-#include <android/api-level.h>
-// The 18 here should be 16, but has to be 18 for now due
-// to a Google-internal issue.
-#if __ANDROID_API__ < 18
-#include <malloc.h>
-#define GEMMLOWP_USE_MEMALIGN
-#endif
-// posix_memalign is missing on some 4.1 x86 devices
-#if __ANDROID_API__ == 18
-#ifdef GEMMLOWP_X86_32
-#include <malloc.h>
-#define GEMMLOWP_USE_MEMALIGN
-#endif
-#endif
-#endif
-
namespace gemmlowp {
enum class TypeId : std::uint8_t { Uint8, Int8, Uint16, Int16, Uint32, Int32 };
if (reserved_bytes_ > storage_size_) {
DeallocateStorage();
storage_size_ = RoundUpToPowerOfTwo(reserved_bytes_);
-#ifdef GEMMLOWP_USE_MEMALIGN
- storage_ = memalign(kAlignment, storage_size_);
-#else
- if (posix_memalign(&storage_, kAlignment, storage_size_)) {
- storage_ = nullptr;
- }
-#endif
+ storage_ = aligned_alloc(kAlignment, storage_size_);
}
ReleaseBuildAssertion(!storage_size_ || storage_, "allocation failure");
private:
void DeallocateStorage() {
assert(!committed_);
- free(storage_);
+ aligned_free(storage_);
storage_size_ = 0;
}
int l2_depth;
template <typename KernelFormat>
- void Init(int rows, int cols, int depth, int num_threads,
- int l1_bytes_to_use, int l2_bytes_to_use, float l2_rhs_factor) {
+ void Init(int rows, int cols, int depth, int num_threads, int l1_bytes_to_use,
+ int l2_bytes_to_use, float l2_rhs_factor) {
FindL2BlockSizes<KernelFormat>(rows, cols, depth, num_threads,
- l2_bytes_to_use, l2_rhs_factor,
- &l2_rows, &l2_cols, &l2_depth);
- FindL1BlockSizes<KernelFormat>(l2_rows, l2_cols, l2_depth,
- l1_bytes_to_use,
+ l2_bytes_to_use, l2_rhs_factor, &l2_rows,
+ &l2_cols, &l2_depth);
+ FindL1BlockSizes<KernelFormat>(l2_rows, l2_cols, l2_depth, l1_bytes_to_use,
&l1_rows, &l1_cols, &l1_depth);
}
int l2_rows = 0;
int l2_cols = 0;
int l2_depth = 0;
+
+ int per_thread_rows =
+ std::max(1, RoundUp<KernelFormat::kRows>(rows) / num_threads);
+
// No L2 blocking in the depth dimension at the moment.
// Too much loss of accuracy due to storing intermediate results in
// low precision.
// dimension concerns only the LHS. Blocking only RHS matrix for L2 enhances
// the performance on x86.
if (l2_rhs_factor == 1.0f) {
- l2_rows = RoundUp<KernelFormat::kRows>(rows);
+ l2_rows = RoundUp<KernelFormat::kRows>(per_thread_rows);
} else {
int max_cache_friendly_l2_rows =
std::max(1, (l2_bytes_to_use - l2_depth * l2_cols) /
(num_threads * (l2_depth + 4 * l2_cols)));
- int min_l2_rows_blocks =
- std::max(1, CeilQuotient(rows, max_cache_friendly_l2_rows));
- l2_rows =
- RoundUp<KernelFormat::kRows>(CeilQuotient(rows, min_l2_rows_blocks));
+ int min_l2_rows_blocks = std::max(
+ 1, CeilQuotient(per_thread_rows, max_cache_friendly_l2_rows));
+ l2_rows = RoundUp<KernelFormat::kRows>(
+ CeilQuotient(per_thread_rows, min_l2_rows_blocks));
}
*out_l2_rows = l2_rows;
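
As a worked example of the new per-thread blocking (the numbers here are
hypothetical): with rows = 1000, num_threads = 4 and a kernel whose
KernelFormat::kRows is 12, per_thread_rows = max(1, RoundUp<12>(1000) / 4) =
max(1, 1008 / 4) = 252, so with l2_rhs_factor == 1.0f the L2 row block becomes
RoundUp<12>(252) = 252 rows per thread instead of the full 1008 rows that the
previous code would have used.
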
#ifndef GEMMLOWP_INTERNAL_COMMON_H_
#define GEMMLOWP_INTERNAL_COMMON_H_
-#include <pthread.h>
+#include "../internal/platform.h"
+#include "../profiling/pthread_everywhere.h"
#include <algorithm>
#include <cassert>
#define GEMMLOWP_ARM
#endif
+// Detect MIPS, 32-bit or 64-bit
+#if defined(__mips) && !defined(__LP64__)
+#define GEMMLOWP_MIPS_32
+#endif
+
+#if defined(__mips) && defined(__LP64__)
+#define GEMMLOWP_MIPS_64
+#endif
+
+#if defined(GEMMLOWP_MIPS_32) || defined(GEMMLOWP_MIPS_64)
+#define GEMMLOWP_MIPS
+#endif
+
// Detect x86, 32-bit or 64-bit
#if defined(__i386__) || defined(_M_IX86) || defined(_X86_) || defined(__i386)
#define GEMMLOWP_X86_32
#define GEMMLOWP_NEON_64
#endif
+// Detect MIPS MSA.
+// Limit MSA optimizations to little-endian CPUs for now.
+// TODO: Perhaps, eventually support MSA optimizations on big-endian CPUs?
+#if defined(GEMMLOWP_MIPS) && (__mips_isa_rev >= 5) && defined(__mips_msa) && \
+ defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
+#define GEMMLOWP_MSA
+#endif
+
+// Convenience MIPS MSA tokens for 32-bit or 64-bit.
+#if defined(GEMMLOWP_MSA) && defined(GEMMLOWP_MIPS_32)
+#define GEMMLOWP_MSA_32
+#endif
+
+#if defined(GEMMLOWP_MSA) && defined(GEMMLOWP_MIPS_64)
+#define GEMMLOWP_MSA_64
+#endif
+
// Detect SSE.
#ifdef __SSE4_1__
#define GEMMLOWP_SSE4
#endif
// Convenience SSE4 tokens for 32-bit or 64-bit
-#if defined(GEMMLOWP_SSE4) && defined(GEMMLOWP_X86_32)
+#if defined(GEMMLOWP_SSE4) && defined(GEMMLOWP_X86_32) && \
+ !defined(GEMMLOWP_DISABLE_SSE4)
#define GEMMLOWP_SSE4_32
#endif
#define GEMMLOWP_SSE3_32
#endif
-#if defined(GEMMLOWP_SSE4) && defined(GEMMLOWP_X86_64)
+#if defined(GEMMLOWP_SSE4) && defined(GEMMLOWP_X86_64) && \
+ !defined(GEMMLOWP_DISABLE_SSE4)
#define GEMMLOWP_SSE4_64
#endif
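
Note that the new !defined(GEMMLOWP_DISABLE_SSE4) guards only affect these
convenience tokens (GEMMLOWP_SSE4_32 / GEMMLOWP_SSE4_64), which drive kernel
selection; a build can opt out of the SSE4 kernels by defining
GEMMLOWP_DISABLE_SSE4 (for example, passing -DGEMMLOWP_DISABLE_SSE4 to the
compiler), while GEMMLOWP_SSE4 itself remains defined whenever __SSE4_1__ is.
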
// x86-32 and not Android. Same as x86-64 but less bullish.
const int kDefaultL1CacheSize = 32 * 1024;
const int kDefaultL2CacheSize = 2 * 1024 * 1024;
+#elif defined(GEMMLOWP_MIPS)
+// MIPS and not Android. TODO: MIPS and Android?
+const int kDefaultL1CacheSize = 32 * 1024;
+const int kDefaultL2CacheSize = 1024 * 1024;
#else
// Less common hardware. Maybe some unusual or older or embedded thing.
// Assume smaller caches, but don't depart too far from what we do
// leaving __builtin_prefetch a no-op on this architecture.
// For our purposes, "pldl1keep" is usually what we want, meaning:
// "prefetch for load, into L1 cache, using each value multiple times".
- asm volatile("prfm pldl1keep, [%[ptr]]\n" ::[ptr] "r"(ptr) : );
+ asm volatile("prfm pldl1keep, [%[ptr]]\n" ::[ptr] "r"(ptr) :);
#elif defined \
__GNUC__ // Clang and GCC define __GNUC__ and have __builtin_prefetch.
__builtin_prefetch(ptr);
// Returns the offset into a cell, at which a given coefficient is stored.
template <typename CellFormat>
inline int OffsetIntoCell(int w, int d) {
+ const int size = CellFormat::kWidth;
switch (CellFormat::kOrder) {
case CellOrder::DepthMajor:
return w + d * CellFormat::kWidth;
return d + w * CellFormat::kDepth;
case CellOrder::Diagonal:
assert(CellFormat::kWidth == CellFormat::kDepth);
- static const int size = CellFormat::kWidth;
return ((size + w - d) * size + d) % (size * size);
default:
assert(false);
namespace gemmlowp {
-template <bool MaxProductIsLessThan4096,
- bool LhsAlwaysNonzero>
+template <bool MaxProductIsLessThan4096, bool LhsAlwaysNonzero>
struct DefaultKernelImpl {};
// Partial specialization implementing the logic that if we want to use
} // end namespace gemmlowp
-#define GEMMLOWP_SET_DEFAULT_KERNEL(MaxProductIsLessThan4096, \
- LhsAlwaysNonzero, Kernel) \
- namespace gemmlowp { \
- template <> \
- struct DefaultKernelImpl<MaxProductIsLessThan4096, \
- LhsAlwaysNonzero> : Kernel {}; \
+#define GEMMLOWP_SET_DEFAULT_KERNEL(MaxProductIsLessThan4096, \
+ LhsAlwaysNonzero, Kernel) \
+ namespace gemmlowp { \
+ template <> \
+ struct DefaultKernelImpl<MaxProductIsLessThan4096, LhsAlwaysNonzero> \
+ : Kernel {}; \
}
#if defined GEMMLOWP_NEON_32
GEMMLOWP_SET_DEFAULT_KERNEL(false, false, NEON_64_Kernel12x8Depth2)
GEMMLOWP_SET_DEFAULT_KERNEL(false, true,
NEON_64bit_GEMM_Int8Operands_LhsNonzero)
+#elif defined(GEMMLOWP_MSA)
+#include "kernel_msa.h"
+GEMMLOWP_SET_DEFAULT_KERNEL(false, false, MSA_Kernel12x8Depth2)
#elif defined GEMMLOWP_SSE4_32
#include "kernel_sse.h"
GEMMLOWP_SET_DEFAULT_KERNEL(false, false, SSE4_32_Kernel4x4Depth2)
#include "kernel_sse.h"
GEMMLOWP_SET_DEFAULT_KERNEL(false, false, SSE4_64_Kernel12x4Depth2)
#else
-#ifndef GEMMLOWP_ALLOW_SLOW_SCALAR_FALLBACK
-#if defined __ARM_ARCH_5TE__
-// SIMD is not available on this platform. The slow fallback will be used.
-// Don't require GEMMLOWP_ALLOW_SLOW_SCALAR_FALLBACK because there's nothing
-// the user can do about it.
-#else
-#error \
- "SIMD not enabled, you'd be getting a slow software fallback. Consider \
-enabling SIMD extensions (for example using -msse4 if you're on modern x86). \
-If that's not an option, and you would like to continue with the \
-slow fallback, define GEMMLOWP_ALLOW_SLOW_SCALAR_FALLBACK."
-#endif
-#endif
#include "kernel_reference.h"
namespace gemmlowp {
typedef ReferenceKernel<KernelFormat<
--- /dev/null
+// Copyright 2018 The Gemmlowp Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// kernel_msa.h: a collection of MSA optimized kernels.
+// Check in kernel_default.h which one(s) are actually used by default.
+// Others are mere experiments; they are still covered by tests
+// in case they might be useful some day.
+
+#ifndef GEMMLOWP_INTERNAL_KERNEL_MSA_H_
+#define GEMMLOWP_INTERNAL_KERNEL_MSA_H_
+
+#include "kernel.h"
+
+#include <msa.h>
+#include <cassert>
+
+namespace gemmlowp {
+
+#ifdef GEMMLOWP_MSA
+
+// Some convenience macros to hide differences between MIPS32 and MIPS64.
+#ifdef GEMMLOWP_MIPS_64
+#define GEMMLOWP_MIPS_XADDU "daddu"
+#define GEMMLOWP_MIPS_XADDIU "daddiu"
+#define GEMMLOWP_MIPS_XSLL "dsll"
+#else
+#define GEMMLOWP_MIPS_XADDU "addu"
+#define GEMMLOWP_MIPS_XADDIU "addiu"
+#define GEMMLOWP_MIPS_XSLL "sll"
+#endif
+
+// Our main GEMM kernel.
+struct MSA_Kernel12x8Depth2 : KernelBase {
+ typedef KernelFormat<KernelSideFormat<CellFormat<4, 2>, 3>,
+ KernelSideFormat<CellFormat<4, 2>, 2> >
+ Format;
+
+ const char* Name() const override { return "MSA, 12x8, depth 2"; }
+
+ // TODO(benoitjacob): reorder function arguments so dst comes last
+ void Run(std::int32_t* dst_ptr, std::size_t dst_row_stride,
+ std::size_t dst_col_stride, const std::uint8_t* lhs_ptr,
+ const std::uint8_t* rhs_ptr, std::size_t start_depth,
+ std::size_t run_depth) const override {
+ ScopedProfilingLabel label("optimized kernel (MSA 12x8)");
+// See comments above for why we need local numerical labels in our asm.
+#define GEMMLOWP_LABEL_CLEAR_ACCUMULATORS "1"
+#define GEMMLOWP_LABEL_BEFORE_LOOP "2"
+#define GEMMLOWP_LABEL_LOOP "3"
+#define GEMMLOWP_LABEL_AFTER_LOOP "4"
+
+ assert(dst_row_stride == 1);
+ asm volatile(
+ // Set a temp to all zeroes.
+ "ldi.b $w31, 0\n"
+
+ // Multiply dst_col_stride by 4 == sizeof(int32) to use
+ // it as a byte offset below.
+ GEMMLOWP_MIPS_XSLL
+ " %[dst_col_stride], %[dst_col_stride], 2\n"
+
+ // Check if start_depth==0 to decide whether we will clear
+ // accumulators or load existing accumulators.
+ "beqz %[start_depth], " GEMMLOWP_LABEL_CLEAR_ACCUMULATORS "f\n"
+
+ // Load accumulators (start_depth != 0).
+ GEMMLOWP_MIPS_XADDU
+ " $a0, %[dst_ptr], %[dst_col_stride]\n"
+ "ld.w $w0, (0*16)(%[dst_ptr])\n"
+ "ld.w $w4, (1*16)(%[dst_ptr])\n"
+ "ld.w $w8, (2*16)(%[dst_ptr])\n" GEMMLOWP_MIPS_XADDU
+ " $a1, $a0, %[dst_col_stride]\n"
+ "ld.w $w1, (0*16)($a0)\n"
+ "ld.w $w5, (1*16)($a0)\n"
+ "ld.w $w9, (2*16)($a0)\n" GEMMLOWP_MIPS_XADDU
+ " $a0, $a1, %[dst_col_stride]\n"
+ "ld.w $w2, (0*16)($a1)\n"
+ "ld.w $w6, (1*16)($a1)\n"
+ "ld.w $w10, (2*16)($a1)\n" GEMMLOWP_MIPS_XADDU
+ " $a1, $a0, %[dst_col_stride]\n"
+ "ld.w $w3, (0*16)($a0)\n"
+ "ld.w $w7, (1*16)($a0)\n"
+ "ld.w $w11, (2*16)($a0)\n" GEMMLOWP_MIPS_XADDU
+ " $a0, $a1, %[dst_col_stride]\n"
+ "ld.w $w12, (0*16)($a1)\n"
+ "ld.w $w16, (1*16)($a1)\n"
+ "ld.w $w20, (2*16)($a1)\n" GEMMLOWP_MIPS_XADDU
+ " $a1, $a0, %[dst_col_stride]\n"
+ "ld.w $w13, (0*16)($a0)\n"
+ "ld.w $w17, (1*16)($a0)\n"
+ "ld.w $w21, (2*16)($a0)\n" GEMMLOWP_MIPS_XADDU
+ " $a0, $a1, %[dst_col_stride]\n"
+ "ld.w $w14, (0*16)($a1)\n"
+ "ld.w $w18, (1*16)($a1)\n"
+ "ld.w $w22, (2*16)($a1)\n"
+ "ld.w $w15, (0*16)($a0)\n"
+ "ld.w $w19, (1*16)($a0)\n"
+ "ld.w $w23, (2*16)($a0)\n"
+ "b " GEMMLOWP_LABEL_BEFORE_LOOP "f\n"
+
+ GEMMLOWP_LABEL_CLEAR_ACCUMULATORS
+ ":\n"
+ // Clear accumulators (start_depth == 0).
+ "ldi.w $w0, 0\n"
+ "ldi.w $w4, 0\n"
+ "ldi.w $w8, 0\n"
+ "ldi.w $w1, 0\n"
+ "ldi.w $w5, 0\n"
+ "ldi.w $w9, 0\n"
+ "ldi.w $w2, 0\n"
+ "ldi.w $w6, 0\n"
+ "ldi.w $w10, 0\n"
+ "ldi.w $w3, 0\n"
+ "ldi.w $w7, 0\n"
+ "ldi.w $w11, 0\n"
+ "ldi.w $w12, 0\n"
+ "ldi.w $w16, 0\n"
+ "ldi.w $w20, 0\n"
+ "ldi.w $w13, 0\n"
+ "ldi.w $w17, 0\n"
+ "ldi.w $w21, 0\n"
+ "ldi.w $w14, 0\n"
+ "ldi.w $w18, 0\n"
+ "ldi.w $w22, 0\n"
+ "ldi.w $w15, 0\n"
+ "ldi.w $w19, 0\n"
+ "ldi.w $w23, 0\n"
+
+ GEMMLOWP_LABEL_BEFORE_LOOP ":\n"
+
+ GEMMLOWP_LABEL_LOOP
+ ":\n"
+ // Overview of register layout:
+ //
+ // A half of the 2 2x4 cells of Rhs is stored in 16bit in w27-w30
+ // (each register contains 4 replicas of a pair of elements).
+ // A 12x2 block of 3 4x2 cells Lhs is stored in 16bit in w24-w26.
+ // A 12x8 block of accumulators is stored in 32bit in w0-w23.
+ //
+ // +------+------+------+------+
+ // Rhs |w27 |w28 |w29 |w30 |
+ // +------+------+------+------+
+ //
+ // | | | | |
+ //
+ // Lhs | | | | |
+ //
+ // +---+ - - - - +------+------+------+------+
+ // |w24| |w0/12 |w1/13 |w2/14 |w3/15 |
+ // |w24| |w0/12 |w1/13 |w2/14 |w3/15 |
+ // |w24| |w0/12 |w1/13 |w2/14 |w3/15 |
+ // |w24| |w0/12 |w1/13 |w2/14 |w3/15 |
+ // +---+ - - - - +------+------+------+------+
+ // |w25| |w4/16 |w5/17 |w6/18 |w7/19 |
+ // |w25| |w4/16 |w5/17 |w6/18 |w7/19 |
+ // |w25| |w4/16 |w5/17 |w6/18 |w7/19 |
+ // |w25| |w4/16 |w5/17 |w6/18 |w7/19 |
+ // +---+ - - - - +------+------+------+------+
+ // |w26| |w8/20 |w9/21 |w10/22|w11/23|
+ // |w26| |w8/20 |w9/21 |w10/22|w11/23|
+ // |w26| |w8/20 |w9/21 |w10/22|w11/23|
+ // |w26| |w8/20 |w9/21 |w10/22|w11/23|
+ // +---+ - - - - +------+------+------+------+
+ //
+ // Accumulators
+
+ // Load 3 x 8 bytes of lhs[] with 2 16-byte overlapped loads.
+ "ld.b $w24, 0(%[lhs_ptr])\n"
+ "ld.b $w25, 8(%[lhs_ptr])\n"
+
+ // Load 4 bytes of rhs[] for the first half of depth 0.
+ "lbu $a0, 0(%[rhs_ptr])\n"
+ "lbu $a1, 1(%[rhs_ptr])\n"
+ "lbu $a2, 2(%[rhs_ptr])\n"
+ "lbu $a3, 3(%[rhs_ptr])\n"
+ // Load 4 bytes of rhs[] for the first half of depth 1.
+ "lbu $v0, 4(%[rhs_ptr])\n"
+ "lbu $v1, 5(%[rhs_ptr])\n"
+ "lbu $t8, 6(%[rhs_ptr])\n"
+ "lbu $t9, 7(%[rhs_ptr])\n"
+
+ // Zero-extend 8-bit elements of lhs[] to 16 bits.
+ "ilvr.b $w24, $w31, $w24\n"
+ "ilvl.b $w26, $w31, $w25\n"
+ "ilvr.b $w25, $w31, $w25\n"
+ // Interleave depth 0 and depth 1 elements of lhs[] for dpadd_u.w.
+ "ilvl.d $w27, $w31, $w24\n"
+ "ilvl.d $w28, $w31, $w25\n"
+ "ilvl.d $w29, $w31, $w26\n"
+ "ilvr.h $w24, $w27, $w24\n"
+ "ilvr.h $w25, $w28, $w25\n"
+ "ilvr.h $w26, $w29, $w26\n"
+
+ // Combine and interleave depth 0 and depth 1 elements of rhs[] for
+ // dpadd_u.w (for the first half).
+ "ins $a0, $v0, 16, 8\n"
+ "ins $a1, $v1, 16, 8\n"
+ "ins $a2, $t8, 16, 8\n"
+ "ins $a3, $t9, 16, 8\n"
+ // Make 4 replicas of every pair of rhs[] elements.
+ "fill.w $w27, $a0\n"
+ "fill.w $w28, $a1\n"
+ "fill.w $w29, $a2\n"
+ "fill.w $w30, $a3\n"
+
+ // Load 4 bytes of rhs[] for the second half of depth 0.
+ "lbu $a0, 8(%[rhs_ptr])\n"
+ "lbu $a1, 9(%[rhs_ptr])\n"
+ "lbu $a2, 10(%[rhs_ptr])\n"
+ "lbu $a3, 11(%[rhs_ptr])\n"
+ // Load 4 bytes of rhs[] for the second half of depth 1.
+ "lbu $v0, 12(%[rhs_ptr])\n"
+ "lbu $v1, 13(%[rhs_ptr])\n"
+ "lbu $t8, 14(%[rhs_ptr])\n"
+ "lbu $t9, 15(%[rhs_ptr])\n"
+
+ // First half of depths 0 and 1.
+ // Dot-product-(and)-add doubles multiplicand width.
+ "dpadd_u.w $w0, $w24, $w27\n"
+ "dpadd_u.w $w4, $w25, $w27\n"
+ "dpadd_u.w $w8, $w26, $w27\n"
+ "dpadd_u.w $w1, $w24, $w28\n"
+ "dpadd_u.w $w5, $w25, $w28\n"
+ "dpadd_u.w $w9, $w26, $w28\n"
+ "dpadd_u.w $w2, $w24, $w29\n"
+ "dpadd_u.w $w6, $w25, $w29\n"
+ "dpadd_u.w $w10, $w26, $w29\n"
+ "dpadd_u.w $w3, $w24, $w30\n"
+ "dpadd_u.w $w7, $w25, $w30\n"
+ "dpadd_u.w $w11, $w26, $w30\n"
+
+ // Combine and interleave depth 0 and depth 1 elements of rhs[] for
+ // dpadd_u.w (for the second half).
+ "ins $a0, $v0, 16, 8\n"
+ "ins $a1, $v1, 16, 8\n"
+ "ins $a2, $t8, 16, 8\n"
+ "ins $a3, $t9, 16, 8\n"
+ // Make 4 replicas of every pair of rhs[] elements.
+ "fill.w $w27, $a0\n"
+ "fill.w $w28, $a1\n"
+ "fill.w $w29, $a2\n"
+ "fill.w $w30, $a3\n"
+
+ // Second half of depths 0 and 1.
+ // Dot-product-(and)-add doubles multiplicand width.
+ "dpadd_u.w $w12, $w24, $w27\n"
+ "dpadd_u.w $w16, $w25, $w27\n"
+ "dpadd_u.w $w20, $w26, $w27\n"
+ "dpadd_u.w $w13, $w24, $w28\n"
+ "dpadd_u.w $w17, $w25, $w28\n"
+ "dpadd_u.w $w21, $w26, $w28\n"
+ "dpadd_u.w $w14, $w24, $w29\n"
+ "dpadd_u.w $w18, $w25, $w29\n"
+ "dpadd_u.w $w22, $w26, $w29\n"
+ "dpadd_u.w $w15, $w24, $w30\n"
+ "dpadd_u.w $w19, $w25, $w30\n"
+ "dpadd_u.w $w23, $w26, $w30\n"
+
+ GEMMLOWP_MIPS_XADDIU " %[run_depth], -2\n" GEMMLOWP_MIPS_XADDIU
+ " %[lhs_ptr], 24\n" GEMMLOWP_MIPS_XADDIU
+ " %[rhs_ptr], 16\n"
+ "bnez %[run_depth]," GEMMLOWP_LABEL_LOOP "b\n"
+
+ GEMMLOWP_LABEL_AFTER_LOOP ":\n"
+
+ // Store accumulators.
+ GEMMLOWP_MIPS_XADDU
+ " $a0, %[dst_ptr], %[dst_col_stride]\n"
+ "st.w $w0, (0*16)(%[dst_ptr])\n"
+ "st.w $w4, (1*16)(%[dst_ptr])\n"
+ "st.w $w8, (2*16)(%[dst_ptr])\n" GEMMLOWP_MIPS_XADDU
+ " $a1, $a0, %[dst_col_stride]\n"
+ "st.w $w1, (0*16)($a0)\n"
+ "st.w $w5, (1*16)($a0)\n"
+ "st.w $w9, (2*16)($a0)\n" GEMMLOWP_MIPS_XADDU
+ " $a0, $a1, %[dst_col_stride]\n"
+ "st.w $w2, (0*16)($a1)\n"
+ "st.w $w6, (1*16)($a1)\n"
+ "st.w $w10, (2*16)($a1)\n" GEMMLOWP_MIPS_XADDU
+ " $a1, $a0, %[dst_col_stride]\n"
+ "st.w $w3, (0*16)($a0)\n"
+ "st.w $w7, (1*16)($a0)\n"
+ "st.w $w11, (2*16)($a0)\n" GEMMLOWP_MIPS_XADDU
+ " $a0, $a1, %[dst_col_stride]\n"
+ "st.w $w12, (0*16)($a1)\n"
+ "st.w $w16, (1*16)($a1)\n"
+ "st.w $w20, (2*16)($a1)\n" GEMMLOWP_MIPS_XADDU
+ " $a1, $a0, %[dst_col_stride]\n"
+ "st.w $w13, (0*16)($a0)\n"
+ "st.w $w17, (1*16)($a0)\n"
+ "st.w $w21, (2*16)($a0)\n" GEMMLOWP_MIPS_XADDU
+ " $a0, $a1, %[dst_col_stride]\n"
+ "st.w $w14, (0*16)($a1)\n"
+ "st.w $w18, (1*16)($a1)\n"
+ "st.w $w22, (2*16)($a1)\n"
+ "st.w $w15, (0*16)($a0)\n"
+ "st.w $w19, (1*16)($a0)\n"
+ "st.w $w23, (2*16)($a0)\n"
+ : // outputs
+ [lhs_ptr] "+r"(lhs_ptr), [rhs_ptr] "+r"(rhs_ptr),
+ [run_depth] "+r"(run_depth),
+ [dst_col_stride] "+r"(dst_col_stride)
+ : // inputs
+ [dst_ptr] "r"(dst_ptr),
+ [start_depth] "r"(start_depth)
+ : // clobbers
+ "memory", "v0", "v1", "a0", "a1", "a2", "a3", "t8", "t9", "$f0", "$f1",
+ "$f2", "$f3", "$f4", "$f5", "$f6", "$f7", "$f8", "$f9", "$f10", "$f11",
+ "$f12", "$f13", "$f14", "$f15", "$f16", "$f17", "$f18", "$f19", "$f20",
+ "$f21", "$f22", "$f23", "$f24", "$f25", "$f26", "$f27", "$f28", "$f29",
+ "$f30", "$f31");
+
+#undef GEMMLOWP_LABEL_CLEAR_ACCUMULATORS
+#undef GEMMLOWP_LABEL_BEFORE_LOOP
+#undef GEMMLOWP_LABEL_LOOP
+#undef GEMMLOWP_LABEL_AFTER_LOOP
+ }
+};
+
+#undef GEMMLOWP_MIPS_XADDU
+#undef GEMMLOWP_MIPS_XADDIU
+#undef GEMMLOWP_MIPS_XSLL
+
+#endif // GEMMLOWP_MSA
+
+} // namespace gemmlowp
+
+#endif // GEMMLOWP_INTERNAL_KERNEL_MSA_H_
GEMMLOWP_LOOP_NEON_32_KERNEL_12X4_DEPTH2_ASSUMING_12BIT_PRODUCTS
":\n"
-// Overview of register layout:
-//
-// Registers q4--q16 are the local 16-bit accumulators.
-// However, each entry in the result matrix is represented
-// by *two* local 16-bit accumulators: one for even levels
-// of depth and one for odd levels of depth. These correspond
-// to the scalars at even and odd indices within each q-register.
-// Thus we effectively use 32 bits of register space for each
-// entry in the result matrix. The accumulators register layout
-// is the same as was described above for the global 32-bit
-// accumulators (3 cells of size 4x4 in diagonal-major order)
-// with the only difference that instead of 32bit values we have
-// pairs of 16bit values.
-//
-// A 2x4 cell of Rhs is stored in 8bit in d0.
-// A 12x2 block of 3 4x2 cells Lhs is stored in 8bit in d1--d3.
-//
-// +--------+--------+--------+--------+
-// |d0[0] |d0[2] |d0[4] |d0[6] |
-// Rhs +--------+--------+--------+--------+
-// |d0[1] |d0[3] |d0[5] |d0[7] |
-// +--------+--------+--------+--------+
-//
-// | | | | |
-//
-// Lhs | | | | |
-//
-// +-----+-----+ - - - +--------+--------+--------+--------+
-// |d1[0]|d1[1]| |q4[0,1] |q5[0,1] |q6[0,1] |q7[0,1] |
-// |d1[2]|d1[3]| |q7[2,3] |q4[2,3] |q5[2,3] |q6[2,3] |
-// |d1[4]|d1[5]| |q6[4,5] |q7[4,5] |q4[4,5] |q5[4,5] |
-// |d1[6]|d1[7]| |q5[6,7] |q6[6,7] |q7[6,7] |q4[6,7] |
-// +-----+-----+ - - - +--------+--------+--------+--------+
-// |d2[0]|d2[1]| |q8[0,1] |q8[0,1] |q8[0,1] |q8[0,1] |
-// |d2[2]|d2[3]| |q9[2,3] |q9[2,3] |q9[2,3] |q9[2,3] |
-// |d2[4]|d2[5]| |q10[4,5]|q10[4,5]|q10[4,5]|q10[4,5]|
-// |d2[6]|d2[7]| |q11[6,7]|q11[6,7]|q11[6,7]|q11[6,7]|
-// +-----+-----+ - - - +--------+--------+--------+--------+
-// |d3[0]|d3[1]| |q12[0,1]|q12[0,1]|q12[0,1]|q12[0,1]|
-// |d3[2]|d3[3]| |q13[2,3]|q13[2,3]|q13[2,3]|q13[2,3]|
-// |d3[4]|d3[5]| |q14[4,5]|q14[4,5]|q14[4,5]|q14[4,5]|
-// |d3[6]|d3[7]| |q15[6,7]|q15[6,7]|q15[6,7]|q15[6,7]|
-// +-----+-----+ - - - +--------+--------+--------+--------+
-//
-// Local 16-bit accumulators
-// Note: 2 scalars per matrix entry
+ // Overview of register layout:
+ //
+ // Registers q4--q16 are the local 16-bit accumulators.
+ // However, each entry in the result matrix is represented
+ // by *two* local 16-bit accumulators: one for even levels
+ // of depth and one for odd levels of depth. These correspond
+ // to the scalars at even and odd indices within each q-register.
+ // Thus we effectively use 32 bits of register space for each
+ // entry in the result matrix. The accumulators register layout
+ // is the same as was described above for the global 32-bit
+ // accumulators (3 cells of size 4x4 in diagonal-major order)
+ // with the only difference that instead of 32bit values we have
+ // pairs of 16bit values.
+ //
+ // A 2x4 cell of Rhs is stored in 8bit in d0.
+ // A 12x2 block of 3 4x2 cells Lhs is stored in 8bit in d1--d3.
+ //
+ // +--------+--------+--------+--------+
+ // |d0[0] |d0[2] |d0[4] |d0[6] |
+ // Rhs +--------+--------+--------+--------+
+ // |d0[1] |d0[3] |d0[5] |d0[7] |
+ // +--------+--------+--------+--------+
+ //
+ // | | | | |
+ //
+ // Lhs | | | | |
+ //
+ // +-----+-----+ - - - +--------+--------+--------+--------+
+ // |d1[0]|d1[1]| |q4[0,1] |q5[0,1] |q6[0,1] |q7[0,1] |
+ // |d1[2]|d1[3]| |q7[2,3] |q4[2,3] |q5[2,3] |q6[2,3] |
+ // |d1[4]|d1[5]| |q6[4,5] |q7[4,5] |q4[4,5] |q5[4,5] |
+ // |d1[6]|d1[7]| |q5[6,7] |q6[6,7] |q7[6,7] |q4[6,7] |
+ // +-----+-----+ - - - +--------+--------+--------+--------+
+ // |d2[0]|d2[1]| |q8[0,1] |q8[0,1] |q8[0,1] |q8[0,1] |
+ // |d2[2]|d2[3]| |q9[2,3] |q9[2,3] |q9[2,3] |q9[2,3] |
+ // |d2[4]|d2[5]| |q10[4,5]|q10[4,5]|q10[4,5]|q10[4,5]|
+ // |d2[6]|d2[7]| |q11[6,7]|q11[6,7]|q11[6,7]|q11[6,7]|
+ // +-----+-----+ - - - +--------+--------+--------+--------+
+ // |d3[0]|d3[1]| |q12[0,1]|q12[0,1]|q12[0,1]|q12[0,1]|
+ // |d3[2]|d3[3]| |q13[2,3]|q13[2,3]|q13[2,3]|q13[2,3]|
+ // |d3[4]|d3[5]| |q14[4,5]|q14[4,5]|q14[4,5]|q14[4,5]|
+ // |d3[6]|d3[7]| |q15[6,7]|q15[6,7]|q15[6,7]|q15[6,7]|
+ // +-----+-----+ - - - +--------+--------+--------+--------+
+ //
+ // Local 16-bit accumulators
+ // Note: 2 scalars per matrix entry
#define GEMMLOWP_ACCUMULATE_2_LEVELS_OF_DEPTH \
/* Load 3 Lhs cells of size 4x2 */ \
}
};
-
// Our main GEMM kernel.
struct NEON_64_Kernel12x8Depth2 : KernelBase {
typedef KernelFormat<KernelSideFormat<CellFormat<4, 2>, 3>,
#ifndef GEMMLOWP_INTERNAL_MULTI_THREAD_GEMM_H_
#define GEMMLOWP_INTERNAL_MULTI_THREAD_GEMM_H_
-#include <pthread.h>
-#include <unistd.h>
#include <vector>
#include "single_thread_gemm.h"
#undef GEMMLOWP_NOP
inline void WriteBarrier() {
-#ifdef GEMMLOWP_ARM_32
+#if defined(_MSC_VER)
+ MemoryBarrier();
+#elif defined(GEMMLOWP_ARM_32)
asm volatile("" ::: "memory");
#elif defined(GEMMLOWP_ARM_64)
asm volatile("dmb ishst" ::: "memory");
}
inline void ReadBarrier() {
-#ifdef GEMMLOWP_ARM_32
+#if defined(_MSC_VER)
+ MemoryBarrier();
+#elif defined(GEMMLOWP_ARM_32)
asm volatile("" ::: "memory");
#elif defined(GEMMLOWP_ARM_64)
asm volatile("dmb ishld" ::: "memory");
// to have finished working.
class BlockingCounter {
public:
- BlockingCounter()
- : cond_(PTHREAD_COND_INITIALIZER),
- mutex_(PTHREAD_MUTEX_INITIALIZER),
- count_(0),
- initial_count_(0) {}
+ BlockingCounter() : count_(0), initial_count_(0) {
+ pthread_cond_init(&cond_, nullptr);
+ pthread_mutex_init(&mutex_, nullptr);
+ }
+
+ ~BlockingCounter() {
+ pthread_cond_destroy(&cond_);
+ pthread_mutex_destroy(&mutex_);
+ }
// Sets/resets the counter; initial_count is the number of
// decrementing events that the Wait() call will be waiting for.
#else
// This is likely unnecessary, but is kept to ensure regressions are not
// introduced.
+#ifndef _WIN32
asm volatile("" ::: "memory");
+#endif
#endif
const std::size_t count_value = count_;
if (count_value) {
explicit Worker(BlockingCounter* counter_to_decrement_when_ready)
: task_(nullptr),
- state_cond_(PTHREAD_COND_INITIALIZER),
- state_mutex_(PTHREAD_MUTEX_INITIALIZER),
state_(State::ThreadStartup),
counter_to_decrement_when_ready_(counter_to_decrement_when_ready) {
+ pthread_cond_init(&state_cond_, nullptr);
+ pthread_mutex_init(&state_mutex_, nullptr);
pthread_create(&thread_, nullptr, ThreadFunc, this);
}
~Worker() {
ChangeState(State::ExitAsSoonAsPossible);
pthread_join(thread_, nullptr);
+ pthread_cond_destroy(&state_cond_);
+ pthread_mutex_destroy(&state_mutex_);
}
// Changes State; may be called from either the worker thread
void Execute(const std::vector<Task*>& tasks) {
assert(tasks.size() >= 1);
// One of the tasks will be run on the current thread.
- int workers_count = tasks.size() - 1;
+ std::size_t workers_count = tasks.size() - 1;
CreateWorkers(workers_count);
assert(workers_count <= workers_.size());
counter_to_decrement_when_ready_.Reset(workers_count);
int n = 0;
- std::for_each(tasks.begin(), --tasks.end(), [this, &n](Task *task) {
- workers_[n++]->StartWork(task);
- });
+ std::for_each(tasks.begin(), --tasks.end(),
+ [this, &n](Task* task) { workers_[n++]->StartWork(task); });
// Execute the remaining workload immediately on the current thread.
Task* task = tasks.back();
task->local_allocator = &main_thread_task_allocator_;
counter_to_decrement_when_ready_.Wait();
// Cleanup tasks (best to do this from the same thread that allocated
// the memory).
- std::for_each(tasks.begin(), tasks.end(), [](Task *task) {
- delete task;
- });
+ std::for_each(tasks.begin(), tasks.end(), [](Task* task) { delete task; });
}
private:
template <typename KernelFormat, typename InputScalar, typename OutputScalar,
typename BitDepthParams, MapOrder LhsOrder, MapOrder RhsOrder,
MapOrder ResultOrder, typename LhsOffset, typename RhsOffset,
- typename OutputPipelineType, typename GemmContextType>
+ typename OutputPipelineType, typename GemmContextType>
struct GemmWithPackedRhsTask : Task {
typedef PackedSideBlock<typename KernelFormat::Lhs> PackedLhs;
typedef PackedSideBlock<typename KernelFormat::Rhs> PackedRhs;
- GemmWithPackedRhsTask(GemmContextType* _context,
- const KernelBase& _kernel,
+ GemmWithPackedRhsTask(GemmContextType* _context, const KernelBase& _kernel,
const MatrixMap<const InputScalar, LhsOrder>& _lhs,
const PackedRhs& _packed_rhs,
MatrixMap<OutputScalar, ResultOrder>* _result,
const MatrixBlockBounds& _result_block,
const LhsOffset& _lhs_offset,
const RhsOffset& _rhs_offset,
+ const BlockParams& _block_params,
const OutputPipelineType& _output_pipeline)
: context(_context),
kernel(_kernel),
result_block(_result_block),
lhs_offset(_lhs_offset),
rhs_offset(_rhs_offset),
+ block_params(_block_params),
output_pipeline(_output_pipeline) {}
void Run() override {
const int cols = result_block.cols;
const int depth = lhs.cols();
- BlockParams block_params;
- block_params.Init<KernelFormat>(rows, cols, depth, 1,
- context->l1_bytes_to_use(),
- context->l2_bytes_to_use(),
- context->l2_rhs_factor());
-
PackedLhs packed_lhs(Side::Lhs, local_allocator, block_params);
PackedResult packed_result(local_allocator, block_params);
const MatrixBlockBounds result_block;
const LhsOffset& lhs_offset;
const RhsOffset& rhs_offset;
+ const BlockParams& block_params;
const OutputPipelineType& output_pipeline;
};
WorkersPool workers_pool_;
};
-// Needed by chrome native builds
-#ifndef _SC_NPROCESSORS_CONF
-#define _SC_NPROCESSORS_CONF _SC_NPROCESSORS_ONLN
-#endif
-
// Determines how many threads should be used for a given Gemm
// operation.
template <int KernelRows>
}
// Determine the maximum number of threads.
- int max_count = max_num_threads;
- // The special value 0 means try to determine the total number of cores.
- if (max_count == 0) {
- // No user-set maximum number of threads, so we need to
- // do some hardware detection.
- // This is expensive to query so we do it only once.
- // Too bad for dynamicness. Also, we dont use the c++11 standard getter
- // because Google's coding style currently bans #include <thread_>.
- static const int hardware_threads_count =
- static_cast<int>(sysconf(_SC_NPROCESSORS_CONF));
-
- max_count = hardware_threads_count;
- }
+ int max_count = GetHardwareConcurrency(max_num_threads);
// Basic calculation: take into account max pool size, and
// how many rows we have to feed our kernel.
auto* workers_pool = context->workers_pool();
BlockParams block_params;
- block_params.Init<KernelFormat>(rows, cols, depth, task_count,
- context->l1_bytes_to_use(),
- context->l2_bytes_to_use(),
- context->l2_rhs_factor());
+ block_params.Init<KernelFormat>(
+ rows, cols, depth, task_count, context->l1_bytes_to_use(),
+ context->l2_bytes_to_use(), context->l2_rhs_factor());
PackedSideBlock<typename KernelFormat::Rhs> packed_rhs(Side::Rhs, allocator,
block_params);
int next_start_row = 0;
for (int n = 0; n < task_count; ++n) {
int start_row = next_start_row;
- next_start_row = std::min(rows, RoundUp<KernelFormat::kRows>(
- rows * (n + 1) / task_count));
+ next_start_row = std::min(
+ rows, RoundUp<KernelFormat::kRows>(rows * (n + 1) / task_count));
int block_rows = next_start_row - start_row;
auto lhs_block = lhs.block(start_row, 0, block_rows, depth);
- typedef GemmWithPackedRhsTask<
- KernelFormat, InputScalar, OutputScalar, BitDepthParams, LhsOrder,
- RhsOrder, ResultOrder, LhsOffset, RhsOffset, OutputPipelineType,
- GemmContextType>
+ typedef GemmWithPackedRhsTask<KernelFormat, InputScalar, OutputScalar,
+ BitDepthParams, LhsOrder, RhsOrder,
+ ResultOrder, LhsOffset, RhsOffset,
+ OutputPipelineType, GemmContextType>
TaskType;
- tasks.push_back(new TaskType(context, kernel, lhs_block, packed_rhs, result,
- MatrixBlockBounds(start_row, c, block_rows, cs),
- lhs_offset, rhs_offset, output_pipeline));
+ tasks.push_back(
+ new TaskType(context, kernel, lhs_block, packed_rhs, result,
+ MatrixBlockBounds(start_row, c, block_rows, cs),
+ lhs_offset, rhs_offset, block_params, output_pipeline));
}
// Execute the work on the workers (and partially on this thread).
workers_pool->Execute(tasks);
template <int Size>
struct OutputStageEvalBufferImpl<
- OutputStageQuantizeDownInt32ToUint8ScaleByFixedPoint,
+ OutputStageQuantizeDownInt32ByFixedPoint,
RegisterBuffer<std::int32_t, Size>> {
typedef RegisterBuffer<std::int32_t, Size> InputType;
typedef RegisterBuffer<std::int32_t, Size> OutputType;
- typedef OutputStageQuantizeDownInt32ToUint8ScaleByFixedPoint OutputStage;
+ typedef OutputStageQuantizeDownInt32ByFixedPoint OutputStage;
OutputStageEvalBufferImpl(const OutputStage& s) : output_stage(s) {}
const OutputStage& output_stage;
};
+template <int Size>
+struct OutputStageEvalBufferImpl<OutputStageScaleInt32ByFixedPointAndExponent,
+ RegisterBuffer<std::int32_t, Size>> {
+ typedef RegisterBuffer<std::int32_t, Size> InputType;
+ typedef RegisterBuffer<std::int32_t, Size> OutputType;
+
+ typedef OutputStageScaleInt32ByFixedPointAndExponent OutputStage;
+
+ OutputStageEvalBufferImpl(const OutputStage& s) : output_stage(s) {
+ left_shift = std::max(0, output_stage.result_exponent);
+ right_shift = std::max(0, -output_stage.result_exponent);
+ }
+
+ OutputType Eval(InputType input) const {
+ OutputType output;
+ using RegisterType = typename InputType::RegisterType;
+ const RegisterType result_offset_after_shift =
+ Dup<RegisterType>(output_stage.result_offset_after_shift);
+ for (int i = 0; i < InputType::kRegisterCount; i++) {
+ const RegisterType mulhigh_val = SaturatingRoundingDoublingHighMul(
+ ShiftLeft(input.reg[i], left_shift),
+ output_stage.result_fixedpoint_multiplier);
+ output.reg[i] = Add(RoundingDivideByPOT(mulhigh_val, right_shift),
+ result_offset_after_shift);
+ }
+ return output;
+ }
+
+ const OutputStage& output_stage;
+ int left_shift;
+ int right_shift;
+};
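For reference, the per-lane arithmetic performed by the Eval() loop above can be written in scalar form roughly as below. This is an illustrative sketch assuming gemmlowp's usual fixed-point semantics (rounding doubling high multiply, then rounding divide by a power of two); the saturation of the INT32_MIN * INT32_MIN corner case is omitted for brevity, and the helper name is not part of the library.

#include <algorithm>
#include <cstdint>

// Scalar sketch of OutputStageScaleInt32ByFixedPointAndExponent per lane.
// Illustrative only; the SIMD path above is the one actually used.
inline std::int32_t ScaleByFixedPointAndExponent(
    std::int32_t x, std::int32_t multiplier, int exponent,
    std::int32_t offset_after_shift) {
  const int left_shift = std::max(0, exponent);
  const int right_shift = std::max(0, -exponent);
  // Left shift in 32-bit lanes, as ShiftLeft does on the registers.
  const std::int32_t shifted_x =
      static_cast<std::int32_t>(static_cast<std::uint32_t>(x) << left_shift);
  // Rounding doubling high multiply: high 32 bits of 2 * shifted_x * multiplier.
  const std::int64_t ab = static_cast<std::int64_t>(shifted_x) * multiplier;
  const std::int64_t nudge = ab >= 0 ? (1ll << 30) : (1 - (1ll << 30));
  std::int32_t high = static_cast<std::int32_t>((ab + nudge) / (1ll << 31));
  // Rounding divide by 2^right_shift, matching RoundingDivideByPOT.
  if (right_shift > 0) {
    const std::int32_t mask = (1 << right_shift) - 1;
    const std::int32_t remainder = high & mask;
    const std::int32_t threshold = (mask >> 1) + (high < 0 ? 1 : 0);
    high = (high >> right_shift) + (remainder > threshold ? 1 : 0);
  }
  return high + offset_after_shift;
}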
+
// Implementation of OutputStageSaturatingCastToUint8 for scalar data
template <int Size>
struct OutputStageEvalBufferImpl<OutputStageSaturatingCastToUint8,
}
};
+// Implementation of OutputStageSaturatingCastToInt16 for scalar data
+template <int Size>
+struct OutputStageEvalBufferImpl<OutputStageSaturatingCastToInt16,
+ RegisterBuffer<std::int32_t, Size>> {
+ typedef RegisterBuffer<std::int32_t, Size> InputType;
+ typedef RegisterBuffer<std::int16_t, Size> OutputType;
+ static_assert(InputType::kRegisterLanes == 1,
+ "This path is only for scalar values");
+
+ typedef OutputStageSaturatingCastToInt16 OutputStage;
+
+ OutputStageEvalBufferImpl(const OutputStage&) {}
+
+ OutputType Eval(InputType input) const {
+ OutputType output;
+ for (int i = 0; i < InputType::kRegisterCount; i++) {
+ std::int32_t data = input.reg[i];
+ output.reg[i] = data > 32767 ? 32767 : data < -32768 ? -32768 : data;
+ }
+ return output;
+ }
+};
+
template <int Rows, int Cols, typename VectorType>
struct OutputStageEvalImpl<OutputStageBiasAddition<VectorType>,
RegisterBlock<std::int32_t, Rows, Cols>> {
#include "output_neon.h"
#elif defined(GEMMLOWP_SSE4)
#include "output_sse.h"
+#elif defined(GEMMLOWP_MSA)
+#include "output_msa.h"
#endif
#endif // GEMMLOWP_INTERNAL_OUTPUT_H_
--- /dev/null
+// Copyright 2018 The Gemmlowp Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// output_msa.h: optimized MSA specializations of the templates in output.h.
+
+#ifndef GEMMLOWP_INTERNAL_OUTPUT_MSA_H_
+#define GEMMLOWP_INTERNAL_OUTPUT_MSA_H_
+
+#include "output.h"
+
+#include <msa.h>
+
+namespace gemmlowp {
+
+template <>
+struct OutputStageEvalBufferImpl<OutputStageSaturatingCastToUint8,
+ RegBufferInt32<4>> {
+ typedef RegBufferInt32<4> InputType;
+ typedef RegBufferUint8<4> OutputType;
+
+ typedef OutputStageSaturatingCastToUint8 OutputStage;
+
+ OutputStageEvalBufferImpl(const OutputStage&) {}
+
+ OutputType Eval(InputType input) const {
+ OutputType output;
+ // Signed saturate each 32-bit element to 9 bits
+ // (this fully handles non-negative elements, clamping them to [0, 255]).
+ v4i32 tmp = __builtin_msa_sat_s_w(input.reg[0], 8);
+ // Pack every 32-bit element into 16 bits.
+ tmp = reinterpret_cast<v4i32>(__builtin_msa_pckev_h(
+ reinterpret_cast<v8i16>(tmp), reinterpret_cast<v8i16>(tmp)));
+ // Detect negative elements with arithmetic shift right (we
+ // get a 16-bit mask of all zeroes or all ones for every element).
+ v8i16 signs = __builtin_msa_srai_h(reinterpret_cast<v8i16>(tmp), 15);
+ // Zero out negative elements.
+ signs = reinterpret_cast<v8i16>(__builtin_msa_bseli_b(
+ reinterpret_cast<v16u8>(signs), reinterpret_cast<v16u8>(tmp), 0));
+ // Pack every element into 8 bits.
+ tmp = reinterpret_cast<v4i32>(__builtin_msa_pckev_b(
+ reinterpret_cast<v16i8>(signs), reinterpret_cast<v16i8>(signs)));
+ // Return 4 uint8_t elements as uint32_t.
+ output.reg[0] = __builtin_msa_copy_s_w(tmp, 0);
+ return output;
+ }
+};
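Per lane, the saturate/pack/mask sequence above amounts to clamping a signed 32-bit accumulator into the uint8 range. A scalar sketch of that equivalence, illustrative only and not used by the MSA path:

#include <cstdint>

// Scalar equivalent of one lane of the MSA sequence above:
// sat_s_w(.., 8) caps non-negative values at 255, the sign mask zeroes
// negative values, and the final pack keeps the low 8 bits.
inline std::uint8_t SaturatingCastToUint8(std::int32_t x) {
  if (x < 0) return 0;
  if (x > 255) return 255;
  return static_cast<std::uint8_t>(x);
}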
+
+template <>
+struct OutputStageEvalBufferImpl<OutputStageSaturatingCastToUint8,
+ RegBufferInt32<8>> {
+ typedef RegBufferInt32<8> InputType;
+ typedef RegBufferUint8<8> OutputType;
+
+ typedef OutputStageSaturatingCastToUint8 OutputStage;
+
+ OutputStageEvalBufferImpl(const OutputStage&) {}
+
+ OutputType Eval(InputType input) const {
+ OutputType output;
+ // Signed saturate each 32-bit element to 9 bits
+ // (this fully handles non-negative elements, clamping them to [0, 255]).
+ v4i32 tmp_lo = __builtin_msa_sat_s_w(input.reg[0], 8);
+ v4i32 tmp_hi = __builtin_msa_sat_s_w(input.reg[1], 8);
+ // Pack every 32-bit element into 16 bits,
+ // combining all 8 elements into one vector.
+ tmp_lo = reinterpret_cast<v4i32>(__builtin_msa_pckev_h(
+ reinterpret_cast<v8i16>(tmp_hi), reinterpret_cast<v8i16>(tmp_lo)));
+ // Detect negative elements with arithmetic shift right (we
+ // get a 16-bit mask of all zeroes or all ones for every element).
+ v8i16 signs = __builtin_msa_srai_h(reinterpret_cast<v8i16>(tmp_lo), 15);
+ // Zero out negative elements.
+ signs = reinterpret_cast<v8i16>(__builtin_msa_bseli_b(
+ reinterpret_cast<v16u8>(signs), reinterpret_cast<v16u8>(tmp_lo), 0));
+ // Pack every element into 8 bits.
+ tmp_lo = reinterpret_cast<v4i32>(__builtin_msa_pckev_b(
+ reinterpret_cast<v16i8>(signs), reinterpret_cast<v16i8>(signs)));
+ // Return 8 uint8_t elements as 2 uint32_t's.
+ output.reg[0] = __builtin_msa_copy_s_w(tmp_lo, 0);
+ output.reg[1] = __builtin_msa_copy_s_w(tmp_lo, 1);
+ return output;
+ }
+};
+
+#define GEMMLOWP_MIPS_SAT16(out, in0, in1, in2, in3) \
+ { \
+ v4i32 tmp0 = __builtin_msa_sat_s_w(in0, 8); \
+ v4i32 tmp1 = __builtin_msa_sat_s_w(in1, 8); \
+ v4i32 tmp2 = __builtin_msa_sat_s_w(in2, 8); \
+ v4i32 tmp3 = __builtin_msa_sat_s_w(in3, 8); \
+ tmp0 = reinterpret_cast<v4i32>(__builtin_msa_pckev_h( \
+ reinterpret_cast<v8i16>(tmp1), reinterpret_cast<v8i16>(tmp0))); \
+ tmp2 = reinterpret_cast<v4i32>(__builtin_msa_pckev_h( \
+ reinterpret_cast<v8i16>(tmp3), reinterpret_cast<v8i16>(tmp2))); \
+ v8i16 signs0 = __builtin_msa_srai_h(reinterpret_cast<v8i16>(tmp0), 15); \
+ v8i16 signs1 = __builtin_msa_srai_h(reinterpret_cast<v8i16>(tmp2), 15); \
+ signs0 = reinterpret_cast<v8i16>(__builtin_msa_bseli_b( \
+ reinterpret_cast<v16u8>(signs0), reinterpret_cast<v16u8>(tmp0), 0)); \
+ signs1 = reinterpret_cast<v8i16>(__builtin_msa_bseli_b( \
+ reinterpret_cast<v16u8>(signs1), reinterpret_cast<v16u8>(tmp2), 0)); \
+ signs0 = reinterpret_cast<v8i16>(__builtin_msa_pckev_b( \
+ reinterpret_cast<v16i8>(signs1), reinterpret_cast<v16i8>(signs0))); \
+ out = reinterpret_cast<v16i8>(signs0); \
+ }
+
+template <>
+struct OutputStageEvalBufferImpl<OutputStageSaturatingCastToUint8,
+ RegBufferInt32<16>> {
+ typedef RegBufferInt32<16> InputType;
+ typedef RegBufferUint8<16> OutputType;
+
+ typedef OutputStageSaturatingCastToUint8 OutputStage;
+
+ OutputStageEvalBufferImpl(const OutputStage&) {}
+
+ OutputType Eval(InputType input) const {
+ OutputType output;
+ GEMMLOWP_MIPS_SAT16(output.reg[0], input.reg[0], input.reg[1], input.reg[2],
+ input.reg[3]);
+ return output;
+ }
+};
+
+template <>
+struct OutputStageEvalBufferImpl<OutputStageSaturatingCastToUint8,
+ RegBufferInt32<32>> {
+ typedef RegBufferInt32<32> InputType;
+ typedef RegBufferUint8<32> OutputType;
+
+ typedef OutputStageSaturatingCastToUint8 OutputStage;
+
+ OutputStageEvalBufferImpl(const OutputStage&) {}
+
+ OutputType Eval(InputType input) const {
+ OutputType output;
+ GEMMLOWP_MIPS_SAT16(output.reg[0], input.reg[0], input.reg[1], input.reg[2],
+ input.reg[3]);
+ GEMMLOWP_MIPS_SAT16(output.reg[1], input.reg[4], input.reg[5], input.reg[6],
+ input.reg[7]);
+ return output;
+ }
+};
+
+#undef GEMMLOWP_MIPS_SAT16
+
+template <typename DstType>
+struct StoreFinalOutputImpl<RegBlockInt32<4, 1>, DstType> {
+ static void Run(const RegBlockInt32<4, 1>& src, DstType* dst, int row,
+ int col) {
+ if (DstType::kOrder == MapOrder::ColMajor) {
+ StoreInt32x4(dst->data(row, col), src.buf.reg[0]);
+ } else {
+ *dst->data(row + 0, col) = GetLane<0>(src.buf.reg[0]);
+ *dst->data(row + 1, col) = GetLane<1>(src.buf.reg[0]);
+ *dst->data(row + 2, col) = GetLane<2>(src.buf.reg[0]);
+ *dst->data(row + 3, col) = GetLane<3>(src.buf.reg[0]);
+ }
+ }
+};
+
+template <typename DstType>
+struct StoreFinalOutputImpl<RegBlockInt32<8, 1>, DstType> {
+ static void Run(const RegBlockInt32<8, 1>& src, DstType* dst, int row,
+ int col) {
+ if (DstType::kOrder == MapOrder::ColMajor) {
+ StoreInt32x4(dst->data(row, col), src.buf.reg[0]);
+ StoreInt32x4(dst->data(row + 4, col), src.buf.reg[1]);
+ } else {
+ *dst->data(row + 0, col) = GetLane<0>(src.buf.reg[0]);
+ *dst->data(row + 1, col) = GetLane<1>(src.buf.reg[0]);
+ *dst->data(row + 2, col) = GetLane<2>(src.buf.reg[0]);
+ *dst->data(row + 3, col) = GetLane<3>(src.buf.reg[0]);
+ *dst->data(row + 4, col) = GetLane<0>(src.buf.reg[1]);
+ *dst->data(row + 5, col) = GetLane<1>(src.buf.reg[1]);
+ *dst->data(row + 6, col) = GetLane<2>(src.buf.reg[1]);
+ *dst->data(row + 7, col) = GetLane<3>(src.buf.reg[1]);
+ }
+ }
+};
+
+inline RegBlockInt32<4, 4> Transpose(const RegBlockInt32<4, 4>& src) {
+ RegBlockInt32<4, 4> result;
+ v4i32 tmp0, tmp1;
+ tmp0 = __builtin_msa_ilvr_w(src.buf.reg[1], src.buf.reg[0]);
+ tmp1 = __builtin_msa_ilvr_w(src.buf.reg[3], src.buf.reg[2]);
+ result.buf.reg[0] = reinterpret_cast<v4i32>(__builtin_msa_ilvr_d(
+ reinterpret_cast<v2i64>(tmp1), reinterpret_cast<v2i64>(tmp0)));
+ result.buf.reg[1] = reinterpret_cast<v4i32>(__builtin_msa_ilvl_d(
+ reinterpret_cast<v2i64>(tmp1), reinterpret_cast<v2i64>(tmp0)));
+ tmp0 = __builtin_msa_ilvl_w(src.buf.reg[1], src.buf.reg[0]);
+ tmp1 = __builtin_msa_ilvl_w(src.buf.reg[3], src.buf.reg[2]);
+ result.buf.reg[2] = reinterpret_cast<v4i32>(__builtin_msa_ilvr_d(
+ reinterpret_cast<v2i64>(tmp1), reinterpret_cast<v2i64>(tmp0)));
+ result.buf.reg[3] = reinterpret_cast<v4i32>(__builtin_msa_ilvl_d(
+ reinterpret_cast<v2i64>(tmp1), reinterpret_cast<v2i64>(tmp0)));
+ return result;
+}
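The ilvr/ilvl word and doubleword interleaves above implement a 4x4 lane transpose of the register block: the lane index and the register index are swapped. A scalar reference of the intended result, illustrative only:

#include <cstdint>

// Reference semantics of Transpose() above, modeling buf.reg[i] as the i-th
// group of four int32 lanes of a 4x4 block: out[i][j] = in[j][i].
inline void Transpose4x4Reference(const std::int32_t in[4][4],
                                  std::int32_t out[4][4]) {
  for (int i = 0; i < 4; i++) {
    for (int j = 0; j < 4; j++) {
      out[i][j] = in[j][i];
    }
  }
}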
+
+template <typename DstType>
+struct StoreFinalOutputImpl<RegBlockInt32<4, 4>, DstType> {
+ static void Run(const RegBlockInt32<4, 4>& src, DstType* dst, int row,
+ int col) {
+ if (DstType::kOrder == MapOrder::ColMajor) {
+ for (int i = 0; i < 4; i++) {
+ StoreInt32x4(dst->data(row, col + i), src.buf.reg[i]);
+ }
+ } else {
+ const auto transpose = Transpose(src);
+ for (int i = 0; i < 4; i++) {
+ StoreInt32x4(dst->data(row + i, col), transpose.buf.reg[i]);
+ }
+ }
+ }
+};
+
+template <typename DstType>
+struct StoreFinalOutputImpl<RegBlockInt32<8, 4>, DstType> {
+ static void Run(const RegBlockInt32<8, 4>& src, DstType* dst, int row,
+ int col) {
+ if (DstType::kOrder == MapOrder::ColMajor) {
+ for (int i = 0; i < 4; i++) {
+ StoreInt32x4(dst->data(row, col + i), src.buf.reg[2 * i]);
+ StoreInt32x4(dst->data(row + 4, col + i), src.buf.reg[2 * i + 1]);
+ }
+ } else {
+ RegBlockInt32<4, 4> top;
+ top.buf.reg[0] = src.buf.reg[0];
+ top.buf.reg[1] = src.buf.reg[2];
+ top.buf.reg[2] = src.buf.reg[4];
+ top.buf.reg[3] = src.buf.reg[6];
+ const auto transpose_top = Transpose(top);
+ for (int i = 0; i < 4; i++) {
+ StoreInt32x4(dst->data(row + i, col), transpose_top.buf.reg[i]);
+ }
+ RegBlockInt32<4, 4> bottom;
+ bottom.buf.reg[0] = src.buf.reg[1];
+ bottom.buf.reg[1] = src.buf.reg[3];
+ bottom.buf.reg[2] = src.buf.reg[5];
+ bottom.buf.reg[3] = src.buf.reg[7];
+ const auto transpose_bottom = Transpose(bottom);
+ for (int i = 0; i < 4; i++) {
+ StoreInt32x4(dst->data(row + 4 + i, col), transpose_bottom.buf.reg[i]);
+ }
+ }
+ }
+};
+
+template <typename DstType>
+struct StoreFinalOutputImpl<RegBlockInt32<8, 8>, DstType> {
+ static void Run(const RegBlockInt32<8, 8>& src, DstType* dst, int row,
+ int col) {
+ if (DstType::kOrder == MapOrder::ColMajor) {
+ for (int i = 0; i < 8; i++) {
+ StoreInt32x4(dst->data(row, col + i), src.buf.reg[2 * i]);
+ StoreInt32x4(dst->data(row + 4, col + i), src.buf.reg[2 * i + 1]);
+ }
+ } else {
+ RegBlockInt32<4, 4> top_left;
+ top_left.buf.reg[0] = src.buf.reg[0];
+ top_left.buf.reg[1] = src.buf.reg[2];
+ top_left.buf.reg[2] = src.buf.reg[4];
+ top_left.buf.reg[3] = src.buf.reg[6];
+ const auto transpose_top_left = Transpose(top_left);
+ for (int i = 0; i < 4; i++) {
+ StoreInt32x4(dst->data(row + i, col), transpose_top_left.buf.reg[i]);
+ }
+ RegBlockInt32<4, 4> bottom_left;
+ bottom_left.buf.reg[0] = src.buf.reg[1];
+ bottom_left.buf.reg[1] = src.buf.reg[3];
+ bottom_left.buf.reg[2] = src.buf.reg[5];
+ bottom_left.buf.reg[3] = src.buf.reg[7];
+ const auto transpose_bottom_left = Transpose(bottom_left);
+ for (int i = 0; i < 4; i++) {
+ StoreInt32x4(dst->data(row + 4 + i, col),
+ transpose_bottom_left.buf.reg[i]);
+ }
+ RegBlockInt32<4, 4> top_right;
+ top_right.buf.reg[0] = src.buf.reg[8];
+ top_right.buf.reg[1] = src.buf.reg[10];
+ top_right.buf.reg[2] = src.buf.reg[12];
+ top_right.buf.reg[3] = src.buf.reg[14];
+ const auto transpose_top_right = Transpose(top_right);
+ for (int i = 0; i < 4; i++) {
+ StoreInt32x4(dst->data(row + i, col + 4),
+ transpose_top_right.buf.reg[i]);
+ }
+ RegBlockInt32<4, 4> bottom_right;
+ bottom_right.buf.reg[0] = src.buf.reg[9];
+ bottom_right.buf.reg[1] = src.buf.reg[11];
+ bottom_right.buf.reg[2] = src.buf.reg[13];
+ bottom_right.buf.reg[3] = src.buf.reg[15];
+ const auto transpose_bottom_right = Transpose(bottom_right);
+ for (int i = 0; i < 4; i++) {
+ StoreInt32x4(dst->data(row + 4 + i, col + 4),
+ transpose_bottom_right.buf.reg[i]);
+ }
+ }
+ }
+};
+
+template <typename DstType>
+struct StoreFinalOutputImpl<RegBlockInt32<1, 4>, DstType> {
+ static void Run(const RegBlockInt32<1, 4>& src, DstType* dst, int row,
+ int col) {
+ if (DstType::kOrder == MapOrder::ColMajor) {
+ *dst->data(row, col + 0) = GetLane<0>(src.buf.reg[0]);
+ *dst->data(row, col + 1) = GetLane<1>(src.buf.reg[0]);
+ *dst->data(row, col + 2) = GetLane<2>(src.buf.reg[0]);
+ *dst->data(row, col + 3) = GetLane<3>(src.buf.reg[0]);
+ } else {
+ StoreInt32x4(dst->data(row, col), src.buf.reg[0]);
+ }
+ }
+};
+
+template <typename DstType>
+struct StoreFinalOutputImpl<RegBlockUint8<4, 1>, DstType> {
+ static void Run(const RegBlockUint8<4, 1>& src, DstType* dst, int row,
+ int col) {
+ const std::uint32_t src_reg = src.buf.reg[0];
+ for (int i = 0; i < 4; i++) {
+ *dst->data(row + i, col) = (src_reg >> (8 * i));
+ }
+ }
+};
+
+template <typename DstType>
+struct StoreFinalOutputImpl<RegBlockUint8<8, 1>, DstType> {
+ static void Run(const RegBlockUint8<8, 1>& src, DstType* dst, int row,
+ int col) {
+ for (int i = 0; i < 4; i++) {
+ *dst->data(row + i, col) = (src.buf.reg[0] >> (8 * i));
+ }
+ for (int i = 0; i < 4; i++) {
+ *dst->data(row + 4 + i, col) = (src.buf.reg[1] >> (8 * i));
+ }
+ }
+};
+
+template <typename DstType>
+struct StoreFinalOutputImpl<RegBlockUint8<1, 4>, DstType> {
+ static void Run(const RegBlockUint8<1, 4>& src, DstType* dst, int row,
+ int col) {
+ for (int i = 0; i < 4; i++) {
+ *dst->data(row, col + i) = (src.buf.reg[0] >> (8 * i));
+ }
+ }
+};
+
+template <typename DstType>
+struct StoreFinalOutputImpl<RegBlockUint8<4, 4>, DstType> {
+ static void Run(const RegBlockUint8<4, 4>& src, DstType* dst, int row,
+ int col) {
+ std::uint8_t buf[16];
+ StoreUint8x16(buf, src.buf.reg[0]);
+ for (int c = 0; c < 4; c++) {
+ for (int r = 0; r < 4; r++) {
+ *dst->data(row + r, col + c) = buf[r + 4 * c];
+ }
+ }
+ }
+};
+
+template <typename DstType>
+struct StoreFinalOutputImpl<RegBlockUint8<8, 4>, DstType> {
+ static void Run(const RegBlockUint8<8, 4>& src, DstType* dst, int row,
+ int col) {
+ std::uint8_t buf[32];
+ StoreUint8x16(buf, src.buf.reg[0]);
+ StoreUint8x16(buf + 16, src.buf.reg[1]);
+ for (int c = 0; c < 4; c++) {
+ for (int r = 0; r < 8; r++) {
+ *dst->data(row + r, col + c) = buf[r + 8 * c];
+ }
+ }
+ }
+};
+
+template <typename DstType>
+struct StoreFinalOutputImpl<RegBlockUint8<8, 8>, DstType> {
+ static void Run(const RegBlockUint8<8, 8>& src, DstType* dst, int row,
+ int col) {
+ std::uint8_t buf[64];
+ StoreUint8x16(buf, src.buf.reg[0]);
+ StoreUint8x16(buf + 16, src.buf.reg[1]);
+ StoreUint8x16(buf + 32, src.buf.reg[2]);
+ StoreUint8x16(buf + 48, src.buf.reg[3]);
+ for (int c = 0; c < 8; c++) {
+ for (int r = 0; r < 8; r++) {
+ *dst->data(row + r, col + c) = buf[r + 8 * c];
+ }
+ }
+ }
+};
+
+} // namespace gemmlowp
+
+#endif // GEMMLOWP_INTERNAL_OUTPUT_MSA_H_
}
};
+template <>
+struct OutputStageEvalBufferImpl<OutputStageSaturatingCastToInt16,
+ RegBufferInt32<4>> {
+ typedef RegBufferInt32<4> InputType;
+ typedef RegBufferInt16<4> OutputType;
+
+ typedef OutputStageSaturatingCastToInt16 OutputStage;
+
+ OutputStageEvalBufferImpl(const OutputStage&) {}
+
+ OutputType Eval(InputType input) const {
+ OutputType output;
+ output.reg[0] = vqmovn_s32(input.reg[0]);
+ return output;
+ }
+};
+
+template <>
+struct OutputStageEvalBufferImpl<OutputStageSaturatingCastToInt16,
+ RegBufferInt32<8>> {
+ typedef RegBufferInt32<8> InputType;
+ typedef RegBufferInt16<8> OutputType;
+
+ typedef OutputStageSaturatingCastToInt16 OutputStage;
+
+ OutputStageEvalBufferImpl(const OutputStage&) {}
+
+ OutputType Eval(InputType input) const {
+ OutputType output;
+ output.reg[0] =
+ vcombine_s16(vqmovn_s32(input.reg[0]), vqmovn_s32(input.reg[1]));
+ return output;
+ }
+};
+
+template <>
+struct OutputStageEvalBufferImpl<OutputStageSaturatingCastToInt16,
+ RegBufferInt32<16>> {
+ typedef RegBufferInt32<16> InputType;
+ typedef RegBufferInt16<16> OutputType;
+
+ typedef OutputStageSaturatingCastToInt16 OutputStage;
+
+ OutputStageEvalBufferImpl(const OutputStage&) {}
+
+ OutputType Eval(InputType input) const {
+ OutputType output;
+ output.reg[0] =
+ vcombine_s16(vqmovn_s32(input.reg[0]), vqmovn_s32(input.reg[1]));
+ output.reg[1] =
+ vcombine_s16(vqmovn_s32(input.reg[2]), vqmovn_s32(input.reg[3]));
+ return output;
+ }
+};
+
+template <>
+struct OutputStageEvalBufferImpl<OutputStageSaturatingCastToInt16,
+ RegBufferInt32<32>> {
+ typedef RegBufferInt32<32> InputType;
+ typedef RegBufferInt16<32> OutputType;
+
+ typedef OutputStageSaturatingCastToInt16 OutputStage;
+
+ OutputStageEvalBufferImpl(const OutputStage&) {}
+
+ OutputType Eval(InputType input) const {
+ OutputType output;
+ output.reg[0] =
+ vcombine_s16(vqmovn_s32(input.reg[0]), vqmovn_s32(input.reg[1]));
+ output.reg[1] =
+ vcombine_s16(vqmovn_s32(input.reg[2]), vqmovn_s32(input.reg[3]));
+ output.reg[2] =
+ vcombine_s16(vqmovn_s32(input.reg[4]), vqmovn_s32(input.reg[5]));
+ output.reg[3] =
+ vcombine_s16(vqmovn_s32(input.reg[6]), vqmovn_s32(input.reg[7]));
+ return output;
+ }
+};
+
template <typename DstType>
struct StoreFinalOutputImpl<RegBlockInt32<8, 1>, DstType> {
static void Run(const RegBlockInt32<8, 1>& src, DstType* dst, int row,
StoreInt32x4(dst->data(row, col), src.buf.reg[0]);
StoreInt32x4(dst->data(row + 4, col), src.buf.reg[1]);
} else {
- *dst->data(row + 0, col) = GetLane<0>(src.buf.reg[0]);
- *dst->data(row + 1, col) = GetLane<1>(src.buf.reg[0]);
- *dst->data(row + 2, col) = GetLane<2>(src.buf.reg[0]);
- *dst->data(row + 3, col) = GetLane<3>(src.buf.reg[0]);
- *dst->data(row + 4, col) = GetLane<0>(src.buf.reg[1]);
- *dst->data(row + 5, col) = GetLane<1>(src.buf.reg[1]);
- *dst->data(row + 6, col) = GetLane<2>(src.buf.reg[1]);
- *dst->data(row + 7, col) = GetLane<3>(src.buf.reg[1]);
+ vst1q_lane_s32(dst->data(row + 0, col), src.buf.reg[0], 0);
+ vst1q_lane_s32(dst->data(row + 1, col), src.buf.reg[0], 1);
+ vst1q_lane_s32(dst->data(row + 2, col), src.buf.reg[0], 2);
+ vst1q_lane_s32(dst->data(row + 3, col), src.buf.reg[0], 3);
+ vst1q_lane_s32(dst->data(row + 4, col), src.buf.reg[1], 0);
+ vst1q_lane_s32(dst->data(row + 5, col), src.buf.reg[1], 1);
+ vst1q_lane_s32(dst->data(row + 6, col), src.buf.reg[1], 2);
+ vst1q_lane_s32(dst->data(row + 7, col), src.buf.reg[1], 3);
+ }
+ }
+};
+
+template <typename DstType>
+struct StoreFinalOutputImpl<RegBlockInt16<4, 1>, DstType> {
+ static void Run(const RegBlockInt16<4, 1>& src, DstType* dst, int row,
+ int col) {
+ if (DstType::kOrder == MapOrder::ColMajor) {
+ StoreInt16x4(dst->data(row, col), src.buf.reg[0]);
+ } else {
+ vst1_lane_s16(dst->data(row + 0, col), src.buf.reg[0], 0);
+ vst1_lane_s16(dst->data(row + 1, col), src.buf.reg[0], 1);
+ vst1_lane_s16(dst->data(row + 2, col), src.buf.reg[0], 2);
+ vst1_lane_s16(dst->data(row + 3, col), src.buf.reg[0], 3);
+ }
+ }
+};
+
+template <typename DstType>
+struct StoreFinalOutputImpl<RegBlockInt16<8, 1>, DstType> {
+ static void Run(const RegBlockInt16<8, 1>& src, DstType* dst, int row,
+ int col) {
+ if (DstType::kOrder == MapOrder::ColMajor) {
+ StoreInt16x8(dst->data(row, col), src.buf.reg[0]);
+ } else {
+ vst1q_lane_s16(dst->data(row + 0, col), src.buf.reg[0], 0);
+ vst1q_lane_s16(dst->data(row + 1, col), src.buf.reg[0], 1);
+ vst1q_lane_s16(dst->data(row + 2, col), src.buf.reg[0], 2);
+ vst1q_lane_s16(dst->data(row + 3, col), src.buf.reg[0], 3);
+ vst1q_lane_s16(dst->data(row + 4, col), src.buf.reg[0], 4);
+ vst1q_lane_s16(dst->data(row + 5, col), src.buf.reg[0], 5);
+ vst1q_lane_s16(dst->data(row + 6, col), src.buf.reg[0], 6);
+ vst1q_lane_s16(dst->data(row + 7, col), src.buf.reg[0], 7);
}
}
};
}
};
+template <typename DstType>
+struct StoreFinalOutputImpl<RegBlockInt16<4, 4>, DstType> {
+ static void Run(const RegBlockInt16<4, 4>& src, DstType* dst, int row,
+ int col) {
+ if (DstType::kOrder == MapOrder::ColMajor) {
+ vst1_s16(dst->data(row, col + 0), vget_low_s16(src.buf.reg[0]));
+ vst1_s16(dst->data(row, col + 1), vget_high_s16(src.buf.reg[0]));
+ vst1_s16(dst->data(row, col + 2), vget_low_s16(src.buf.reg[1]));
+ vst1_s16(dst->data(row, col + 3), vget_high_s16(src.buf.reg[1]));
+ } else {
+ const int16x4x2_t t0 =
+ vtrn_s16(vget_low_s16(src.buf.reg[0]), vget_high_s16(src.buf.reg[0]));
+ const int16x4x2_t t1 =
+ vtrn_s16(vget_low_s16(src.buf.reg[1]), vget_high_s16(src.buf.reg[1]));
+ const int32x4x2_t t =
+ vtrnq_s32(vreinterpretq_s32_s16(vcombine_s16(t0.val[0], t0.val[1])),
+ vreinterpretq_s32_s16(vcombine_s16(t1.val[0], t1.val[1])));
+ vst1_s16(dst->data(row + 0, col),
+ vget_low_s16(vreinterpretq_s16_s32(t.val[0])));
+ vst1_s16(dst->data(row + 1, col),
+ vget_high_s16(vreinterpretq_s16_s32(t.val[0])));
+ vst1_s16(dst->data(row + 2, col),
+ vget_low_s16(vreinterpretq_s16_s32(t.val[1])));
+ vst1_s16(dst->data(row + 3, col),
+ vget_high_s16(vreinterpretq_s16_s32(t.val[1])));
+ }
+ }
+};
+
template <typename DstType>
struct StoreFinalOutputImpl<RegBlockInt32<8, 4>, DstType> {
static void Run(const RegBlockInt32<8, 4>& src, DstType* dst, int row,
}
};
+template <typename DstType>
+struct StoreFinalOutputImpl<RegBlockInt16<8, 4>, DstType> {
+ static void Run(const RegBlockInt16<8, 4>& src, DstType* dst, int row,
+ int col) {
+ if (DstType::kOrder == MapOrder::ColMajor) {
+ vst1q_s16(dst->data(row, col + 0), src.buf.reg[0]);
+ vst1q_s16(dst->data(row, col + 1), src.buf.reg[1]);
+ vst1q_s16(dst->data(row, col + 2), src.buf.reg[2]);
+ vst1q_s16(dst->data(row, col + 3), src.buf.reg[3]);
+ } else {
+ const int16x8x2_t t0 = vtrnq_s16(src.buf.reg[0], src.buf.reg[1]);
+ const int16x8x2_t t1 = vtrnq_s16(src.buf.reg[2], src.buf.reg[3]);
+ const int32x4x2_t u0 = vtrnq_s32(vreinterpretq_s32_s16(t0.val[0]),
+ vreinterpretq_s32_s16(t1.val[0]));
+ const int32x4x2_t u1 = vtrnq_s32(vreinterpretq_s32_s16(t0.val[1]),
+ vreinterpretq_s32_s16(t1.val[1]));
+ vst1_s16(dst->data(row + 0, col),
+ vget_low_s16(vreinterpretq_s16_s32(u0.val[0])));
+ vst1_s16(dst->data(row + 1, col),
+ vget_low_s16(vreinterpretq_s16_s32(u1.val[0])));
+ vst1_s16(dst->data(row + 2, col),
+ vget_low_s16(vreinterpretq_s16_s32(u0.val[1])));
+ vst1_s16(dst->data(row + 3, col),
+ vget_low_s16(vreinterpretq_s16_s32(u1.val[1])));
+ vst1_s16(dst->data(row + 4, col),
+ vget_high_s16(vreinterpretq_s16_s32(u0.val[0])));
+ vst1_s16(dst->data(row + 5, col),
+ vget_high_s16(vreinterpretq_s16_s32(u1.val[0])));
+ vst1_s16(dst->data(row + 6, col),
+ vget_high_s16(vreinterpretq_s16_s32(u0.val[1])));
+ vst1_s16(dst->data(row + 7, col),
+ vget_high_s16(vreinterpretq_s16_s32(u1.val[1])));
+ }
+ }
+};
+
template <typename DstType>
struct StoreFinalOutputImpl<RegBlockInt32<8, 8>, DstType> {
static void Run(const RegBlockInt32<8, 8>& src, DstType* dst, int row,
}
};
+template <typename DstType>
+struct StoreFinalOutputImpl<RegBlockInt16<1, 4>, DstType> {
+ static void Run(const RegBlockInt16<1, 4>& src, DstType* dst, int row,
+ int col) {
+ std::int16_t* dst_ptr = dst->data(row, col);
+ if (DstType::kOrder == MapOrder::RowMajor) {
+ vst1_s16(dst_ptr, src.buf.reg[0]);
+ } else {
+ int col_stride = dst->cols_stride();
+ vst1_lane_s16(dst_ptr + 0 * col_stride, src.buf.reg[0], 0);
+ vst1_lane_s16(dst_ptr + 1 * col_stride, src.buf.reg[0], 1);
+ vst1_lane_s16(dst_ptr + 2 * col_stride, src.buf.reg[0], 2);
+ vst1_lane_s16(dst_ptr + 3 * col_stride, src.buf.reg[0], 3);
+ }
+ }
+};
+
template <typename DstType>
struct StoreFinalOutputImpl<RegBlockUint8<4, 1>, DstType> {
static void Run(const RegBlockUint8<4, 1>& src, DstType* dst, int row,
}
};
+template <typename DstType>
+struct StoreFinalOutputImpl<RegBlockInt16<8, 8>, DstType> {
+ static void Run(const RegBlockInt16<8, 8>& src, DstType* dst, int row,
+ int col) {
+ if (DstType::kOrder == MapOrder::ColMajor) {
+ vst1q_s16(dst->data(row, col + 0), src.buf.reg[0]);
+ vst1q_s16(dst->data(row, col + 1), src.buf.reg[1]);
+ vst1q_s16(dst->data(row, col + 2), src.buf.reg[2]);
+ vst1q_s16(dst->data(row, col + 3), src.buf.reg[3]);
+ vst1q_s16(dst->data(row, col + 4), src.buf.reg[4]);
+ vst1q_s16(dst->data(row, col + 5), src.buf.reg[5]);
+ vst1q_s16(dst->data(row, col + 6), src.buf.reg[6]);
+ vst1q_s16(dst->data(row, col + 7), src.buf.reg[7]);
+ } else {
+ int16x8x2_t a[4];
+ a[0] = vtrnq_s16(src.buf.reg[0], src.buf.reg[1]);
+ a[1] = vtrnq_s16(src.buf.reg[2], src.buf.reg[3]);
+ a[2] = vtrnq_s16(src.buf.reg[4], src.buf.reg[5]);
+ a[3] = vtrnq_s16(src.buf.reg[6], src.buf.reg[7]);
+ int32x4x2_t b[4];
+ b[0] = vtrnq_s32(vreinterpretq_s32_s16(a[0].val[0]),
+ vreinterpretq_s32_s16(a[1].val[0]));
+ b[1] = vtrnq_s32(vreinterpretq_s32_s16(a[0].val[1]),
+ vreinterpretq_s32_s16(a[1].val[1]));
+ b[2] = vtrnq_s32(vreinterpretq_s32_s16(a[2].val[0]),
+ vreinterpretq_s32_s16(a[3].val[0]));
+ b[3] = vtrnq_s32(vreinterpretq_s32_s16(a[2].val[1]),
+ vreinterpretq_s32_s16(a[3].val[1]));
+ vst1_s16(dst->data(row + 0, col + 0),
+ vget_low_s16(vreinterpretq_s16_s32(b[0].val[0])));
+ vst1_s16(dst->data(row + 0, col + 4),
+ vget_low_s16(vreinterpretq_s16_s32(b[2].val[0])));
+ vst1_s16(dst->data(row + 1, col + 0),
+ vget_low_s16(vreinterpretq_s16_s32(b[1].val[0])));
+ vst1_s16(dst->data(row + 1, col + 4),
+ vget_low_s16(vreinterpretq_s16_s32(b[3].val[0])));
+ vst1_s16(dst->data(row + 2, col + 0),
+ vget_low_s16(vreinterpretq_s16_s32(b[0].val[1])));
+ vst1_s16(dst->data(row + 2, col + 4),
+ vget_low_s16(vreinterpretq_s16_s32(b[2].val[1])));
+ vst1_s16(dst->data(row + 3, col + 0),
+ vget_low_s16(vreinterpretq_s16_s32(b[1].val[1])));
+ vst1_s16(dst->data(row + 3, col + 4),
+ vget_low_s16(vreinterpretq_s16_s32(b[3].val[1])));
+ vst1_s16(dst->data(row + 4, col + 0),
+ vget_high_s16(vreinterpretq_s16_s32(b[0].val[0])));
+ vst1_s16(dst->data(row + 4, col + 4),
+ vget_high_s16(vreinterpretq_s16_s32(b[2].val[0])));
+ vst1_s16(dst->data(row + 5, col + 0),
+ vget_high_s16(vreinterpretq_s16_s32(b[1].val[0])));
+ vst1_s16(dst->data(row + 5, col + 4),
+ vget_high_s16(vreinterpretq_s16_s32(b[3].val[0])));
+ vst1_s16(dst->data(row + 6, col + 0),
+ vget_high_s16(vreinterpretq_s16_s32(b[0].val[1])));
+ vst1_s16(dst->data(row + 6, col + 4),
+ vget_high_s16(vreinterpretq_s16_s32(b[2].val[1])));
+ vst1_s16(dst->data(row + 7, col + 0),
+ vget_high_s16(vreinterpretq_s16_s32(b[1].val[1])));
+ vst1_s16(dst->data(row + 7, col + 4),
+ vget_high_s16(vreinterpretq_s16_s32(b[3].val[1])));
+ }
+ }
+};
+
} // namespace gemmlowp
#endif // GEMMLOWP_INTERNAL_OUTPUT_NEON_H_
}
};
+template <>
+struct OutputStageEvalBufferImpl<OutputStageSaturatingCastToInt16,
+ RegBufferInt32<4>> {
+ typedef RegBufferInt32<4> InputType;
+ typedef RegBufferInt16<4> OutputType;
+
+ typedef OutputStageSaturatingCastToInt16 OutputStage;
+
+ OutputStageEvalBufferImpl(const OutputStage&) {}
+
+ OutputType Eval(InputType input) const {
+ OutputType output;
+ __m128i res_16 = _mm_packs_epi32(input.reg[0], input.reg[0]);
+ output.reg[0] = _mm_extract_epi16(res_16, 0);
+ output.reg[1] = _mm_extract_epi16(res_16, 1);
+ output.reg[2] = _mm_extract_epi16(res_16, 2);
+ output.reg[3] = _mm_extract_epi16(res_16, 3);
+ return output;
+ }
+};
+
+template <>
+struct OutputStageEvalBufferImpl<OutputStageSaturatingCastToInt16,
+ RegBufferInt32<8>> {
+ typedef RegBufferInt32<8> InputType;
+ typedef RegBufferInt16<8> OutputType;
+
+ typedef OutputStageSaturatingCastToInt16 OutputStage;
+
+ OutputStageEvalBufferImpl(const OutputStage&) {}
+
+ OutputType Eval(InputType input) const {
+ OutputType output;
+ output.reg[0] = _mm_packs_epi32(input.reg[0], input.reg[1]);
+ return output;
+ }
+};
+
+template <>
+struct OutputStageEvalBufferImpl<OutputStageSaturatingCastToInt16,
+ RegBufferInt32<16>> {
+ typedef RegBufferInt32<16> InputType;
+ typedef RegBufferInt16<16> OutputType;
+
+ typedef OutputStageSaturatingCastToInt16 OutputStage;
+
+ OutputStageEvalBufferImpl(const OutputStage&) {}
+
+ OutputType Eval(InputType input) const {
+ OutputType output;
+ output.reg[0] = _mm_packs_epi32(input.reg[0], input.reg[1]);
+ output.reg[1] = _mm_packs_epi32(input.reg[2], input.reg[3]);
+ return output;
+ }
+};
+
+template <>
+struct OutputStageEvalBufferImpl<OutputStageSaturatingCastToInt16,
+ RegBufferInt32<32>> {
+ typedef RegBufferInt32<32> InputType;
+ typedef RegBufferInt16<32> OutputType;
+
+ typedef OutputStageSaturatingCastToInt16 OutputStage;
+
+ OutputStageEvalBufferImpl(const OutputStage&) {}
+
+ OutputType Eval(InputType input) const {
+ OutputType output;
+ output.reg[0] = _mm_packs_epi32(input.reg[0], input.reg[1]);
+ output.reg[1] = _mm_packs_epi32(input.reg[2], input.reg[3]);
+ output.reg[2] = _mm_packs_epi32(input.reg[4], input.reg[5]);
+ output.reg[3] = _mm_packs_epi32(input.reg[6], input.reg[7]);
+ return output;
+ }
+};
+
template <typename DstType>
struct StoreFinalOutputImpl<RegBlockInt32<4, 1>, DstType> {
static void Run(const RegBlockInt32<4, 1>& src, DstType* dst, int row,
}
};
+template <typename DstType>
+struct StoreFinalOutputImpl<RegBlockInt16<4, 1>, DstType> {
+ static void Run(const RegBlockInt16<4, 1>& src, DstType* dst, int row,
+ int col) {
+ *dst->data(row + 0, col) = src.buf.reg[0];
+ *dst->data(row + 1, col) = src.buf.reg[1];
+ *dst->data(row + 2, col) = src.buf.reg[2];
+ *dst->data(row + 3, col) = src.buf.reg[3];
+ }
+};
+
+template <typename DstType>
+struct StoreFinalOutputImpl<RegBlockInt16<8, 1>, DstType> {
+ static void Run(const RegBlockInt16<8, 1>& src, DstType* dst, int row,
+ int col) {
+ if (DstType::kOrder == MapOrder::ColMajor) {
+ StoreInt16x8(dst->data(row, col), src.buf.reg[0]);
+ } else {
+ *dst->data(row + 0, col) = _mm_extract_epi16(src.buf.reg[0], 0);
+ *dst->data(row + 1, col) = _mm_extract_epi16(src.buf.reg[0], 1);
+ *dst->data(row + 2, col) = _mm_extract_epi16(src.buf.reg[0], 2);
+ *dst->data(row + 3, col) = _mm_extract_epi16(src.buf.reg[0], 3);
+ *dst->data(row + 4, col) = _mm_extract_epi16(src.buf.reg[0], 4);
+ *dst->data(row + 5, col) = _mm_extract_epi16(src.buf.reg[0], 5);
+ *dst->data(row + 6, col) = _mm_extract_epi16(src.buf.reg[0], 6);
+ *dst->data(row + 7, col) = _mm_extract_epi16(src.buf.reg[0], 7);
+ }
+ }
+};
+
inline RegBlockInt32<4, 4> Transpose(const RegBlockInt32<4, 4>& src) {
__m128i t0 = _mm_unpacklo_epi32(src.buf.reg[0], src.buf.reg[1]);
__m128i t1 = _mm_unpacklo_epi32(src.buf.reg[2], src.buf.reg[3]);
}
};
+template <typename DstType>
+struct StoreFinalOutputImpl<RegBlockInt16<4, 4>, DstType> {
+ static void Run(const RegBlockInt16<4, 4>& src, DstType* dst, int row,
+ int col) {
+ std::int16_t buf[16];
+ StoreInt16x8(buf + 0, src.buf.reg[0]);
+ StoreInt16x8(buf + 8, src.buf.reg[1]);
+ for (int i = 0; i < 4; i++) {
+ for (int j = 0; j < 4; j++) {
+ *dst->data(row + i, col + j) = buf[i + 4 * j];
+ }
+ }
+ }
+};
+
template <typename DstType>
struct StoreFinalOutputImpl<RegBlockInt32<8, 4>, DstType> {
static void Run(const RegBlockInt32<8, 4>& src, DstType* dst, int row,
}
};
+template <typename DstType>
+struct StoreFinalOutputImpl<RegBlockInt16<8, 4>, DstType> {
+ static void Run(const RegBlockInt16<8, 4>& src, DstType* dst, int row,
+ int col) {
+ if (DstType::kOrder == MapOrder::ColMajor) {
+ for (int i = 0; i < 4; i++) {
+ StoreInt16x8(dst->data(row, col + i), src.buf.reg[i]);
+ }
+ } else {
+ std::int16_t buf[32];
+ StoreInt16x8(buf + 0, src.buf.reg[0]);
+ StoreInt16x8(buf + 8, src.buf.reg[1]);
+ StoreInt16x8(buf + 16, src.buf.reg[2]);
+ StoreInt16x8(buf + 24, src.buf.reg[3]);
+ for (int i = 0; i < 8; i++) {
+ for (int j = 0; j < 4; j++) {
+ *dst->data(row + i, col + j) = buf[i + 8 * j];
+ }
+ }
+ }
+ }
+};
+
template <typename DstType>
struct StoreFinalOutputImpl<RegBlockInt32<8, 8>, DstType> {
static void Run(const RegBlockInt32<8, 8>& src, DstType* dst, int row,
}
};
+template <typename DstType>
+struct StoreFinalOutputImpl<RegBlockInt16<8, 8>, DstType> {
+ static void Run(const RegBlockInt16<8, 8>& src, DstType* dst, int row,
+ int col) {
+ if (DstType::kOrder == MapOrder::ColMajor) {
+ for (int i = 0; i < 8; i++) {
+ StoreInt16x8(dst->data(row, col + i), src.buf.reg[i]);
+ }
+ } else {
+ // top-left 4x4
+ __m128i t0 = _mm_unpacklo_epi16(src.buf.reg[0], src.buf.reg[1]);
+ __m128i t1 = _mm_unpacklo_epi16(src.buf.reg[2], src.buf.reg[3]);
+ __m128i u0 = _mm_unpacklo_epi32(t0, t1);
+ __m128i u1 = _mm_unpackhi_epi32(t0, t1);
+ // top-right 4x4
+ __m128i t2 = _mm_unpacklo_epi16(src.buf.reg[4], src.buf.reg[5]);
+ __m128i t3 = _mm_unpacklo_epi16(src.buf.reg[6], src.buf.reg[7]);
+ __m128i u2 = _mm_unpacklo_epi32(t2, t3);
+ __m128i u3 = _mm_unpackhi_epi32(t2, t3);
+ // bottom-left 4x4
+ __m128i t4 = _mm_unpackhi_epi16(src.buf.reg[0], src.buf.reg[1]);
+ __m128i t5 = _mm_unpackhi_epi16(src.buf.reg[2], src.buf.reg[3]);
+ __m128i u4 = _mm_unpacklo_epi32(t4, t5);
+ __m128i u5 = _mm_unpackhi_epi32(t4, t5);
+ // bottom-right 4x4
+ __m128i t6 = _mm_unpackhi_epi16(src.buf.reg[4], src.buf.reg[5]);
+ __m128i t7 = _mm_unpackhi_epi16(src.buf.reg[6], src.buf.reg[7]);
+ __m128i u6 = _mm_unpacklo_epi32(t6, t7);
+ __m128i u7 = _mm_unpackhi_epi32(t6, t7);
+
+ StoreInt16x8(dst->data(row + 0, col), _mm_unpacklo_epi64(u0, u2));
+ StoreInt16x8(dst->data(row + 1, col), _mm_unpackhi_epi64(u0, u2));
+ StoreInt16x8(dst->data(row + 2, col), _mm_unpacklo_epi64(u1, u3));
+ StoreInt16x8(dst->data(row + 3, col), _mm_unpackhi_epi64(u1, u3));
+ StoreInt16x8(dst->data(row + 4, col), _mm_unpacklo_epi64(u4, u6));
+ StoreInt16x8(dst->data(row + 5, col), _mm_unpackhi_epi64(u4, u6));
+ StoreInt16x8(dst->data(row + 6, col), _mm_unpacklo_epi64(u5, u7));
+ StoreInt16x8(dst->data(row + 7, col), _mm_unpackhi_epi64(u5, u7));
+ }
+ }
+};
+
template <typename DstType>
struct StoreFinalOutputImpl<RegBlockInt32<1, 4>, DstType> {
static void Run(const RegBlockInt32<1, 4>& src, DstType* dst, int row,
#include "pack_neon.h"
#elif defined(GEMMLOWP_SSE4)
#include "pack_sse.h"
+#elif defined(GEMMLOWP_MSA)
+#include "pack_msa.h"
#endif
#endif // GEMMLOWP_INTERNAL_PACK_H_
--- /dev/null
+// Copyright 2018 The Gemmlowp Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// pack_msa.h: optimized MSA specializations of the templates in pack.h.
+
+#ifndef GEMMLOWP_INTERNAL_PACK_MSA_H_
+#define GEMMLOWP_INTERNAL_PACK_MSA_H_
+
+#include "pack.h"
+
+#include <msa.h>
+
+namespace gemmlowp {
+
+typedef SideMap<const std::uint8_t, SideMapOrder::WidthMajor>
+ WidthMajorUint8SideMap;
+
+template <int Cells>
+using DepthMajorSideFormatNCells4x2 = KernelSideFormat<CellFormat<4, 2>, Cells>;
+
+template <int Cells>
+class PackingRegisterBlock<
+ WidthMajorUint8SideMap,
+ PackedSideBlock<DepthMajorSideFormatNCells4x2<Cells>>>
+ : public PackingRegisterBlockBase<
+ WidthMajorUint8SideMap,
+ PackedSideBlock<DepthMajorSideFormatNCells4x2<Cells>>> {
+ public:
+ typedef DepthMajorSideFormatNCells4x2<Cells> KernelSideFormat;
+ typedef typename KernelSideFormat::Cell CellFormat;
+ static constexpr int kCells = KernelSideFormat::kCells;
+ static const int kCellWidth = CellFormat::kWidth;
+ static const int kKernelWidth = CellFormat::kWidth * kCells;
+ static const int kCellDepth = CellFormat::kDepth;
+ static const int kCellSize = CellFormat::kSize;
+
+ void Pack(PackedSideBlock<KernelSideFormat>* dst, int start_width) {
+ std::uint8_t* dst_ptr = dst->current_data();
+ const std::uint8_t* const src_ptr = this->complete_src_.data();
+ const int stride = this->complete_src_.stride();
+ // Load source WidthMajor data
+ v16i8 src_lines[4 * kCells];
+ for (int i = 0; i < 4 * kCells; i++) {
+ src_lines[i] = __builtin_msa_ld_b(
+ const_cast<std::uint8_t*>(src_ptr + i * stride), 0);
+ }
+ // Reorder the data within registers to make DepthMajor 4x2 cells
+ v16i8 src_lines_intertwined_2x[2 * kCells][2];
+ for (int i = 0; i < kCells; i++) {
+ src_lines_intertwined_2x[2 * i][0] =
+ __builtin_msa_ilvr_b(src_lines[4 * i + 2], src_lines[4 * i]);
+ src_lines_intertwined_2x[2 * i][1] =
+ __builtin_msa_ilvl_b(src_lines[4 * i + 2], src_lines[4 * i]);
+ src_lines_intertwined_2x[2 * i + 1][0] =
+ __builtin_msa_ilvr_b(src_lines[4 * i + 3], src_lines[4 * i + 1]);
+ src_lines_intertwined_2x[2 * i + 1][1] =
+ __builtin_msa_ilvl_b(src_lines[4 * i + 3], src_lines[4 * i + 1]);
+ }
+ v16i8 src_lines_intertwined_4x[2 * kCells][2];
+ for (int i = 0; i < kCells; i++) {
+ src_lines_intertwined_4x[2 * i][0] =
+ __builtin_msa_ilvr_b(src_lines_intertwined_2x[2 * i + 1][0],
+ src_lines_intertwined_2x[2 * i][0]);
+ src_lines_intertwined_4x[2 * i][1] =
+ __builtin_msa_ilvl_b(src_lines_intertwined_2x[2 * i + 1][0],
+ src_lines_intertwined_2x[2 * i][0]);
+ src_lines_intertwined_4x[2 * i + 1][0] =
+ __builtin_msa_ilvr_b(src_lines_intertwined_2x[2 * i + 1][1],
+ src_lines_intertwined_2x[2 * i][1]);
+ src_lines_intertwined_4x[2 * i + 1][1] =
+ __builtin_msa_ilvl_b(src_lines_intertwined_2x[2 * i + 1][1],
+ src_lines_intertwined_2x[2 * i][1]);
+ }
+ // Store the resulting DepthMajor 4x2 cells in the destination packed block
+ for (int outer = 0; outer < 2; outer++) {
+ for (int inner = 0; inner < 2; inner++) {
+ if (kCells % 2 == 0) {
+ for (int cell = 0; cell < kCells; cell += 2) {
+ v2i64 tmp = __builtin_msa_ilvr_d(
+ reinterpret_cast<v2i64>(
+ src_lines_intertwined_4x[2 * (cell + 1) + outer][inner]),
+ reinterpret_cast<v2i64>(
+ src_lines_intertwined_4x[2 * cell + outer][inner]));
+ __builtin_msa_st_b(reinterpret_cast<v16i8>(tmp), dst_ptr, 0);
+ dst_ptr += 16;
+ }
+ for (int cell = 0; cell < kCells; cell += 2) {
+ v2i64 tmp = __builtin_msa_ilvl_d(
+ reinterpret_cast<v2i64>(
+ src_lines_intertwined_4x[2 * (cell + 1) + outer][inner]),
+ reinterpret_cast<v2i64>(
+ src_lines_intertwined_4x[2 * cell + outer][inner]));
+ __builtin_msa_st_b(reinterpret_cast<v16i8>(tmp), dst_ptr, 0);
+ dst_ptr += 16;
+ }
+ } else {
+ // Store even number of low vector halves.
+ for (int cell = 0; cell < kCells - 1; cell += 2) {
+ v2i64 tmp = __builtin_msa_ilvr_d(
+ reinterpret_cast<v2i64>(
+ src_lines_intertwined_4x[2 * (cell + 1) + outer][inner]),
+ reinterpret_cast<v2i64>(
+ src_lines_intertwined_4x[2 * cell + outer][inner]));
+ __builtin_msa_st_b(reinterpret_cast<v16i8>(tmp), dst_ptr, 0);
+ dst_ptr += 16;
+ }
+ // Store last low half and first high half.
+ v2i64 tmp = reinterpret_cast<v2i64>(
+ src_lines_intertwined_4x[2 * 0 + outer][inner]);
+ tmp = __builtin_msa_insve_d(
+ tmp, 0,
+ reinterpret_cast<v2i64>(
+ src_lines_intertwined_4x[2 * (kCells - 1) + outer][inner]));
+ __builtin_msa_st_b(reinterpret_cast<v16i8>(tmp), dst_ptr, 0);
+ dst_ptr += 16;
+ // Store even number of high vector halves.
+ for (int cell = 1; cell < kCells; cell += 2) {
+ v2i64 tmp = __builtin_msa_ilvl_d(
+ reinterpret_cast<v2i64>(
+ src_lines_intertwined_4x[2 * (cell + 1) + outer][inner]),
+ reinterpret_cast<v2i64>(
+ src_lines_intertwined_4x[2 * cell + outer][inner]));
+ __builtin_msa_st_b(reinterpret_cast<v16i8>(tmp), dst_ptr, 0);
+ dst_ptr += 16;
+ }
+ }
+ }
+ }
+ // Compute sums across the depth dimension
+ v8i16 sums_of_2_cells[kCells][4];
+ const v16i8 zeroes = __builtin_msa_ldi_b(0);
+ for (int outer = 0; outer < 2; outer++) {
+ for (int inner = 0; inner < 2; inner++) {
+ int i = 2 * outer + inner;
+ for (int cell = 0; cell < kCells; cell++) {
+ v8i16 tmp0 = reinterpret_cast<v8i16>(__builtin_msa_ilvr_b(
+ zeroes, src_lines_intertwined_4x[2 * cell + outer][inner]));
+ v8i16 tmp1 = reinterpret_cast<v8i16>(__builtin_msa_ilvl_b(
+ zeroes, src_lines_intertwined_4x[2 * cell + outer][inner]));
+ sums_of_2_cells[cell][i] = __builtin_msa_addv_h(tmp0, tmp1);
+ }
+ }
+ }
+ v4i32 sums_of_4_cells[kCells][4];
+ for (int i = 0; i < 4; i++) {
+ for (int cell = 0; cell < kCells; cell++) {
+ v4i32 tmp0 = reinterpret_cast<v4i32>(__builtin_msa_ilvr_h(
+ reinterpret_cast<v8i16>(zeroes), sums_of_2_cells[cell][i]));
+ v4i32 tmp1 = reinterpret_cast<v4i32>(__builtin_msa_ilvl_h(
+ reinterpret_cast<v8i16>(zeroes), sums_of_2_cells[cell][i]));
+ sums_of_4_cells[cell][i] = __builtin_msa_addv_w(tmp0, tmp1);
+ }
+ }
+ // Update the sums_of_each_slice vector
+ for (int cell = 0; cell < kCells; cell++) {
+ v4i32 s01 = __builtin_msa_addv_w(sums_of_4_cells[cell][0],
+ sums_of_4_cells[cell][1]);
+ v4i32 s23 = __builtin_msa_addv_w(sums_of_4_cells[cell][2],
+ sums_of_4_cells[cell][3]);
+ v4i32 s = __builtin_msa_addv_w(s01, s23);
+ std::int32_t* sums_of_each_slice_ptr =
+ dst->sums_of_each_slice() + start_width + 4 * cell;
+ v4i32 tmp = __builtin_msa_ld_w(sums_of_each_slice_ptr, 0);
+ tmp = __builtin_msa_addv_w(tmp, s);
+ __builtin_msa_st_w(tmp, sums_of_each_slice_ptr, 0);
+ }
+ dst->seek_forward_n_cells(kCells * kRegisterSize / kCellDepth);
+ }
+};
+
+template <int Cells>
+using WidthMajorSideFormatNCells4x2 =
+ KernelSideFormat<CellFormat<4, 2, CellOrder::WidthMajor>, Cells>;
+
+template <int Cells>
+class PackingRegisterBlock<
+ WidthMajorUint8SideMap,
+ PackedSideBlock<WidthMajorSideFormatNCells4x2<Cells>>>
+ : public PackingRegisterBlockBase<
+ WidthMajorUint8SideMap,
+ PackedSideBlock<WidthMajorSideFormatNCells4x2<Cells>>> {
+ public:
+ typedef WidthMajorSideFormatNCells4x2<Cells> KernelSideFormat;
+ typedef typename KernelSideFormat::Cell CellFormat;
+ static constexpr int kCells = KernelSideFormat::kCells;
+ static const int kCellWidth = CellFormat::kWidth;
+ static const int kKernelWidth = CellFormat::kWidth * kCells;
+ static const int kCellDepth = CellFormat::kDepth;
+ static const int kCellSize = CellFormat::kSize;
+
+ void Pack(PackedSideBlock<KernelSideFormat>* dst, int start_width) {
+ std::uint8_t* dst_ptr = dst->current_data();
+ const std::uint8_t* src_ptr = this->complete_src_.data();
+ const int stride = this->complete_src_.stride();
+ // Load source WidthMajor data
+ v8i16 src_lines[kCells * 4];
+ for (int i = 0; i < kCells; i++) {
+#define GEMMLOWP_UNROLLED_LOOP_ITER(k) \
+ src_lines[4 * i + k] = \
+ __builtin_msa_ld_h(const_cast<std::uint8_t*>(src_ptr), 0); \
+ src_ptr += stride;
+
+ GEMMLOWP_UNROLLED_LOOP_ITER(0)
+ GEMMLOWP_UNROLLED_LOOP_ITER(1)
+ GEMMLOWP_UNROLLED_LOOP_ITER(2)
+ GEMMLOWP_UNROLLED_LOOP_ITER(3)
+
+#undef GEMMLOWP_UNROLLED_LOOP_ITER
+ }
+ // Reorder the data within registers to make WidthMajor 4x2 cells
+ v8i16 src_lines_intertwined_2x[2 * kCells][2];
+ for (int i = 0; i < kCells; i++) {
+ src_lines_intertwined_2x[2 * i][0] =
+ __builtin_msa_ilvr_h(src_lines[4 * i + 2], src_lines[4 * i]);
+ src_lines_intertwined_2x[2 * i][1] =
+ __builtin_msa_ilvl_h(src_lines[4 * i + 2], src_lines[4 * i]);
+ src_lines_intertwined_2x[2 * i + 1][0] =
+ __builtin_msa_ilvr_h(src_lines[4 * i + 3], src_lines[4 * i + 1]);
+ src_lines_intertwined_2x[2 * i + 1][1] =
+ __builtin_msa_ilvl_h(src_lines[4 * i + 3], src_lines[4 * i + 1]);
+ }
+ v8i16 src_lines_intertwined_4x[2 * kCells][2];
+ for (int i = 0; i < kCells; i++) {
+ src_lines_intertwined_4x[2 * i][0] =
+ __builtin_msa_ilvr_h(src_lines_intertwined_2x[2 * i + 1][0],
+ src_lines_intertwined_2x[2 * i][0]);
+ src_lines_intertwined_4x[2 * i][1] =
+ __builtin_msa_ilvl_h(src_lines_intertwined_2x[2 * i + 1][0],
+ src_lines_intertwined_2x[2 * i][0]);
+ src_lines_intertwined_4x[2 * i + 1][0] =
+ __builtin_msa_ilvr_h(src_lines_intertwined_2x[2 * i + 1][1],
+ src_lines_intertwined_2x[2 * i][1]);
+ src_lines_intertwined_4x[2 * i + 1][1] =
+ __builtin_msa_ilvl_h(src_lines_intertwined_2x[2 * i + 1][1],
+ src_lines_intertwined_2x[2 * i][1]);
+ }
+ // Store the resulting WidthMajor 4x2 cells in the destination packed block
+ for (int outer = 0; outer < 2; outer++) {
+ for (int inner = 0; inner < 2; inner++) {
+ if (kCells % 2 == 0) {
+ for (int cell = 0; cell < kCells; cell += 2) {
+ v2i64 tmp = __builtin_msa_ilvr_d(
+ reinterpret_cast<v2i64>(
+ src_lines_intertwined_4x[2 * (cell + 1) + outer][inner]),
+ reinterpret_cast<v2i64>(
+ src_lines_intertwined_4x[2 * cell + outer][inner]));
+ __builtin_msa_st_b(reinterpret_cast<v16i8>(tmp), dst_ptr, 0);
+ dst_ptr += 16;
+ }
+ for (int cell = 0; cell < kCells; cell += 2) {
+ v2i64 tmp = __builtin_msa_ilvl_d(
+ reinterpret_cast<v2i64>(
+ src_lines_intertwined_4x[2 * (cell + 1) + outer][inner]),
+ reinterpret_cast<v2i64>(
+ src_lines_intertwined_4x[2 * cell + outer][inner]));
+ __builtin_msa_st_b(reinterpret_cast<v16i8>(tmp), dst_ptr, 0);
+ dst_ptr += 16;
+ }
+ } else {
+ // Store even number of low vector halves.
+ for (int cell = 0; cell < kCells - 1; cell += 2) {
+ v2i64 tmp = __builtin_msa_ilvr_d(
+ reinterpret_cast<v2i64>(
+ src_lines_intertwined_4x[2 * (cell + 1) + outer][inner]),
+ reinterpret_cast<v2i64>(
+ src_lines_intertwined_4x[2 * cell + outer][inner]));
+ __builtin_msa_st_b(reinterpret_cast<v16i8>(tmp), dst_ptr, 0);
+ dst_ptr += 16;
+ }
+ // Store last low half and first high half.
+ v2i64 tmp = reinterpret_cast<v2i64>(
+ src_lines_intertwined_4x[2 * 0 + outer][inner]);
+ tmp = __builtin_msa_insve_d(
+ tmp, 0,
+ reinterpret_cast<v2i64>(
+ src_lines_intertwined_4x[2 * (kCells - 1) + outer][inner]));
+ __builtin_msa_st_b(reinterpret_cast<v16i8>(tmp), dst_ptr, 0);
+ dst_ptr += 16;
+ // Store even number of high vector halves.
+ for (int cell = 1; cell < kCells; cell += 2) {
+ v2i64 tmp = __builtin_msa_ilvl_d(
+ reinterpret_cast<v2i64>(
+ src_lines_intertwined_4x[2 * (cell + 1) + outer][inner]),
+ reinterpret_cast<v2i64>(
+ src_lines_intertwined_4x[2 * cell + outer][inner]));
+ __builtin_msa_st_b(reinterpret_cast<v16i8>(tmp), dst_ptr, 0);
+ dst_ptr += 16;
+ }
+ }
+ }
+ }
+ // Compute sums across the depth dimension
+ v8i16 sums_of_2[kCells][4];
+ for (int outer = 0; outer < 2; outer++) {
+ for (int inner = 0; inner < 2; inner++) {
+ int i = 2 * outer + inner;
+ for (int cell = 0; cell < kCells; cell++) {
+ sums_of_2[cell][i] = reinterpret_cast<v8i16>(__builtin_msa_hadd_u_h(
+ reinterpret_cast<v16u8>(
+ src_lines_intertwined_4x[2 * cell + outer][inner]),
+ reinterpret_cast<v16u8>(
+ src_lines_intertwined_4x[2 * cell + outer][inner])));
+ }
+ }
+ }
+ v8i16 sums_of_4[kCells][2];
+ for (int i = 0; i < 2; i++) {
+ for (int cell = 0; cell < kCells; cell++) {
+ sums_of_4[cell][i] = __builtin_msa_addv_h(sums_of_2[cell][2 * i],
+ sums_of_2[cell][2 * i + 1]);
+ }
+ }
+ v8i16 sums_of_8[kCells];
+ for (int cell = 0; cell < kCells; cell++) {
+ sums_of_8[cell] =
+ __builtin_msa_addv_h(sums_of_4[cell][0], sums_of_4[cell][1]);
+ }
+
+ v4i32 sums_of_16[kCells];
+ const v8i16 zeroes = __builtin_msa_ldi_h(0);
+ for (int cell = 0; cell < kCells; cell++) {
+ sums_of_16[cell] = reinterpret_cast<v4i32>(
+ __builtin_msa_ilvr_h(zeroes, sums_of_8[cell]));
+ v8i16 tmp = __builtin_msa_ilvl_h(zeroes, sums_of_8[cell]);
+ sums_of_16[cell] =
+ __builtin_msa_addv_w(sums_of_16[cell], reinterpret_cast<v4i32>(tmp));
+ }
+ // Update the sums_of_each_slice vector
+ for (int cell = 0; cell < kCells; cell++) {
+ std::int32_t* sums_of_each_slice_ptr =
+ dst->sums_of_each_slice() + start_width + 4 * cell;
+ v4i32 tmp = __builtin_msa_ld_w(sums_of_each_slice_ptr, 0);
+ tmp = __builtin_msa_addv_w(tmp, sums_of_16[cell]);
+ __builtin_msa_st_w(tmp, sums_of_each_slice_ptr, 0);
+ }
+ dst->seek_forward_n_cells(kCells * kRegisterSize / kCellDepth);
+ }
+};
+
+} // namespace gemmlowp
+
+#endif // GEMMLOWP_INTERNAL_PACK_MSA_H_
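Both Pack() specializations above end by accumulating, for each packed width position, the sum of its uint8 entries across the depth dimension into dst->sums_of_each_slice(); those sums feed the offset terms of the quantized GEMM later on. A scalar sketch of that bookkeeping follows; the plain-array interface is hypothetical and stands in for gemmlowp's SideMap/PackedSideBlock types.

#include <cstdint>

// Scalar sketch of the sums-of-each-slice update performed at the end of
// the MSA Pack() methods above. Illustrative only.
inline void AccumulateColumnSums(const std::uint8_t* block,  // width x depth,
                                                             // width-major
                                 int width, int depth,
                                 std::int32_t* sums_of_each_slice) {
  for (int w = 0; w < width; w++) {
    std::int32_t sum = 0;
    for (int d = 0; d < depth; d++) {
      sum += block[w * depth + d];
    }
    sums_of_each_slice[w] += sum;
  }
}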
// Load source WidthMajor data
uint16x8_t src_lines[kCells * 4];
for (int i = 0; i < kCells; i++) {
-// This packing path is used with our current
-// less-than-8-bit kernel, and the partial unrolling of this loop
-// results in substantially faster code (thanks to better
-// register allocation) on Nexus 5.
+ // This packing path is used with our current
+ // less-than-8-bit kernel, and the partial unrolling of this loop
+ // results in substantially faster code (thanks to better
+ // register allocation) on Nexus 5.
#define GEMMLOWP_UNROLLED_LOOP_ITER(k) \
src_lines[4 * i + k] = vreinterpretq_u16_u8(vld1q_u8(src_ptr)); \
--- /dev/null
+// Copyright 2015 The Gemmlowp Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// internal/platform.h: a place to put platform specific code
+
+#ifndef GEMMLOWP_INTERNAL_PLATFORM_H_
+#define GEMMLOWP_INTERNAL_PLATFORM_H_
+
+#ifdef _WIN32
+#include <windows.h>
+#else
+#include <stdlib.h>
+#include <time.h>
+#include <unistd.h>
+#endif
+
+#ifdef __APPLE__
+#include <sys/time.h>
+#endif
+
+#if defined ANDROID || defined __ANDROID__
+#include <malloc.h>
+#include <android/api-level.h>
+// The 18 here should be 16, but has to be 18 for now due
+// to a Google-internal issue.
+#if __ANDROID_API__ < 18
+#define GEMMLOWP_USE_MEMALIGN
+#endif
+// posix_memalign is missing on some 4.1 x86 devices
+#if __ANDROID_API__ == 18
+#ifdef GEMMLOWP_X86_32
+#define GEMMLOWP_USE_MEMALIGN
+#endif
+#endif
+#endif
+
+// Needed by chrome native builds
+#ifndef _SC_NPROCESSORS_CONF
+#define _SC_NPROCESSORS_CONF _SC_NPROCESSORS_ONLN
+#endif
+
+namespace gemmlowp {
+
+#ifdef _WIN32
+inline void *aligned_alloc(size_t alignment, size_t size) {
+ return _aligned_malloc(size, alignment);
+}
+
+inline void aligned_free(void *memptr) { _aligned_free(memptr); }
+
+inline int GetHardwareConcurrency(int max_threads) {
+ if (max_threads == 0) {
+ SYSTEM_INFO sysinfo;
+ GetSystemInfo(&sysinfo);
+ return sysinfo.dwNumberOfProcessors;
+ }
+ return max_threads;
+}
+
+inline double real_time_in_seconds() {
+ __int64 wintime;
+ GetSystemTimeAsFileTime((FILETIME *)&wintime);
+ wintime -= 116444736000000000i64; // 1jan1601 to 1jan1970
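+  // wintime is now a count of 100 ns ticks since the Unix epoch: the integer
+  // division yields whole seconds, and the remainder is converted to
+  // nanoseconds (* 100) and then to seconds (* 1e-9).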
+ return wintime / 10000000i64 + wintime % 10000000i64 * 100 * 1e-9;
+}
+
+#else
+inline void *aligned_alloc(size_t alignment, size_t size) {
+#ifdef GEMMLOWP_USE_MEMALIGN
+ return memalign(alignment, size);
+#else
+ void *memptr;
+ if (posix_memalign(&memptr, alignment, size)) {
+ memptr = nullptr;
+ }
+ return memptr;
+#endif
+}
+
+inline int GetHardwareConcurrency(int max_threads) {
+ if (max_threads == 0) {
+ static const int hardware_threads_count =
+ static_cast<int>(sysconf(_SC_NPROCESSORS_CONF));
+ return hardware_threads_count;
+ }
+ return max_threads;
+}
+
+inline void aligned_free(void *memptr) { free(memptr); }
+
+inline double real_time_in_seconds() {
+#ifdef __APPLE__
+ timeval t;
+ gettimeofday(&t, nullptr);
+ return t.tv_sec + 1e-6 * t.tv_usec;
+#else
+ timespec t;
+ clock_gettime(CLOCK_REALTIME, &t);
+ return t.tv_sec + 1e-9 * t.tv_nsec;
+#endif
+}
+
+#endif
+} // namespace gemmlowp
+#endif // GEMMLOWP_INTERNAL_PLATFORM_H_
template <int N>
using RegBufferInt32 = RegisterBuffer<std::int32_t, N>;
template <int N>
+using RegBufferInt16 = RegisterBuffer<std::int16_t, N>;
+template <int N>
using RegBufferUint8 = RegisterBuffer<std::uint8_t, N>;
template <int R, int C>
using RegBlockInt32 = RegisterBlock<std::int32_t, R, C>;
template <int R, int C>
+using RegBlockInt16 = RegisterBlock<std::int16_t, R, C>;
+template <int R, int C>
using RegBlockUint8 = RegisterBlock<std::uint8_t, R, C>;
} // end namespace gemmlowp
#include "simd_wrappers_neon.h"
#elif defined GEMMLOWP_SSE4
#include "simd_wrappers_sse.h"
+#elif defined GEMMLOWP_MSA
+#include "simd_wrappers_msa.h"
#endif
#endif // GEMMLOWP_INTERNAL_SIMD_WRAPPERS_H_
--- /dev/null
+// Copyright 2018 The Gemmlowp Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// simd_wrappers_msa.h: MSA specialization of simd_wrappers.h
+
+#ifndef GEMMLOWP_INTERNAL_SIMD_WRAPPERS_MSA_H_
+#define GEMMLOWP_INTERNAL_SIMD_WRAPPERS_MSA_H_
+
+#include <msa.h>
+
+namespace gemmlowp {
+
+using Int32x4 = v4i32;
+using Uint8x16 = v16i8;
+
+template <int ScalarCount>
+struct RegisterType<std::int32_t, ScalarCount> {
+ using Type =
+ typename std::conditional<ScalarCount >= 4, Int32x4, std::int32_t>::type;
+};
+
+template <int ScalarCount>
+struct RegisterType<std::uint8_t, ScalarCount> {
+ using Type = typename std::conditional<
+ ScalarCount >= 16, Uint8x16,
+ typename std::conditional<ScalarCount >= 4, std::uint32_t,
+ std::uint8_t>::type>::type;
+};
+
+inline Int32x4 LoadInt32x4(const std::int32_t* src) {
+ return __builtin_msa_ld_w(const_cast<std::int32_t*>(src), 0);
+}
+
+inline Int32x4 LoadInt32x4(const Int32x4* src) {
+ return __builtin_msa_ld_w(const_cast<Int32x4*>(src), 0);
+}
+
+inline void StoreInt32x4(std::int32_t* dst, Int32x4 value) {
+ __builtin_msa_st_w(value, dst, 0);
+}
+
+inline void StoreInt32x4(Int32x4* dst, Int32x4 value) {
+ __builtin_msa_st_w(value, dst, 0);
+}
+
+inline Uint8x16 LoadUint8x16(const std::uint8_t* src) {
+ return __builtin_msa_ld_b(const_cast<std::uint8_t*>(src), 0);
+}
+
+inline Uint8x16 LoadUint8x16(const Uint8x16* src) {
+ return __builtin_msa_ld_b(const_cast<Uint8x16*>(src), 0);
+}
+
+inline void StoreUint8x16(std::uint8_t* dst, Uint8x16 value) {
+ __builtin_msa_st_b(value, dst, 0);
+}
+
+inline void StoreUint8x16(Uint8x16* dst, Uint8x16 value) {
+ __builtin_msa_st_b(value, dst, 0);
+}
+
+template <int Lane>
+std::int32_t GetLane(Int32x4 value) {
+ return __builtin_msa_copy_s_w(value, Lane);
+}
+
+template <int Lane>
+Int32x4 DupLane(Int32x4 value) {
+ static_assert(Lane >= 0 && Lane <= 3, "");
+ return __builtin_msa_splati_w(value, Lane);
+}
+
+inline Int32x4 Mul(Int32x4 a, std::int32_t b) {
+ return __builtin_msa_mulv_w(a, __builtin_msa_fill_w(b));
+}
+
+inline Int32x4 Min(Int32x4 a, Int32x4 b) { return __builtin_msa_min_s_w(a, b); }
+
+inline Int32x4 Max(Int32x4 a, Int32x4 b) { return __builtin_msa_max_s_w(a, b); }
+
+inline Int32x4 SaturatingRoundingDoublingHighMul(Int32x4 a, std::int32_t b) {
+ return __builtin_msa_mulr_q_w(a, __builtin_msa_fill_w(b));
+}
+
+template <int Lane>
+Int32x4 MulByRhsLane(Int32x4 a, Int32x4 b) {
+ static_assert(Lane >= 0 && Lane <= 3, "");
+ return __builtin_msa_mulv_w(a, __builtin_msa_splati_w(b, Lane));
+}
+
+inline void MulAdd(Int32x4 lhs, Int32x4 rhs, Int32x4* acc) {
+ Int32x4 tmp = LoadInt32x4(acc);
+ tmp = __builtin_msa_maddv_w(tmp, lhs, rhs);
+ StoreInt32x4(acc, tmp);
+}
+
+inline void MulAdd(Int32x4 lhs, std::int32_t rhs, Int32x4* acc) {
+ Int32x4 tmp = LoadInt32x4(acc);
+ tmp = __builtin_msa_maddv_w(tmp, lhs, __builtin_msa_fill_w(rhs));
+ StoreInt32x4(acc, tmp);
+}
+
+template <int Lane>
+inline void MulAddByRhsLane(Int32x4 lhs, Int32x4 rhs, Int32x4* acc) {
+ static_assert(Lane >= 0 && Lane <= 3, "");
+ Int32x4 tmp = LoadInt32x4(acc);
+ tmp = __builtin_msa_maddv_w(tmp, lhs, __builtin_msa_splati_w(rhs, Lane));
+ StoreInt32x4(acc, tmp);
+}
+
+template <>
+struct LoadContiguousImpl<RegBlockUint8<8, 8>> {
+ static RegBlockUint8<8, 8> Run(const std::uint8_t* src) {
+ RegBlockUint8<8, 8> result;
+ for (int i = 0; i < 4; i++) {
+ result.buf.reg[i] = LoadUint8x16(src + 16 * i);
+ }
+ return result;
+ }
+};
+
+template <>
+struct LoadContiguousImpl<RegBlockInt32<8, 8>> {
+ static RegBlockInt32<8, 8> Run(const std::int32_t* src) {
+ RegBlockInt32<8, 8> result;
+ for (int i = 0; i < 16; i++) {
+ result.buf.reg[i] = LoadInt32x4(src + 4 * i);
+ }
+ return result;
+ }
+};
+
+} // end namespace gemmlowp
+
+#include "simd_wrappers_common_neon_sse.h"
+
+#endif // GEMMLOWP_INTERNAL_SIMD_WRAPPERS_MSA_H_
namespace gemmlowp {
using Int32x4 = int32x4_t;
+using Int16x4 = int16x4_t;
+using Int16x8 = int16x8_t;
using Uint8x8 = uint8x8_t;
template <int ScalarCount>
typename std::conditional<ScalarCount >= 4, Int32x4, std::int32_t>::type;
};
+template <int ScalarCount>
+struct RegisterType<std::int16_t, ScalarCount> {
+ using Type = typename std::conditional<
+ ScalarCount >= 8, Int16x8,
+ typename std::conditional<ScalarCount >= 4, Int16x4,
+ std::int16_t>::type>::type;
+};
+
template <int ScalarCount>
struct RegisterType<std::uint8_t, ScalarCount> {
using Type = typename std::conditional<
};
inline Int32x4 LoadInt32x4(const std::int32_t* src) { return vld1q_s32(src); }
+inline Int16x4 LoadInt16x4(const std::int16_t* src) { return vld1_s16(src); }
+inline Int16x8 LoadInt16x8(const std::int16_t* src) { return vld1q_s16(src); }
inline void StoreInt32x4(std::int32_t* dst, Int32x4 value) {
vst1q_s32(dst, value);
}
+inline void StoreInt16x4(std::int16_t* dst, Int16x4 value) {
+ vst1_s16(dst, value);
+}
+
+inline void StoreInt16x8(std::int16_t* dst, Int16x8 value) {
+ vst1q_s16(dst, value);
+}
+
template <int Lane>
std::int32_t GetLane(Int32x4 value) {
return vgetq_lane_s32(value, Lane);
}
}
+template <>
+struct LoadContiguousImpl<RegBlockInt16<8, 8>> {
+ static RegBlockInt16<8, 8> Run(const std::int16_t* src) {
+ RegBlockInt16<8, 8> result;
+ for (int i = 0; i < 8; i++) {
+ result.buf.reg[i] = vld1q_s16(src + 8 * i);
+ }
+ return result;
+ }
+};
+
template <>
struct LoadContiguousImpl<RegBlockUint8<8, 8>> {
static RegBlockUint8<8, 8> Run(const std::uint8_t* src) {
namespace gemmlowp {
using Int32x4 = __m128i;
+using Int16x8 = __m128i;
using Uint8x16 = __m128i;
template <int ScalarCount>
typename std::conditional<ScalarCount >= 4, Int32x4, std::int32_t>::type;
};
+template <int ScalarCount>
+struct RegisterType<std::int16_t, ScalarCount> {
+ using Type =
+ typename std::conditional<ScalarCount >= 8, Int16x8, std::int16_t>::type;
+};
+
template <int ScalarCount>
struct RegisterType<std::uint8_t, ScalarCount> {
using Type = typename std::conditional<
return _mm_loadu_si128(reinterpret_cast<const Int32x4*>(src));
}
+inline Int16x8 LoadInt16x8(const std::int16_t* src) {
+ return _mm_loadu_si128(reinterpret_cast<const Int16x8*>(src));
+}
+
inline void StoreInt32x4(std::int32_t* dst, Int32x4 value) {
_mm_storeu_si128(reinterpret_cast<__m128i*>(dst), value);
}
+inline void StoreInt16x8(std::int16_t* dst, Int16x8 value) {
+ _mm_storeu_si128(reinterpret_cast<__m128i*>(dst), value);
+}
+
inline Uint8x16 LoadUint8x16(const std::uint8_t* src) {
return _mm_loadu_si128(reinterpret_cast<const Uint8x16*>(src));
}
}
};
+template <>
+struct LoadContiguousImpl<RegBlockInt16<8, 8>> {
+ static RegBlockInt16<8, 8> Run(const std::int16_t* src) {
+ RegBlockInt16<8, 8> result;
+ for (int i = 0; i < 8; i++) {
+ result.buf.reg[i] = LoadInt16x8(src + 8 * i);
+ }
+ return result;
+ }
+};
+
} // end namespace gemmlowp
#include "simd_wrappers_common_neon_sse.h"
Allocator* allocator = context->allocator();
BlockParams block_params;
- block_params.Init<KernelFormat>(rows, cols, depth, 1,
- context->l1_bytes_to_use(),
- context->l2_bytes_to_use(),
- context->l2_rhs_factor());
+ block_params.Init<KernelFormat>(
+ rows, cols, depth, 1, context->l1_bytes_to_use(),
+ context->l2_bytes_to_use(), context->l2_rhs_factor());
#ifdef GEMMLOWP_PROFILING_SIZES
// Using a static map of label strings. Not reentrant at all!
#ifndef GEMMLOWP_PROFILING_INSTRUMENTATION_H_
#define GEMMLOWP_PROFILING_INSTRUMENTATION_H_
-#include <pthread.h>
#include <cstdio>
#ifndef GEMMLOWP_USE_STLPORT
#else
#include <stdint.h>
namespace std {
-using ::uint8_t;
-using ::uint16_t;
-using ::uint32_t;
-using ::int8_t;
using ::int16_t;
using ::int32_t;
+using ::int8_t;
using ::size_t;
+using ::uint16_t;
+using ::uint32_t;
+using ::uint8_t;
using ::uintptr_t;
-}
+} // namespace std
#endif
#include <algorithm>
#include <set>
#endif
-// We should always use C++11 thread_local; unfortunately that
-// isn't fully supported on Apple yet.
-#ifdef __APPLE__
-#define GEMMLOWP_THREAD_LOCAL static __thread
-#define GEMMLOWP_USING_OLD_THREAD_LOCAL
-#else
-#define GEMMLOWP_THREAD_LOCAL thread_local
-#endif
+#include "./pthread_everywhere.h"
namespace gemmlowp {
};
inline ThreadInfo& ThreadLocalThreadInfo() {
-#ifdef GEMMLOWP_USING_OLD_THREAD_LOCAL
- // We're leaking this ThreadInfo structure, because Apple doesn't support
- // non-trivial constructors or destructors for their __thread type modifier.
- GEMMLOWP_THREAD_LOCAL ThreadInfo* i = nullptr;
- if (i == nullptr) {
- i = new ThreadInfo();
+ static pthread_key_t key;
+ static auto DeleteThreadInfo = [](void* threadInfoPtr) {
+ ThreadInfo* threadInfo = static_cast<ThreadInfo*>(threadInfoPtr);
+ if (threadInfo) {
+ delete threadInfo;
+ }
+ };
+
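+  // The function-local static ensures (via C++11 thread-safe static
+  // initialization) that pthread_key_create runs exactly once; key_result
+  // exists only to force that one-time call and is otherwise unused.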
+ static int key_result = pthread_key_create(&key, DeleteThreadInfo);
+
+ ThreadInfo* threadInfo = static_cast<ThreadInfo*>(pthread_getspecific(key));
+ if (!threadInfo) {
+ threadInfo = new ThreadInfo();
+ pthread_setspecific(key, threadInfo);
}
- return *i;
-#else
- GEMMLOWP_THREAD_LOCAL ThreadInfo i;
- return i;
-#endif
+ return *threadInfo;
}
// ScopedProfilingLabel is how one instruments code for profiling
--- /dev/null
+// Copyright 2017 The Gemmlowp Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// pthread_everywhere.h: Either includes <pthread.h> or implements a
+// subset of pthread functionality on top of C++11 <thread> for portability.
+
+#ifndef GEMMLOWP_PROFILING_PTHREAD_EVERYWHERE_H_
+#define GEMMLOWP_PROFILING_PTHREAD_EVERYWHERE_H_
+
+#ifndef _WIN32
+#define GEMMLOWP_USE_PTHREAD
+#endif
+
+#if defined GEMMLOWP_USE_PTHREAD
+#include <pthread.h>
+#else
+// Implement a small subset of pthread on top of C++11 threads.
+// The function signatures differ from true pthread functions in two ways:
+// - True pthread functions return int error codes, ours return void.
+// Rationale: the C++11 <thread> equivalent functions return void
+// and use exceptions to report errors; we don't want to deal with
+// exceptions in this code, so we couldn't meaningfully return errors
+// in the polyfill. Also, the gemmlowp code using these pthread functions
+// never checks their return values anyway.
+// - True pthread *_create/*_init functions take pointers to 'attribute'
+// structs; ours take nullptr_t. That is because gemmlowp always passes
+// nullptr at the moment, so any support we coded for non-null
+// attributes would go unused.
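+// Illustrative use (a sketch, not part of this header): the polyfill covers
+// the narrow pattern gemmlowp relies on, e.g.
+//   pthread_mutex_t m;
+//   pthread_mutex_init(&m, nullptr);
+//   pthread_mutex_lock(&m);
+//   pthread_mutex_unlock(&m);
+//   pthread_mutex_destroy(&m);
+// which maps to new/lock/unlock/delete on a std::mutex when the polyfill is
+// active, and to the real pthread calls otherwise.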
+#include <condition_variable>
+#include <cstddef>
+#include <mutex>
+#include <thread>
+namespace gemmlowp {
+using pthread_t = std::thread *;
+using pthread_mutex_t = std::mutex *;
+using pthread_cond_t = std::condition_variable *;
+inline void pthread_create(pthread_t *thread, std::nullptr_t,
+ void *(*start_routine)(void *), void *arg) {
+ *thread = new std::thread(start_routine, arg);
+}
+inline void pthread_join(pthread_t thread, std::nullptr_t) { thread->join(); }
+inline void pthread_mutex_init(pthread_mutex_t *mutex, std::nullptr_t) {
+ *mutex = new std::mutex;
+}
+inline void pthread_mutex_lock(pthread_mutex_t *mutex) { (*mutex)->lock(); }
+inline void pthread_mutex_unlock(pthread_mutex_t *mutex) { (*mutex)->unlock(); }
+inline void pthread_mutex_destroy(pthread_mutex_t *mutex) { delete *mutex; }
+inline void pthread_cond_init(pthread_cond_t *cond, std::nullptr_t) {
+ *cond = new std::condition_variable;
+}
+inline void pthread_cond_signal(pthread_cond_t *cond) { (*cond)->notify_one(); }
+inline void pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex) {
+ std::unique_lock<std::mutex> lock(**mutex, std::adopt_lock);
+ (*cond)->wait(lock);
+  // Detach the lock from the mutex, so that the lock is not released
+  // when we leave this context.
+ lock.release();
+}
+inline void pthread_cond_destroy(pthread_cond_t *cond) { delete *cond; }
+} // end namespace gemmlowp
+#endif
+
+#endif // GEMMLOWP_PROFILING_PTHREAD_EVERYWHERE_H_
};
// This output stage takes int32 values and returns still int32 values,
-// but "quantized down" to the uint8 scale; in other words, its output
-// is typically what one would then clamp to [0..255] and cast to uint8
+// but "quantized down" to a different scale; for example, in a pipeline
+// that outputs uint8 values in [0..255], the output of this stage would be
+// int32 values ready to be clamped to [0..255] and cast to uint8
// (see OutputStageSaturatingCastToUint8).
//
// This "quantization down" process depends on 3 parameters,
// expansions that implicitly rely on 0-padding. If 0 were not
// a representable value, such operations would have to pad
// using a nonzero value, introducing bias in the computation.
-struct OutputStageQuantizeDownInt32ToUint8ScaleByFixedPoint {
+struct OutputStageQuantizeDownInt32ByFixedPoint {
std::int32_t result_fixedpoint_multiplier;
std::int32_t result_shift;
std::int32_t result_offset_after_shift;
};
+// OutputStageQuantizeDownInt32ToUint8ScaleByFixedPoint is the old deprecated
+// name of OutputStageQuantizeDownInt32ByFixedPoint, before we noticed that
+// there really wasn't anything Uint8-specific about it.
+using OutputStageQuantizeDownInt32ToUint8ScaleByFixedPoint =
+    OutputStageQuantizeDownInt32ByFixedPoint;
+
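+// As a rough sketch of how the three parameters are typically applied to each
+// input value (in terms of the RoundingDivideByPOT and
+// SaturatingRoundingDoublingHighMul helpers from fixedpoint/fixedpoint.h; see
+// the output stage implementations for the authoritative definition):
+//
+//   scaled = SaturatingRoundingDoublingHighMul(input,
+//                                              result_fixedpoint_multiplier);
+//   result = RoundingDivideByPOT(scaled, result_shift)
+//            + result_offset_after_shift;
+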
+// Variant of OutputStageQuantizeDownInt32ByFixedPoint where the 'shift'
+// is not necessarily just a right shift, so we can represent multipliers
+// greater than 1. This takes a result_exponent parameter; when it's
+// <= 0, this is equivalent to OutputStageQuantizeDownInt32ByFixedPoint
+// with result_shift = -result_exponent.
+// In the general case, this consists of first left-shifting by
+// std::max(result_exponent, 0), before doing the same as
+// OutputStageQuantizeDownInt32ByFixedPoint with
+// result_shift = std::max(-result_exponent, 0).
+struct OutputStageScaleInt32ByFixedPointAndExponent {
+ std::int32_t result_fixedpoint_multiplier;
+ std::int32_t result_exponent;
+ std::int32_t result_offset_after_shift;
+};
+
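+// A small worked example of the convention above: result_exponent = -3 makes
+// this stage behave exactly like OutputStageQuantizeDownInt32ByFixedPoint with
+// result_shift = 3 (a pure right shift), while result_exponent = 2 first
+// left-shifts the input by 2 and then applies result_shift = 0.
+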
// This output stage takes int32 values that are expected to be already
// on the final uint8 scale, but not necessarily in the [0..255] range.
// It clamps them to the [0..255] range and returns them cast to uint8.
struct OutputStageSaturatingCastToUint8 {};
+// This output stage takes int32 values that are expected to be already
+// on the final int16 scale, but not necessarily in the [-32768..32767] range.
+// It clamps them to the [-32768..32767] range and returns them cast to int16.
+struct OutputStageSaturatingCastToInt16 {};
+
// This output stage depends on a "bias vector" that should contain int32
// entries, and be either a row-vector of the same number of columns as the
// result matrix, or a column-vector of the same number of rows as the
fi
if [ $TEST == "x86" ]; then
make -f Makefile.travis unittest
-fi
+fi
#include <cassert>
#include <cstdint>
#include <cstdlib>
+#include <cstring>
#include <iostream>
#include <random>
#include <type_traits>
-#if !defined __arm__ && !defined __aarch64__
-#error This benchmark assumes ARM (for inline assembly sections).
+#if !defined(__arm__) && !defined(__aarch64__) && \
+ !(defined(__mips) && (__mips_isa_rev >= 5) && defined(__mips_msa))
+#error This benchmark assumes ARM or MIPS (for intrinsics and inline assembly sections).
#endif
+#if defined(__arm__) || defined(__aarch64__)
#include <arm_neon.h>
+#endif
+
+#if defined(__mips)
+#include <msa.h>
+
+// Some convenience macros to hide differences between MIPS32 and MIPS64.
+#ifdef __LP64__
+#define GEMMLOWP_MIPS_XADDIU "daddiu"
+#else
+#define GEMMLOWP_MIPS_XADDIU "addiu"
+#endif
+#endif
// Typically one wants to fit in L1 cache, and GEMM implementations
// are carefully optimized to tune their access patterns to that effect.
}
};
+#ifdef __ARM_FEATURE_DOTPROD
+// Kernels utilizing the Armv8.2 Dot Product extension.
+//
+// The dot product instructions work by taking 4 consecutive 8-bit depth
+// values from each operand, multiplying the 4 pairs together and
+// accumulating all the results into the corresponding 32-bit accumulator
+// lane. As such, the operation is identical to a 32-bit instruction (like
+// FMLA used in SGEMM), except that 4 depth values are processed at a time
+// instead of 1.
+
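+// As an illustrative scalar sketch (not part of the kernels), the by-element
+// form used below,
+//   udot vd.4s, vn.16b, vm.b[k]
+// updates each 32-bit lane i of vd as
+//   vd[i] += vn[4*i+0]*vm[4*k+0] + vn[4*i+1]*vm[4*k+1]
+//          + vn[4*i+2]*vm[4*k+2] + vn[4*i+3]*vm[4*k+3]
+// where all the byte operands are unsigned and the accumulation is 32-bit.
+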
+// Thus, this first kernel is a carbon copy of
+// "NEON_64bit_GEMM_Float32_WithScalar_A57" (which should provide good
+// performance for most processors) below with the opcode (fmla -> udot) and
+// types (float32 -> uint8/uint32) changed.
+//
+// A signed version of this kernel could be produced by replacing "udot"
+// with "sdot" - performance should be identical to this udot kernel.
+struct NEON_64bit_GEMM_Uint8Operands_Uint32Accumulators_dotproduct {
+ typedef std::uint8_t OperandType;
+ typedef std::uint32_t AccumulatorType;
+ typedef KernelFormat<
+ KernelSideFormat<CellFormat<4, 4, CellOrder::WidthMajor>, 3>,
+ KernelSideFormat<CellFormat<4, 4, CellOrder::WidthMajor>, 2> >
+ Format;
+ static void Run(const OperandType* lhs_ptr, const OperandType* rhs_ptr,
+ AccumulatorType* accum_ptr, int depth) {
+ asm volatile(
+ // Load accumulators
+ "mov x0, %[accum_ptr]\n"
+ "ld1 {v8.4s}, [x0], #16\n"
+ "ld1 {v16.4s}, [x0], #16\n"
+ "ld1 {v24.4s}, [x0], #16\n"
+ "ld1 {v9.4s}, [x0], #16\n"
+ "ld1 {v17.4s}, [x0], #16\n"
+ "ld1 {v25.4s}, [x0], #16\n"
+ "ld1 {v10.4s}, [x0], #16\n"
+ "ld1 {v18.4s}, [x0], #16\n"
+ "ld1 {v26.4s}, [x0], #16\n"
+ "ld1 {v11.4s}, [x0], #16\n"
+ "ld1 {v19.4s}, [x0], #16\n"
+ "ld1 {v27.4s}, [x0], #16\n"
+ "ld1 {v12.4s}, [x0], #16\n"
+ "ld1 {v20.4s}, [x0], #16\n"
+ "ld1 {v28.4s}, [x0], #16\n"
+ "ld1 {v13.4s}, [x0], #16\n"
+ "ld1 {v21.4s}, [x0], #16\n"
+ "ld1 {v29.4s}, [x0], #16\n"
+ "ld1 {v14.4s}, [x0], #16\n"
+ "ld1 {v22.4s}, [x0], #16\n"
+ "ld1 {v30.4s}, [x0], #16\n"
+ "ld1 {v15.4s}, [x0], #16\n"
+ "ld1 {v23.4s}, [x0], #16\n"
+ "ld1 {v31.4s}, [x0], #16\n"
+
+ // The start of the loop assumes first Rhs cell is already loaded, so
+ // do it here for first iteration.
+ "ld1 {v0.16b}, [%[rhs_ptr]], #16\n"
+
+ // And the same for the first Lhs cell.
+ "ld1 {v2.16b}, [%[lhs_ptr]], #16\n"
+
+ GEMMLOWP_LABEL_LOOP
+ ":\n"
+
+ // Start the MACs at the head of the loop - 1st cell from each side
+ // already loaded.
+ "udot v8.4s, v2.16b, v0.b[0]\n"
+ "udot v9.4s, v2.16b, v0.b[1]\n"
+ "ld1 {v1.16b}, [%[rhs_ptr]], #16\n" // Load second Rhs cell.
+ "udot v10.4s, v2.16b, v0.b[2]\n"
+ "udot v11.4s, v2.16b, v0.b[3]\n"
+ "ld1 {v3.16b}, [%[lhs_ptr]], #16\n" // Load second Lhs cell.
+ "udot v12.4s, v2.16b, v1.b[0]\n"
+ "udot v13.4s, v2.16b, v1.b[1]\n"
+ "ld1 {v4.16b}, [%[lhs_ptr]], #16\n" // Load third Lhs cell.
+ "udot v14.4s, v2.16b, v1.b[2]\n"
+ "udot v15.4s, v2.16b, v1.b[3]\n"
+ "ld1 {v2.16b}, [%[lhs_ptr]], #16\n" // Done with first Lhs cell - load
+ // for the next iteration early.
+ "udot v16.4s, v3.16b, v0.b[0]\n"
+ "udot v17.4s, v3.16b, v0.b[1]\n"
+ "udot v18.4s, v3.16b, v0.b[2]\n"
+ "udot v19.4s, v3.16b, v0.b[3]\n"
+ "udot v20.4s, v3.16b, v1.b[0]\n"
+ "udot v21.4s, v3.16b, v1.b[1]\n"
+ "udot v22.4s, v3.16b, v1.b[2]\n"
+ "udot v23.4s, v3.16b, v1.b[3]\n"
+ "udot v24.4s, v4.16b, v0.b[0]\n"
+ "udot v25.4s, v4.16b, v0.b[1]\n"
+ "udot v26.4s, v4.16b, v0.b[2]\n"
+ "udot v27.4s, v4.16b, v0.b[3]\n"
+ "ld1 {v0.16b}, [%[rhs_ptr]], #16\n" // Done with the first Rhs cell -
+ // load for the next iteration early.
+ "udot v28.4s, v4.16b, v1.b[0]\n"
+ "udot v29.4s, v4.16b, v1.b[1]\n"
+
+ // Loop. Decrement loop index (depth) by 4 as udot processes 4
+ // depth values.
+ "subs %w[depth], %w[depth], #4\n"
+ "udot v30.4s, v4.16b, v1.b[2]\n"
+ "udot v31.4s, v4.16b, v1.b[3]\n"
+
+ "bne " GEMMLOWP_LABEL_LOOP
+ "b\n"
+
+ // Store accumulators
+ "mov x0, %[accum_ptr]\n"
+ "st1 {v8.16b}, [x0], #16\n"
+ "st1 {v16.16b}, [x0], #16\n"
+ "st1 {v24.16b}, [x0], #16\n"
+ "st1 {v9.16b}, [x0], #16\n"
+ "st1 {v17.16b}, [x0], #16\n"
+ "st1 {v25.16b}, [x0], #16\n"
+ "st1 {v10.16b}, [x0], #16\n"
+ "st1 {v18.16b}, [x0], #16\n"
+ "st1 {v26.16b}, [x0], #16\n"
+ "st1 {v11.16b}, [x0], #16\n"
+ "st1 {v19.16b}, [x0], #16\n"
+ "st1 {v27.16b}, [x0], #16\n"
+ "st1 {v12.16b}, [x0], #16\n"
+ "st1 {v20.16b}, [x0], #16\n"
+ "st1 {v28.16b}, [x0], #16\n"
+ "st1 {v13.16b}, [x0], #16\n"
+ "st1 {v21.16b}, [x0], #16\n"
+ "st1 {v29.16b}, [x0], #16\n"
+ "st1 {v14.16b}, [x0], #16\n"
+ "st1 {v22.16b}, [x0], #16\n"
+ "st1 {v30.16b}, [x0], #16\n"
+ "st1 {v15.16b}, [x0], #16\n"
+ "st1 {v23.16b}, [x0], #16\n"
+ "st1 {v31.16b}, [x0], #16\n"
+ : // outputs
+ [lhs_ptr] "+r"(lhs_ptr), [rhs_ptr] "+r"(rhs_ptr),
+ [depth] "+r"(depth)
+ : // inputs
+ [accum_ptr] "r"(accum_ptr)
+ : // clobbers
+ "cc", "memory", "x0", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7",
+ "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17",
+ "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27",
+ "v28", "v29", "v30", "v31");
+ }
+};
+
+// As above, except tuned for Cortex-A55r1.
+//
+// Similarly, this is a clone of NEON_64bit_GEMM_Float32_WithScalar_A55r1
+// with the names changed.
+struct NEON_64bit_GEMM_Uint8Operands_Uint32Accumulators_dotproduct_A55r1 {
+ typedef std::uint8_t OperandType;
+ typedef std::uint32_t AccumulatorType;
+ typedef KernelFormat<
+ KernelSideFormat<CellFormat<4, 4, CellOrder::WidthMajor>, 3>,
+ KernelSideFormat<CellFormat<4, 4, CellOrder::WidthMajor>, 2> >
+ Format;
+ static void Run(const OperandType* lhs_ptr, const OperandType* rhs_ptr,
+ AccumulatorType* accum_ptr, int depth) {
+ asm volatile(
+ // Load accumulators
+ "mov x0, %[accum_ptr]\n"
+ "ld1 {v8.4s}, [x0], #16\n"
+ "ld1 {v16.4s}, [x0], #16\n"
+ "ld1 {v24.4s}, [x0], #16\n"
+ "ld1 {v9.4s}, [x0], #16\n"
+ "ld1 {v17.4s}, [x0], #16\n"
+ "ld1 {v25.4s}, [x0], #16\n"
+ "ld1 {v10.4s}, [x0], #16\n"
+ "ld1 {v18.4s}, [x0], #16\n"
+ "ld1 {v26.4s}, [x0], #16\n"
+ "ld1 {v11.4s}, [x0], #16\n"
+ "ld1 {v19.4s}, [x0], #16\n"
+ "ld1 {v27.4s}, [x0], #16\n"
+ "ld1 {v12.4s}, [x0], #16\n"
+ "ld1 {v20.4s}, [x0], #16\n"
+ "ld1 {v28.4s}, [x0], #16\n"
+ "ld1 {v13.4s}, [x0], #16\n"
+ "ld1 {v21.4s}, [x0], #16\n"
+ "ld1 {v29.4s}, [x0], #16\n"
+ "ld1 {v14.4s}, [x0], #16\n"
+ "ld1 {v22.4s}, [x0], #16\n"
+ "ld1 {v30.4s}, [x0], #16\n"
+ "ld1 {v15.4s}, [x0], #16\n"
+ "ld1 {v23.4s}, [x0], #16\n"
+ "ld1 {v31.4s}, [x0], #16\n"
+
+ // For details on how this kernel works, see the Float32 kernel below.
+
+ "ldr d0, [%[rhs_ptr]]\n"
+ "ldr x18, [%[rhs_ptr], #8]\n"
+
+ "ldr q2, [%[lhs_ptr]]\n"
+ "ldr q3, [%[lhs_ptr], #16]\n"
+
+ GEMMLOWP_LABEL_LOOP
+ ":\n"
+
+ "udot v8.4s, v2.16b, v0.b[0]\n"
+ "ldr d1, [%[rhs_ptr], #16]\n" // Bottom half of v1
+ "udot v9.4s, v2.16b, v0.b[1]\n"
+ "ins v0.d[1], x18\n" // Finish loading v0
+ "udot v16.4s, v3.16b, v0.b[0]\n" // out of sequence - used to reduce load/use pressure.
+ "ldr x18, [%[rhs_ptr], #24]\n" // Top half of v1 to X register
+ "udot v17.4s, v3.16b, v0.b[1]\n" // out of sequence - used to reduce load/use pressure.
+ "add %[rhs_ptr], %[rhs_ptr], #32\n" // RHS loads complete - increment pointer.
+ "udot v10.4s, v2.16b, v0.b[2]\n"
+ "ldr d4, [%[lhs_ptr], #32]\n" // Bottom half of v4
+ "udot v11.4s, v2.16b, v0.b[3]\n"
+ "ins v1.d[1], x18\n" // Finish loading v1
+ "udot v12.4s, v2.16b, v1.b[0]\n"
+ "ldr x18, [%[lhs_ptr], #40]\n" // Top half of v4 to X register
+ "udot v13.4s, v2.16b, v1.b[1]\n"
+ "add %[lhs_ptr], %[lhs_ptr], #48\n" // LHS loads complete - increment pointer.
+ "udot v14.4s, v2.16b, v1.b[2]\n"
+
+ "udot v15.4s, v2.16b, v1.b[3]\n"
+ "ldr d2, [%[lhs_ptr]]\n" // Bottom half of v2 (for next time)
+ "udot v18.4s, v3.16b, v0.b[2]\n"
+ "ins v4.d[1], x18\n" // Finish loading v4
+ "udot v19.4s, v3.16b, v0.b[3]\n"
+ "ldr x18, [%[lhs_ptr], #8]\n" // Top half of next v2 to X register
+ "udot v20.4s, v3.16b, v1.b[0]\n"
+ "subs %w[depth], %w[depth], #4\n"
+ "udot v21.4s, v3.16b, v1.b[1]\n"
+
+ "udot v22.4s, v3.16b, v1.b[2]\n"
+
+ "udot v23.4s, v3.16b, v1.b[3]\n"
+ "ldr d3, [%[lhs_ptr], #16]\n" // Bottom half of v3 (for next time)
+ "udot v24.4s, v4.16b, v0.b[0]\n"
+ "ins v2.d[1], x18\n" // Finish loading next v2
+ "udot v25.4s, v4.16b, v0.b[1]\n"
+ "ldr x18, [%[lhs_ptr], #24]\n" // Top half of next v3 to X register
+ "udot v26.4s, v4.16b, v0.b[2]\n"
+
+ "udot v27.4s, v4.16b, v0.b[3]\n"
+ "ldr d0, [%[rhs_ptr]]\n" // Bottom half of v0 (for next time)
+ "udot v28.4s, v4.16b, v1.b[0]\n"
+ "ins v3.d[1], x18\n" // Finish loading next v3
+ "udot v29.4s, v4.16b, v1.b[1]\n"
+ "ldr x18, [%[rhs_ptr], #8]\n" // Top half of next v0 to X register
+ "udot v30.4s, v4.16b, v1.b[2]\n"
+
+ "udot v31.4s, v4.16b, v1.b[3]\n"
+ "bne " GEMMLOWP_LABEL_LOOP "b\n"
+
+ // Store accumulators
+ "mov x0, %[accum_ptr]\n"
+ "st1 {v8.4s}, [x0], #16\n"
+ "st1 {v16.4s}, [x0], #16\n"
+ "st1 {v24.4s}, [x0], #16\n"
+ "st1 {v9.4s}, [x0], #16\n"
+ "st1 {v17.4s}, [x0], #16\n"
+ "st1 {v25.4s}, [x0], #16\n"
+ "st1 {v10.4s}, [x0], #16\n"
+ "st1 {v18.4s}, [x0], #16\n"
+ "st1 {v26.4s}, [x0], #16\n"
+ "st1 {v11.4s}, [x0], #16\n"
+ "st1 {v19.4s}, [x0], #16\n"
+ "st1 {v27.4s}, [x0], #16\n"
+ "st1 {v12.4s}, [x0], #16\n"
+ "st1 {v20.4s}, [x0], #16\n"
+ "st1 {v28.4s}, [x0], #16\n"
+ "st1 {v13.4s}, [x0], #16\n"
+ "st1 {v21.4s}, [x0], #16\n"
+ "st1 {v29.4s}, [x0], #16\n"
+ "st1 {v14.4s}, [x0], #16\n"
+ "st1 {v22.4s}, [x0], #16\n"
+ "st1 {v30.4s}, [x0], #16\n"
+ "st1 {v15.4s}, [x0], #16\n"
+ "st1 {v23.4s}, [x0], #16\n"
+ "st1 {v31.4s}, [x0], #16\n"
+ : // outputs
+ [lhs_ptr] "+r"(lhs_ptr), [rhs_ptr] "+r"(rhs_ptr),
+ [depth] "+r"(depth)
+ : // inputs
+ [accum_ptr] "r"(accum_ptr)
+ : // clobbers
+ "cc", "memory", "x0", "x18", "v0", "v1", "v2", "v3", "v4", "v5", "v6",
+ "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16",
+ "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26",
+ "v27", "v28", "v29", "v30", "v31");
+ }
+};
+#endif // __ARM_FEATURE_DOTPROD
+
// We don't actually use int32*int32 in production. This is just an
// experiment to help dissociate the effect of integer-vs-float, from the
// effect of operands width.
};
#endif
+// Faster kernel contributed by ARM. Tuned for A55r1.
+struct NEON_64bit_GEMM_Float32_WithScalar_A55r1 {
+ typedef float OperandType;
+ typedef float AccumulatorType;
+ typedef KernelFormat<
+ KernelSideFormat<CellFormat<4, 1, CellOrder::DepthMajor>, 3>,
+ KernelSideFormat<CellFormat<4, 1, CellOrder::DepthMajor>, 2> >
+ Format;
+ static void Run(const OperandType* lhs_ptr, const OperandType* rhs_ptr,
+ AccumulatorType* accum_ptr, int depth) {
+ asm volatile(
+ // Load accumulators
+ "mov x0, %[accum_ptr]\n"
+ "ld1 {v8.4s}, [x0], #16\n"
+ "ld1 {v16.4s}, [x0], #16\n"
+ "ld1 {v24.4s}, [x0], #16\n"
+ "ld1 {v9.4s}, [x0], #16\n"
+ "ld1 {v17.4s}, [x0], #16\n"
+ "ld1 {v25.4s}, [x0], #16\n"
+ "ld1 {v10.4s}, [x0], #16\n"
+ "ld1 {v18.4s}, [x0], #16\n"
+ "ld1 {v26.4s}, [x0], #16\n"
+ "ld1 {v11.4s}, [x0], #16\n"
+ "ld1 {v19.4s}, [x0], #16\n"
+ "ld1 {v27.4s}, [x0], #16\n"
+ "ld1 {v12.4s}, [x0], #16\n"
+ "ld1 {v20.4s}, [x0], #16\n"
+ "ld1 {v28.4s}, [x0], #16\n"
+ "ld1 {v13.4s}, [x0], #16\n"
+ "ld1 {v21.4s}, [x0], #16\n"
+ "ld1 {v29.4s}, [x0], #16\n"
+ "ld1 {v14.4s}, [x0], #16\n"
+ "ld1 {v22.4s}, [x0], #16\n"
+ "ld1 {v30.4s}, [x0], #16\n"
+ "ld1 {v15.4s}, [x0], #16\n"
+ "ld1 {v23.4s}, [x0], #16\n"
+ "ld1 {v31.4s}, [x0], #16\n"
+
+ // A55r1 requires a hybrid of the A53 and standard approaches.
+ //
+ // Like A53, this processor prefers 64-bit loads.
+ //
+ // Unlike A53, it is capable of dual-issuing a 64-bit vector load
+ // (or INS) with a FMLA instruction.
+ //
+ // Therefore we aim to issue an FMLA instruction every cycle.
+ // Alongside three FMLAs we can dual issue a (vector) 64-bit load, a
+ // scalar 64-bit load and finally an INS to replicate the effect of
+ // a single 128-bit load.
+ //
+ // The loop contains 24 FMLA instructions, and 5 vector registers
+ // need to be loaded, consuming 15 dual issue slots. This leaves 9
+ // dual issue slots. Four of these are used for loop housekeeping
+ // (2 pointer adds, 1 counter update and 1 branch), leaving 5 left
+ // over (marked by blank lines).
+ //
+ // Choice of x18 to store the upper halves on their way into the
+ // vector registers is arbitrary. Added to the clobber list so that
+ // the compiler will make it available.
+
+
+ // At the start of the loop, it is assumed that v0 is "half loaded" -
+ // bottom half in place in d0 and the upper half in x18 ready to
+ // insert. So set that up here for the first iteration:
+ "ldr d0, [%[rhs_ptr]]\n" // Bottom half of first Rhs cell
+ "ldr x18, [%[rhs_ptr], #8]\n" // Upper half
+
+ // v2-v3 should be fully loaded - as it's outside the loop proper it's fine
+ // to use a 128-bit load here.
+ "ldr q2, [%[lhs_ptr]]\n" // first Lhs cell
+ "ldr q3, [%[lhs_ptr], #16]\n" // second Lhs cell
+
+ GEMMLOWP_LABEL_LOOP
+ ":\n"
+
+ "fmla v8.4s, v2.4s, v0.s[0]\n"
+ "ldr d1, [%[rhs_ptr], #16]\n" // Bottom half of v1
+ "fmla v9.4s, v2.4s, v0.s[1]\n"
+ "ins v0.d[1], x18\n" // Finish loading v0
+ "fmla v16.4s, v3.4s, v0.s[0]\n" // out of sequence - used to reduce load/use pressure.
+ "ldr x18, [%[rhs_ptr], #24]\n" // Top half of v1 to X register
+ "fmla v17.4s, v3.4s, v0.s[1]\n" // out of sequence - used to reduce load/use pressure.
+ "add %[rhs_ptr], %[rhs_ptr], #32\n" // RHS loads complete - increment pointer.
+ "fmla v10.4s, v2.4s, v0.s[2]\n"
+ "ldr d4, [%[lhs_ptr], #32]\n" // Bottom half of v4
+ "fmla v11.4s, v2.4s, v0.s[3]\n"
+ "ins v1.d[1], x18\n" // Finish loading v1
+ "fmla v12.4s, v2.4s, v1.s[0]\n"
+ "ldr x18, [%[lhs_ptr], #40]\n" // Top half of v4 to X register
+ "fmla v13.4s, v2.4s, v1.s[1]\n"
+ "add %[lhs_ptr], %[lhs_ptr], #48\n" // LHS loads complete - increment pointer.
+ "fmla v14.4s, v2.4s, v1.s[2]\n"
+
+ "fmla v15.4s, v2.4s, v1.s[3]\n"
+ "ldr d2, [%[lhs_ptr]]\n" // Bottom half of v2 (for next time)
+ "fmla v18.4s, v3.4s, v0.s[2]\n"
+ "ins v4.d[1], x18\n" // Finish loading v4
+ "fmla v19.4s, v3.4s, v0.s[3]\n"
+ "ldr x18, [%[lhs_ptr], #8]\n" // Top half of next v2 to X register
+ "fmla v20.4s, v3.4s, v1.s[0]\n"
+ "subs %w[depth], %w[depth], #1\n"
+ "fmla v21.4s, v3.4s, v1.s[1]\n"
+
+ "fmla v22.4s, v3.4s, v1.s[2]\n"
+
+ "fmla v23.4s, v3.4s, v1.s[3]\n"
+ "ldr d3, [%[lhs_ptr], #16]\n" // Bottom half of v3 (for next time)
+ "fmla v24.4s, v4.4s, v0.s[0]\n"
+ "ins v2.d[1], x18\n" // Finish loading next v2
+ "fmla v25.4s, v4.4s, v0.s[1]\n"
+ "ldr x18, [%[lhs_ptr], #24]\n" // Top half of next v3 to X register
+ "fmla v26.4s, v4.4s, v0.s[2]\n"
+
+ "fmla v27.4s, v4.4s, v0.s[3]\n"
+ "ldr d0, [%[rhs_ptr]]\n" // Bottom half of v0 (for next time)
+ "fmla v28.4s, v4.4s, v1.s[0]\n"
+ "ins v3.d[1], x18\n" // Finish loading next v3
+ "fmla v29.4s, v4.4s, v1.s[1]\n"
+ "ldr x18, [%[rhs_ptr], #8]\n" // Top half of next v0 to X register
+ "fmla v30.4s, v4.4s, v1.s[2]\n"
+
+ "fmla v31.4s, v4.4s, v1.s[3]\n"
+ "bne " GEMMLOWP_LABEL_LOOP "b\n"
+
+ // Store accumulators
+ "mov x0, %[accum_ptr]\n"
+ "st1 {v8.4s}, [x0], #16\n"
+ "st1 {v16.4s}, [x0], #16\n"
+ "st1 {v24.4s}, [x0], #16\n"
+ "st1 {v9.4s}, [x0], #16\n"
+ "st1 {v17.4s}, [x0], #16\n"
+ "st1 {v25.4s}, [x0], #16\n"
+ "st1 {v10.4s}, [x0], #16\n"
+ "st1 {v18.4s}, [x0], #16\n"
+ "st1 {v26.4s}, [x0], #16\n"
+ "st1 {v11.4s}, [x0], #16\n"
+ "st1 {v19.4s}, [x0], #16\n"
+ "st1 {v27.4s}, [x0], #16\n"
+ "st1 {v12.4s}, [x0], #16\n"
+ "st1 {v20.4s}, [x0], #16\n"
+ "st1 {v28.4s}, [x0], #16\n"
+ "st1 {v13.4s}, [x0], #16\n"
+ "st1 {v21.4s}, [x0], #16\n"
+ "st1 {v29.4s}, [x0], #16\n"
+ "st1 {v14.4s}, [x0], #16\n"
+ "st1 {v22.4s}, [x0], #16\n"
+ "st1 {v30.4s}, [x0], #16\n"
+ "st1 {v15.4s}, [x0], #16\n"
+ "st1 {v23.4s}, [x0], #16\n"
+ "st1 {v31.4s}, [x0], #16\n"
+ : // outputs
+ [lhs_ptr] "+r"(lhs_ptr), [rhs_ptr] "+r"(rhs_ptr),
+ [depth] "+r"(depth)
+ : // inputs
+ [accum_ptr] "r"(accum_ptr)
+ : // clobbers
+ "cc", "memory", "x0", "x18", "v0", "v1", "v2", "v3", "v4", "v5", "v6",
+ "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16",
+ "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26",
+ "v27", "v28", "v29", "v30", "v31");
+ }
+};
+
#endif // __aarch64__
+#if defined(__arm__) || defined(__aarch64__)
#ifndef __aarch64__
inline int32x4_t vpaddq_s32(int32x4_t a, int32x4_t b) {
const int32x2_t c = vpadd_s32(vget_low_s32(a), vget_high_s32(a));
using NEON_64bit_GEMM_Float32_WithScalar_intrinsics =
NEON_GEMM_Float32_WithScalar_intrinsics<2>;
+#endif // __arm__ || __aarch64__
+
+#ifdef __mips
+// Using 32x32=32 multiplications.
+// 20 MSA regs used:
+// - 12 accumulators
+// - 6 lhs
+// - 1 rhs
+// - 1 temps/zeroes
+// ~55 instructions in the loop.
+struct MSA_GEMM_12x4_Uint8Operands_Uint32Accumulators_intrinsics {
+ typedef std::uint8_t OperandType;
+ typedef std::int32_t AccumulatorType;
+ typedef KernelFormat<
+ KernelSideFormat<CellFormat<4, 2, CellOrder::DepthMajor>, 3>,
+ KernelSideFormat<CellFormat<4, 2, CellOrder::DepthMajor>, 1> >
+ Format;
+ static void Run(const OperandType* lhs_ptr, const OperandType* rhs_ptr,
+ AccumulatorType* accum_ptr, int depth) {
+ const v16i8 zeroes = __builtin_msa_ldi_b(0);
+ v4i32 acc[3][4];
+ // Load accumulators.
+ for (int i = 0; i < 3; i++) {
+ for (int j = 0; j < 4; j++) {
+ acc[i][j] = __builtin_msa_ld_w(accum_ptr + 4 * (i + 3 * j), 0);
+ }
+ }
+
+ while (depth > 0) {
+ // Load 3 x 8 bytes of lhs[] with 2 16-byte overlapped loads.
+ v8i16 lhs[6];
+ lhs[0] = reinterpret_cast<v8i16>(__builtin_msa_ld_b(const_cast<OperandType*>(lhs_ptr), 0));
+ lhs[1] =
+ reinterpret_cast<v8i16>(__builtin_msa_ld_b(const_cast<OperandType*>(lhs_ptr + 8), 0));
+
+ // Zero-extend 8-bit elements of lhs[] to 16 bits.
+ lhs[0] = reinterpret_cast<v8i16>(__builtin_msa_ilvr_b(zeroes,
+ reinterpret_cast<v16i8>(lhs[0])));
+ lhs[2] = reinterpret_cast<v8i16>(__builtin_msa_ilvl_b(zeroes,
+ reinterpret_cast<v16i8>(lhs[1])));
+ lhs[1] = reinterpret_cast<v8i16>(__builtin_msa_ilvr_b(zeroes,
+ reinterpret_cast<v16i8>(lhs[1])));
+
+ // Zero-extend 16-bit elements of lhs[] to 32 bits.
+ lhs[3] = __builtin_msa_ilvl_h(reinterpret_cast<v8i16>(zeroes), lhs[0]);
+ lhs[4] = __builtin_msa_ilvl_h(reinterpret_cast<v8i16>(zeroes), lhs[1]);
+ lhs[5] = __builtin_msa_ilvl_h(reinterpret_cast<v8i16>(zeroes), lhs[2]);
+ lhs[0] = __builtin_msa_ilvr_h(reinterpret_cast<v8i16>(zeroes), lhs[0]);
+ lhs[1] = __builtin_msa_ilvr_h(reinterpret_cast<v8i16>(zeroes), lhs[1]);
+ lhs[2] = __builtin_msa_ilvr_h(reinterpret_cast<v8i16>(zeroes), lhs[2]);
+
+ // Depth 0.
+ for (int j = 0; j < 4; j++) {
+ // Load 1 byte of rhs, making 4 32-bit replicas of it.
+ v4i32 rhs = reinterpret_cast<v4i32>(__builtin_msa_fill_w(rhs_ptr[j]));
+ // Multiply-add into accumulators.
+ for (int i = 0; i < 3; i++) {
+ acc[i][j] = __builtin_msa_maddv_w(acc[i][j], reinterpret_cast<v4i32>(lhs[i]), rhs);
+ }
+ }
+
+ // Depth 1.
+ for (int j = 0; j < 4; j++) {
+ // Load 1 byte of rhs, making 4 32-bit replicas of it.
+ v4i32 rhs = reinterpret_cast<v4i32>(__builtin_msa_fill_w(rhs_ptr[j + 4]));
+ // Multiply-add into accumulators.
+ for (int i = 0; i < 3; i++) {
+ acc[i][j] = __builtin_msa_maddv_w(acc[i][j], reinterpret_cast<v4i32>(lhs[i + 3]), rhs);
+ }
+ }
+
+ lhs_ptr += 24;
+ rhs_ptr += 8;
+ depth -= 2;
+ }
+
+ // Store accumulators.
+ for (int i = 0; i < 3; i++) {
+ for (int j = 0; j < 4; j++) {
+ __builtin_msa_st_w(acc[i][j], accum_ptr + 4 * (i + 3 * j), 0);
+ }
+ }
+ }
+};
+
+// Assembly implementation of the above
+// MSA_GEMM_12x4_Uint8Operands_Uint32Accumulators_intrinsics.
+// Using 32x32=32 multiplications.
+// 20 MSA regs used:
+// - 12 accumulators
+// - 6 lhs
+// - 1 rhs
+// - 1 temps/zeroes
+// ~55 instructions in the loop.
+struct MSA_GEMM_12x4_Uint8Operands_Uint32Accumulators_assembly {
+ typedef std::uint8_t OperandType;
+ typedef std::int32_t AccumulatorType;
+ typedef KernelFormat<
+ KernelSideFormat<CellFormat<4, 2, CellOrder::DepthMajor>, 3>,
+ KernelSideFormat<CellFormat<4, 2, CellOrder::DepthMajor>, 1> >
+ Format;
+ static void Run(OperandType* lhs_ptr, OperandType* rhs_ptr,
+ AccumulatorType* accum_ptr, int depth) {
+ asm volatile(
+ // Load accumulators
+ "ld.w $w0, (0*16)(%[accum_ptr])\n"
+ "ld.w $w4, (1*16)(%[accum_ptr])\n"
+ "ld.w $w8, (2*16)(%[accum_ptr])\n"
+ "ld.w $w1, (3*16)(%[accum_ptr])\n"
+ "ld.w $w5, (4*16)(%[accum_ptr])\n"
+ "ld.w $w9, (5*16)(%[accum_ptr])\n"
+ "ld.w $w2, (6*16)(%[accum_ptr])\n"
+ "ld.w $w6, (7*16)(%[accum_ptr])\n"
+ "ld.w $w10, (8*16)(%[accum_ptr])\n"
+ "ld.w $w3, (9*16)(%[accum_ptr])\n"
+ "ld.w $w7, (10*16)(%[accum_ptr])\n"
+ "ld.w $w11, (11*16)(%[accum_ptr])\n"
+ // Set a temp to all zeroes.
+ "ldi.b $w19, 0\n"
+
+ GEMMLOWP_LABEL_LOOP ":\n"
+ // Overview of register layout:
+ //
+ // A half of the 2x4 cell of Rhs is stored in 32bit in w18.
+ // A 12x2 block of 3 4x2 cells Lhs is stored in 32bit in w12-w17.
+ // A 12x4 block of accumulators is stored in 32bit in w0-w11.
+ //
+ // +------+------+------+------+
+ // Rhs |w18[0]|w18[1]|w18[2]|w18[3]|
+ // +------+------+------+------+
+ //
+ // | | | | |
+ //
+ // Lhs | | | | |
+ //
+ // +---+---+ - - - - +------+------+------+------+
+ // |w12|w15| | w0 | w1 | w2 | w3 |
+ // |w12|w15| | w0 | w1 | w2 | w3 |
+ // |w12|w15| | w0 | w1 | w2 | w3 |
+ // |w12|w15| | w0 | w1 | w2 | w3 |
+ // +---+---+ - - - - +------+------+------+------+
+ // |w13|w16| | w4 | w5 | w6 | w7 |
+ // |w13|w16| | w4 | w5 | w6 | w7 |
+ // |w13|w16| | w4 | w5 | w6 | w7 |
+ // |w13|w16| | w4 | w5 | w6 | w7 |
+ // +---+---+ - - - - +------+------+------+------+
+ // |w14|w17| | w8 | w9 | w10 | w11 |
+ // |w14|w17| | w8 | w9 | w10 | w11 |
+ // |w14|w17| | w8 | w9 | w10 | w11 |
+ // |w14|w17| | w8 | w9 | w10 | w11 |
+ // +---+---+ - - - - +------+------+------+------+
+ //
+ // Accumulator
+
+ // Load 3 x 8 bytes of lhs[] with 2 16-byte overlapped loads.
+ "ld.b $w12, 0(%[lhs_ptr])\n"
+ "ld.b $w13, 8(%[lhs_ptr])\n"
+
+ // Load 4 bytes of rhs[] for depth 0.
+ "lbu $a0, 0(%[rhs_ptr])\n"
+ "lbu $a1, 1(%[rhs_ptr])\n"
+ "lbu $a2, 2(%[rhs_ptr])\n"
+ "lbu $a3, 3(%[rhs_ptr])\n"
+
+ // Zero-extend 8-bit elements of lhs[] to 16 bits.
+ "ilvr.b $w12, $w19, $w12\n"
+ "ilvl.b $w14, $w19, $w13\n"
+ "ilvr.b $w13, $w19, $w13\n"
+ // Zero-extend 16-bit elements of lhs[] to 32 bits.
+ "ilvl.h $w15, $w19, $w12\n"
+ "ilvl.h $w16, $w19, $w13\n"
+ "ilvl.h $w17, $w19, $w14\n"
+ "ilvr.h $w12, $w19, $w12\n"
+ "ilvr.h $w13, $w19, $w13\n"
+ "ilvr.h $w14, $w19, $w14\n"
+
+ // Depth 0.
+ "fill.w $w18, $a0\n"
+ "lbu $a0, 4(%[rhs_ptr])\n"
+ "maddv.w $w0, $w12, $w18\n"
+ "maddv.w $w4, $w13, $w18\n"
+ "maddv.w $w8, $w14, $w18\n"
+ "fill.w $w18, $a1\n"
+ "lbu $a1, 5(%[rhs_ptr])\n"
+ "maddv.w $w1, $w12, $w18\n"
+ "maddv.w $w5, $w13, $w18\n"
+ "maddv.w $w9, $w14, $w18\n"
+ "fill.w $w18, $a2\n"
+ "lbu $a2, 6(%[rhs_ptr])\n"
+ "maddv.w $w2, $w12, $w18\n"
+ "maddv.w $w6, $w13, $w18\n"
+ "maddv.w $w10, $w14, $w18\n"
+ "fill.w $w18, $a3\n"
+ "lbu $a3, 7(%[rhs_ptr])\n"
+ "maddv.w $w3, $w12, $w18\n"
+ "maddv.w $w7, $w13, $w18\n"
+ "maddv.w $w11, $w14, $w18\n"
+
+ // Depth 1.
+ "fill.w $w18, $a0\n"
+ "maddv.w $w0, $w15, $w18\n"
+ "maddv.w $w4, $w16, $w18\n"
+ "maddv.w $w8, $w17, $w18\n"
+ "fill.w $w18, $a1\n"
+ "maddv.w $w1, $w15, $w18\n"
+ "maddv.w $w5, $w16, $w18\n"
+ "maddv.w $w9, $w17, $w18\n"
+ "fill.w $w18, $a2\n"
+ "maddv.w $w2, $w15, $w18\n"
+ "maddv.w $w6, $w16, $w18\n"
+ "maddv.w $w10, $w17, $w18\n"
+ "fill.w $w18, $a3\n"
+ "maddv.w $w3, $w15, $w18\n"
+ "maddv.w $w7, $w16, $w18\n"
+ "maddv.w $w11, $w17, $w18\n"
+
+ "addiu %[depth], -2\n"
+ GEMMLOWP_MIPS_XADDIU " %[lhs_ptr], 24\n"
+ GEMMLOWP_MIPS_XADDIU " %[rhs_ptr], 8\n"
+ "bnez %[depth]," GEMMLOWP_LABEL_LOOP "b\n"
+
+ // Store accumulators.
+ "st.w $w0, (0*16)(%[accum_ptr])\n"
+ "st.w $w4, (1*16)(%[accum_ptr])\n"
+ "st.w $w8, (2*16)(%[accum_ptr])\n"
+ "st.w $w1, (3*16)(%[accum_ptr])\n"
+ "st.w $w5, (4*16)(%[accum_ptr])\n"
+ "st.w $w9, (5*16)(%[accum_ptr])\n"
+ "st.w $w2, (6*16)(%[accum_ptr])\n"
+ "st.w $w6, (7*16)(%[accum_ptr])\n"
+ "st.w $w10, (8*16)(%[accum_ptr])\n"
+ "st.w $w3, (9*16)(%[accum_ptr])\n"
+ "st.w $w7, (10*16)(%[accum_ptr])\n"
+ "st.w $w11, (11*16)(%[accum_ptr])\n"
+ : // outputs
+ [lhs_ptr] "+r"(lhs_ptr), [rhs_ptr] "+r"(rhs_ptr),
+ [depth] "+r"(depth)
+ : // inputs
+ [accum_ptr] "r"(accum_ptr)
+ : // clobbers
+ "memory",
+ "a0", "a1", "a2", "a3",
+ "$f0", "$f1", "$f2", "$f3", "$f4", "$f5", "$f6", "$f7",
+ "$f8", "$f9", "$f10", "$f11", "$f12", "$f13", "$f14", "$f15",
+ "$f16", "$f17", "$f18", "$f19");
+ }
+};
+
+// Assembly implementation of the above
+// MSA_GEMM_12x4_Uint8Operands_Uint32Accumulators_intrinsics2 (TODO).
+// Using 16x16=32 multiplications.
+// 20 MSA regs used:
+// - 12 accumulators
+// - 3 lhs
+// - 4 rhs
+// - 1 temps/zeroes
+// ~45 instructions in the loop.
+struct MSA_GEMM_12x4_Uint8Operands_Uint32Accumulators_assembly2 {
+ typedef std::uint8_t OperandType;
+ typedef std::int32_t AccumulatorType;
+ typedef KernelFormat<
+ KernelSideFormat<CellFormat<4, 2, CellOrder::DepthMajor>, 3>,
+ KernelSideFormat<CellFormat<4, 2, CellOrder::DepthMajor>, 1> >
+ Format;
+ static void Run(OperandType* lhs_ptr, OperandType* rhs_ptr,
+ AccumulatorType* accum_ptr, int depth) {
+ asm volatile(
+ // Load accumulators
+ "ld.w $w0, (0*16)(%[accum_ptr])\n"
+ "ld.w $w4, (1*16)(%[accum_ptr])\n"
+ "ld.w $w8, (2*16)(%[accum_ptr])\n"
+ "ld.w $w1, (3*16)(%[accum_ptr])\n"
+ "ld.w $w5, (4*16)(%[accum_ptr])\n"
+ "ld.w $w9, (5*16)(%[accum_ptr])\n"
+ "ld.w $w2, (6*16)(%[accum_ptr])\n"
+ "ld.w $w6, (7*16)(%[accum_ptr])\n"
+ "ld.w $w10, (8*16)(%[accum_ptr])\n"
+ "ld.w $w3, (9*16)(%[accum_ptr])\n"
+ "ld.w $w7, (10*16)(%[accum_ptr])\n"
+ "ld.w $w11, (11*16)(%[accum_ptr])\n"
+ // Set a temp to all zeroes.
+ "ldi.b $w19, 0\n"
+
+ GEMMLOWP_LABEL_LOOP ":\n"
+ // Overview of register layout:
+ //
+ // A 2x4 cell of Rhs is stored in 16bit in w15-w18 (each register
+ // contains 4 replicas of a pair of elements).
+ // A 12x2 block of 3 4x2 cells Lhs is stored in 16bit in w12-w14.
+ // A 12x4 block of accumulators is stored in 32bit in w0-w11.
+ //
+ // +-----+-----+-----+-----+
+ // Rhs | w15 | w16 | w17 | w18 |
+ // +-----+-----+-----+-----+
+ //
+ // | | | | |
+ //
+ // Lhs | | | | |
+ //
+ // +---+ - - - - +-----+-----+-----+-----+
+ // |w12| | w0 | w1 | w2 | w3 |
+ // |w12| | w0 | w1 | w2 | w3 |
+ // |w12| | w0 | w1 | w2 | w3 |
+ // |w12| | w0 | w1 | w2 | w3 |
+ // +---+ - - - - +-----+-----+-----+-----+
+ // |w13| | w4 | w5 | w6 | w7 |
+ // |w13| | w4 | w5 | w6 | w7 |
+ // |w13| | w4 | w5 | w6 | w7 |
+ // |w13| | w4 | w5 | w6 | w7 |
+ // +---+ - - - - +-----+-----+-----+-----+
+ // |w14| | w8 | w9 | w10 | w11 |
+ // |w14| | w8 | w9 | w10 | w11 |
+ // |w14| | w8 | w9 | w10 | w11 |
+ // |w14| | w8 | w9 | w10 | w11 |
+ // +---+ - - - - +-----+-----+-----+-----+
+ //
+ // Accumulators
+
+ // Load 3 x 8 bytes of lhs[] with 2 16-byte overlapped loads.
+ "ld.b $w12, 0(%[lhs_ptr])\n"
+ "ld.b $w13, 8(%[lhs_ptr])\n"
+
+ // Load 4 bytes of rhs[] for depth 0.
+ "lbu $a0, 0(%[rhs_ptr])\n"
+ "lbu $a1, 1(%[rhs_ptr])\n"
+ "lbu $a2, 2(%[rhs_ptr])\n"
+ "lbu $a3, 3(%[rhs_ptr])\n"
+ // Load 4 bytes of rhs[] for depth 1.
+ "lbu $v0, 4(%[rhs_ptr])\n"
+ "lbu $v1, 5(%[rhs_ptr])\n"
+ "lbu $t8, 6(%[rhs_ptr])\n"
+ "lbu $t9, 7(%[rhs_ptr])\n"
+
+ // Zero-extend 8-bit elements of lhs[] to 16 bits.
+ "ilvr.b $w12, $w19, $w12\n"
+ "ilvl.b $w14, $w19, $w13\n"
+ "ilvr.b $w13, $w19, $w13\n"
+ // Interleave depth 0 and depth 1 elements of lhs[] for dpadd_u.w.
+ "ilvl.d $w15, $w19, $w12\n"
+ "ilvl.d $w16, $w19, $w13\n"
+ "ilvl.d $w17, $w19, $w14\n"
+ "ilvr.h $w12, $w15, $w12\n"
+ "ilvr.h $w13, $w16, $w13\n"
+ "ilvr.h $w14, $w17, $w14\n"
+
+ // Combine and interleave depth 0 and depth 1 elements of rhs[] for dpadd_u.w.
+ "ins $a0, $v0, 16, 8\n"
+ "ins $a1, $v1, 16, 8\n"
+ "ins $a2, $t8, 16, 8\n"
+ "ins $a3, $t9, 16, 8\n"
+ // Make 4 replicas of every pair of rhs[] elements.
+ "fill.w $w15, $a0\n"
+ "fill.w $w16, $a1\n"
+ "fill.w $w17, $a2\n"
+ "fill.w $w18, $a3\n"
+
+ // Depths 0 and 1.
+ // Dot-product-(and)-add doubles multiplicand width.
+ "dpadd_u.w $w0, $w12, $w15\n"
+ "dpadd_u.w $w4, $w13, $w15\n"
+ "dpadd_u.w $w8, $w14, $w15\n"
+ "dpadd_u.w $w1, $w12, $w16\n"
+ "dpadd_u.w $w5, $w13, $w16\n"
+ "dpadd_u.w $w9, $w14, $w16\n"
+ "dpadd_u.w $w2, $w12, $w17\n"
+ "dpadd_u.w $w6, $w13, $w17\n"
+ "dpadd_u.w $w10, $w14, $w17\n"
+ "dpadd_u.w $w3, $w12, $w18\n"
+ "dpadd_u.w $w7, $w13, $w18\n"
+ "dpadd_u.w $w11, $w14, $w18\n"
+
+ "addiu %[depth], -2\n"
+ GEMMLOWP_MIPS_XADDIU " %[lhs_ptr], 24\n"
+ GEMMLOWP_MIPS_XADDIU " %[rhs_ptr], 8\n"
+ "bnez %[depth]," GEMMLOWP_LABEL_LOOP "b\n"
+
+ // Store accumulators.
+ "st.w $w0, (0*16)(%[accum_ptr])\n"
+ "st.w $w4, (1*16)(%[accum_ptr])\n"
+ "st.w $w8, (2*16)(%[accum_ptr])\n"
+ "st.w $w1, (3*16)(%[accum_ptr])\n"
+ "st.w $w5, (4*16)(%[accum_ptr])\n"
+ "st.w $w9, (5*16)(%[accum_ptr])\n"
+ "st.w $w2, (6*16)(%[accum_ptr])\n"
+ "st.w $w6, (7*16)(%[accum_ptr])\n"
+ "st.w $w10, (8*16)(%[accum_ptr])\n"
+ "st.w $w3, (9*16)(%[accum_ptr])\n"
+ "st.w $w7, (10*16)(%[accum_ptr])\n"
+ "st.w $w11, (11*16)(%[accum_ptr])\n"
+ : // outputs
+ [lhs_ptr] "+r"(lhs_ptr), [rhs_ptr] "+r"(rhs_ptr),
+ [depth] "+r"(depth)
+ : // inputs
+ [accum_ptr] "r"(accum_ptr)
+ : // clobbers
+ "memory",
+ "v0", "v1",
+ "a0", "a1", "a2", "a3",
+ "t8", "t9",
+ "$f0", "$f1", "$f2", "$f3", "$f4", "$f5", "$f6", "$f7",
+ "$f8", "$f9", "$f10", "$f11", "$f12", "$f13", "$f14", "$f15",
+ "$f16", "$f17", "$f18", "$f19");
+ }
+};
+
+// Using 32x32=32 multiplications.
+// 32 MSA regs used:
+// - 24 accumulators
+// - 6 lhs
+// - 1 rhs
+// - 1 temps/zeroes
+// ~95 instructions in the loop.
+struct MSA_GEMM_12x8_Uint8Operands_Uint32Accumulators_intrinsics {
+ typedef std::uint8_t OperandType;
+ typedef std::uint32_t AccumulatorType;
+ typedef KernelFormat<
+ KernelSideFormat<CellFormat<4, 2, CellOrder::DepthMajor>, 3>,
+ KernelSideFormat<CellFormat<4, 2, CellOrder::DepthMajor>, 2> >
+ Format;
+ static void Run(const OperandType* lhs_ptr, const OperandType* rhs_ptr,
+ AccumulatorType* accum_ptr, int depth) {
+ const v16i8 zeroes = __builtin_msa_ldi_b(0);
+ v4i32 acc[3][8];
+ // Load accumulators.
+ for (int i = 0; i < 3; i++) {
+ for (int j = 0; j < 8; j++) {
+ acc[i][j] = __builtin_msa_ld_w(accum_ptr + 4 * (i + 3 * j), 0);
+ }
+ }
+
+ while (depth > 0) {
+ // Load 3 x 8 bytes of lhs[] with 2 16-byte overlapped loads.
+ v8i16 lhs[6];
+ lhs[0] = reinterpret_cast<v8i16>(__builtin_msa_ld_b(const_cast<OperandType*>(lhs_ptr), 0));
+ lhs[1] =
+ reinterpret_cast<v8i16>(__builtin_msa_ld_b(const_cast<OperandType*>(lhs_ptr + 8), 0));
+
+ // Zero-extend 8-bit elements of lhs[] to 16 bits.
+ lhs[0] = reinterpret_cast<v8i16>(__builtin_msa_ilvr_b(zeroes,
+ reinterpret_cast<v16i8>(lhs[0])));
+ lhs[2] = reinterpret_cast<v8i16>(__builtin_msa_ilvl_b(zeroes,
+ reinterpret_cast<v16i8>(lhs[1])));
+ lhs[1] = reinterpret_cast<v8i16>(__builtin_msa_ilvr_b(zeroes,
+ reinterpret_cast<v16i8>(lhs[1])));
+
+ // Zero-extend 16-bit elements of lhs[] to 32 bits.
+ lhs[3] = __builtin_msa_ilvl_h(reinterpret_cast<v8i16>(zeroes), lhs[0]);
+ lhs[4] = __builtin_msa_ilvl_h(reinterpret_cast<v8i16>(zeroes), lhs[1]);
+ lhs[5] = __builtin_msa_ilvl_h(reinterpret_cast<v8i16>(zeroes), lhs[2]);
+ lhs[0] = __builtin_msa_ilvr_h(reinterpret_cast<v8i16>(zeroes), lhs[0]);
+ lhs[1] = __builtin_msa_ilvr_h(reinterpret_cast<v8i16>(zeroes), lhs[1]);
+ lhs[2] = __builtin_msa_ilvr_h(reinterpret_cast<v8i16>(zeroes), lhs[2]);
+
+ // Depth 0.
+ for (int j = 0; j < 4; j++) {
+ // Load 1 byte of rhs, making 4 32-bit replicas of it.
+ v4i32 rhs = reinterpret_cast<v4i32>(__builtin_msa_fill_w(rhs_ptr[j]));
+ // Multiply-add into accumulators.
+ for (int i = 0; i < 3; i++) {
+ acc[i][j] = __builtin_msa_maddv_w(acc[i][j], reinterpret_cast<v4i32>(lhs[i]), rhs);
+ }
+ }
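+      // Depth 0 continued: the second Rhs cell's depth-0 bytes start at
+      // rhs_ptr + 8, hence the j + 4 offset below.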
+ for (int j = 4; j < 8; j++) {
+ // Load 1 byte of rhs, making 4 32-bit replicas of it.
+ v4i32 rhs = reinterpret_cast<v4i32>(__builtin_msa_fill_w(rhs_ptr[j + 4]));
+ // Multiply-add into accumulators.
+ for (int i = 0; i < 3; i++) {
+ acc[i][j] = __builtin_msa_maddv_w(acc[i][j], reinterpret_cast<v4i32>(lhs[i]), rhs);
+ }
+ }
+
+ // Depth 1.
+ for (int j = 0; j < 4; j++) {
+ // Load 1 byte of rhs, making 4 32-bit replicas of it.
+ v4i32 rhs = reinterpret_cast<v4i32>(__builtin_msa_fill_w(rhs_ptr[j + 4]));
+ // Multiply-add into accumulators.
+ for (int i = 0; i < 3; i++) {
+ acc[i][j] = __builtin_msa_maddv_w(acc[i][j], reinterpret_cast<v4i32>(lhs[i + 3]), rhs);
+ }
+ }
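+      // Depth 1 continued: the second Rhs cell's depth-1 bytes start at
+      // rhs_ptr + 12, hence the j + 8 offset below.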
+ for (int j = 4; j < 8; j++) {
+ // Load 1 byte of rhs, making 4 32-bit replicas of it.
+ v4i32 rhs = reinterpret_cast<v4i32>(__builtin_msa_fill_w(rhs_ptr[j + 8]));
+ // Multiply-add into accumulators.
+ for (int i = 0; i < 3; i++) {
+ acc[i][j] = __builtin_msa_maddv_w(acc[i][j], reinterpret_cast<v4i32>(lhs[i + 3]), rhs);
+ }
+ }
+
+ lhs_ptr += 24;
+ rhs_ptr += 16;
+ depth -= 2;
+ }
+
+ // Store accumulators.
+ for (int i = 0; i < 3; i++) {
+ for (int j = 0; j < 8; j++) {
+ __builtin_msa_st_w(acc[i][j], accum_ptr + 4 * (i + 3 * j), 0);
+ }
+ }
+ }
+};
+
+// Assembly implementation of the above
+// MSA_GEMM_12x8_Uint8Operands_Uint32Accumulators_intrinsics.
+// Using 32x32=32 multiplications.
+// 32 MSA regs used:
+// - 24 accumulators
+// - 6 lhs
+// - 1 rhs
+// - 1 temps/zeroes
+// ~95 instructions in the loop.
+struct MSA_GEMM_12x8_Uint8Operands_Uint32Accumulators_assembly {
+ typedef std::uint8_t OperandType;
+ typedef std::uint32_t AccumulatorType;
+ typedef KernelFormat<
+ KernelSideFormat<CellFormat<4, 2, CellOrder::DepthMajor>, 3>,
+ KernelSideFormat<CellFormat<4, 2, CellOrder::DepthMajor>, 2> >
+ Format;
+ static void Run(OperandType* lhs_ptr, OperandType* rhs_ptr,
+ AccumulatorType* accum_ptr, int depth) {
+ asm volatile(
+ // Load accumulators
+ "ld.w $w0, (0*16)(%[accum_ptr])\n"
+ "ld.w $w4, (1*16)(%[accum_ptr])\n"
+ "ld.w $w8, (2*16)(%[accum_ptr])\n"
+ "ld.w $w1, (3*16)(%[accum_ptr])\n"
+ "ld.w $w5, (4*16)(%[accum_ptr])\n"
+ "ld.w $w9, (5*16)(%[accum_ptr])\n"
+ "ld.w $w2, (6*16)(%[accum_ptr])\n"
+ "ld.w $w6, (7*16)(%[accum_ptr])\n"
+ "ld.w $w10, (8*16)(%[accum_ptr])\n"
+ "ld.w $w3, (9*16)(%[accum_ptr])\n"
+ "ld.w $w7, (10*16)(%[accum_ptr])\n"
+ "ld.w $w11, (11*16)(%[accum_ptr])\n"
+ "ld.w $w12, (12*16)(%[accum_ptr])\n"
+ "ld.w $w16, (13*16)(%[accum_ptr])\n"
+ "ld.w $w20, (14*16)(%[accum_ptr])\n"
+ "ld.w $w13, (15*16)(%[accum_ptr])\n"
+ "ld.w $w17, (16*16)(%[accum_ptr])\n"
+ "ld.w $w21, (17*16)(%[accum_ptr])\n"
+ "ld.w $w14, (18*16)(%[accum_ptr])\n"
+ "ld.w $w18, (19*16)(%[accum_ptr])\n"
+ "ld.w $w22, (20*16)(%[accum_ptr])\n"
+ "ld.w $w15, (21*16)(%[accum_ptr])\n"
+ "ld.w $w19, (22*16)(%[accum_ptr])\n"
+ "ld.w $w23, (23*16)(%[accum_ptr])\n"
+ // Set a temp to all zeroes.
+ "ldi.b $w31, 0\n"
+
+ GEMMLOWP_LABEL_LOOP ":\n"
+ // Overview of register layout:
+ //
+ // A quarter of the 2 2x4 cells of Rhs is stored in 32bit in w30.
+ // A 12x2 block of 3 4x2 cells Lhs is stored in 32bit in w24-w29.
+ // A 12x8 block of accumulators is stored in 32bit in w0-w23.
+ //
+ // +------+------+------+------+
+ // Rhs |w30[0]|w30[1]|w30[2]|w30[3]|
+ // +------+------+------+------+
+ //
+ // | | | | |
+ //
+ // Lhs | | | | |
+ //
+ // +---+---+ - - - - +------+------+------+------+
+ // |w24|w27| |w0/12 |w1/13 |w2/14 |w3/15 |
+ // |w24|w27| |w0/12 |w1/13 |w2/14 |w3/15 |
+ // |w24|w27| |w0/12 |w1/13 |w2/14 |w3/15 |
+ // |w24|w27| |w0/12 |w1/13 |w2/14 |w3/15 |
+ // +---+---+ - - - - +------+------+------+------+
+ // |w25|w28| |w4/16 |w5/17 |w6/18 |w7/19 |
+ // |w25|w28| |w4/16 |w5/17 |w6/18 |w7/19 |
+ // |w25|w28| |w4/16 |w5/17 |w6/18 |w7/19 |
+ // |w25|w28| |w4/16 |w5/17 |w6/18 |w7/19 |
+ // +---+---+ - - - - +------+------+------+------+
+ // |w26|w29| |w8/20 |w9/21 |w10/22|w11/23|
+ // |w26|w29| |w8/20 |w9/21 |w10/22|w11/23|
+ // |w26|w29| |w8/20 |w9/21 |w10/22|w11/23|
+ // |w26|w29| |w8/20 |w9/21 |w10/22|w11/23|
+ // +---+---+ - - - - +------+------+------+------+
+ //
+ // Accumulator
+
+ // Load 3 x 8 bytes of lhs[] with 2 16-byte overlapped loads.
+ "ld.b $w24, 0(%[lhs_ptr])\n"
+ "ld.b $w25, 8(%[lhs_ptr])\n"
+
+ // Load 4 bytes of rhs[] for the first half of depth 0.
+ "lbu $a0, 0(%[rhs_ptr])\n"
+ "lbu $a1, 1(%[rhs_ptr])\n"
+ "lbu $a2, 2(%[rhs_ptr])\n"
+ "lbu $a3, 3(%[rhs_ptr])\n"
+
+ // Zero-extend 8-bit elements of lhs[] to 16 bits.
+ "ilvr.b $w24, $w31, $w24\n"
+ "ilvl.b $w26, $w31, $w25\n"
+ "ilvr.b $w25, $w31, $w25\n"
+ // Zero-extend 16-bit elements of lhs[] to 32 bits.
+ "ilvl.h $w27, $w31, $w24\n"
+ "ilvl.h $w28, $w31, $w25\n"
+ "ilvl.h $w29, $w31, $w26\n"
+ "ilvr.h $w24, $w31, $w24\n"
+ "ilvr.h $w25, $w31, $w25\n"
+ "ilvr.h $w26, $w31, $w26\n"
+
+ // Depth 0.
+ "fill.w $w30, $a0\n"
+ "lbu $a0, 8(%[rhs_ptr])\n"
+ "maddv.w $w0, $w24, $w30\n"
+ "maddv.w $w4, $w25, $w30\n"
+ "maddv.w $w8, $w26, $w30\n"
+ "fill.w $w30, $a1\n"
+ "lbu $a1, 9(%[rhs_ptr])\n"
+ "maddv.w $w1, $w24, $w30\n"
+ "maddv.w $w5, $w25, $w30\n"
+ "maddv.w $w9, $w26, $w30\n"
+ "fill.w $w30, $a2\n"
+ "lbu $a2, 10(%[rhs_ptr])\n"
+ "maddv.w $w2, $w24, $w30\n"
+ "maddv.w $w6, $w25, $w30\n"
+ "maddv.w $w10, $w26, $w30\n"
+ "fill.w $w30, $a3\n"
+ "lbu $a3, 11(%[rhs_ptr])\n"
+ "maddv.w $w3, $w24, $w30\n"
+ "maddv.w $w7, $w25, $w30\n"
+ "maddv.w $w11, $w26, $w30\n"
+
+ "fill.w $w30, $a0\n"
+ "lbu $a0, 4(%[rhs_ptr])\n"
+ "maddv.w $w12, $w24, $w30\n"
+ "maddv.w $w16, $w25, $w30\n"
+ "maddv.w $w20, $w26, $w30\n"
+ "fill.w $w30, $a1\n"
+ "lbu $a1, 5(%[rhs_ptr])\n"
+ "maddv.w $w13, $w24, $w30\n"
+ "maddv.w $w17, $w25, $w30\n"
+ "maddv.w $w21, $w26, $w30\n"
+ "fill.w $w30, $a2\n"
+ "lbu $a2, 6(%[rhs_ptr])\n"
+ "maddv.w $w14, $w24, $w30\n"
+ "maddv.w $w18, $w25, $w30\n"
+ "maddv.w $w22, $w26, $w30\n"
+ "fill.w $w30, $a3\n"
+ "lbu $a3, 7(%[rhs_ptr])\n"
+ "maddv.w $w15, $w24, $w30\n"
+ "maddv.w $w19, $w25, $w30\n"
+ "maddv.w $w23, $w26, $w30\n"
+
+ // Depth 1.
+ "fill.w $w30, $a0\n"
+ "lbu $a0, 12(%[rhs_ptr])\n"
+ "maddv.w $w0, $w27, $w30\n"
+ "maddv.w $w4, $w28, $w30\n"
+ "maddv.w $w8, $w29, $w30\n"
+ "fill.w $w30, $a1\n"
+ "lbu $a1, 13(%[rhs_ptr])\n"
+ "maddv.w $w1, $w27, $w30\n"
+ "maddv.w $w5, $w28, $w30\n"
+ "maddv.w $w9, $w29, $w30\n"
+ "fill.w $w30, $a2\n"
+ "lbu $a2, 14(%[rhs_ptr])\n"
+ "maddv.w $w2, $w27, $w30\n"
+ "maddv.w $w6, $w28, $w30\n"
+ "maddv.w $w10, $w29, $w30\n"
+ "fill.w $w30, $a3\n"
+ "lbu $a3, 15(%[rhs_ptr])\n"
+ "maddv.w $w3, $w27, $w30\n"
+ "maddv.w $w7, $w28, $w30\n"
+ "maddv.w $w11, $w29, $w30\n"
+
+ "fill.w $w30, $a0\n"
+ "maddv.w $w12, $w27, $w30\n"
+ "maddv.w $w16, $w28, $w30\n"
+ "maddv.w $w20, $w29, $w30\n"
+ "fill.w $w30, $a1\n"
+ "maddv.w $w13, $w27, $w30\n"
+ "maddv.w $w17, $w28, $w30\n"
+ "maddv.w $w21, $w29, $w30\n"
+ "fill.w $w30, $a2\n"
+ "maddv.w $w14, $w27, $w30\n"
+ "maddv.w $w18, $w28, $w30\n"
+ "maddv.w $w22, $w29, $w30\n"
+ "fill.w $w30, $a3\n"
+ "maddv.w $w15, $w27, $w30\n"
+ "maddv.w $w19, $w28, $w30\n"
+ "maddv.w $w23, $w29, $w30\n"
+
+ "addiu %[depth], -2\n"
+ GEMMLOWP_MIPS_XADDIU " %[lhs_ptr], 24\n"
+ GEMMLOWP_MIPS_XADDIU " %[rhs_ptr], 16\n"
+ "bnez %[depth]," GEMMLOWP_LABEL_LOOP "b\n"
+
+ // Store accumulators.
+ "st.w $w0, (0*16)(%[accum_ptr])\n"
+ "st.w $w4, (1*16)(%[accum_ptr])\n"
+ "st.w $w8, (2*16)(%[accum_ptr])\n"
+ "st.w $w1, (3*16)(%[accum_ptr])\n"
+ "st.w $w5, (4*16)(%[accum_ptr])\n"
+ "st.w $w9, (5*16)(%[accum_ptr])\n"
+ "st.w $w2, (6*16)(%[accum_ptr])\n"
+ "st.w $w6, (7*16)(%[accum_ptr])\n"
+ "st.w $w10, (8*16)(%[accum_ptr])\n"
+ "st.w $w3, (9*16)(%[accum_ptr])\n"
+ "st.w $w7, (10*16)(%[accum_ptr])\n"
+ "st.w $w11, (11*16)(%[accum_ptr])\n"
+ "st.w $w12, (12*16)(%[accum_ptr])\n"
+ "st.w $w16, (13*16)(%[accum_ptr])\n"
+ "st.w $w20, (14*16)(%[accum_ptr])\n"
+ "st.w $w13, (15*16)(%[accum_ptr])\n"
+ "st.w $w17, (16*16)(%[accum_ptr])\n"
+ "st.w $w21, (17*16)(%[accum_ptr])\n"
+ "st.w $w14, (18*16)(%[accum_ptr])\n"
+ "st.w $w18, (19*16)(%[accum_ptr])\n"
+ "st.w $w22, (20*16)(%[accum_ptr])\n"
+ "st.w $w15, (21*16)(%[accum_ptr])\n"
+ "st.w $w19, (22*16)(%[accum_ptr])\n"
+ "st.w $w23, (23*16)(%[accum_ptr])\n"
+ : // outputs
+ [lhs_ptr] "+r"(lhs_ptr), [rhs_ptr] "+r"(rhs_ptr),
+ [depth] "+r"(depth)
+ : // inputs
+ [accum_ptr] "r"(accum_ptr)
+ : // clobbers
+ "memory",
+ "a0", "a1", "a2", "a3",
+ "$f0", "$f1", "$f2", "$f3", "$f4", "$f5", "$f6", "$f7",
+ "$f8", "$f9", "$f10", "$f11", "$f12", "$f13", "$f14", "$f15",
+ "$f16", "$f17", "$f18", "$f19", "$f20", "$f21", "$f22", "$f23",
+ "$f24", "$f25", "$f26", "$f27", "$f28", "$f29", "$f30", "$f31");
+ }
+};
+
+// Assembly implementation of the above
+// MSA_GEMM_12x8_Uint8Operands_Uint32Accumulators_intrinsics2 (TODO).
+// Using 16x16=32 multiplications.
+// 32 MSA regs used:
+// - 24 accumulators
+// - 3 lhs
+// - 4 rhs
+// - 1 temp/zeroes
+// ~70 instructions in the loop.
+struct MSA_GEMM_12x8_Uint8Operands_Uint32Accumulators_assembly2 {
+ typedef std::uint8_t OperandType;
+ typedef std::uint32_t AccumulatorType;
+ typedef KernelFormat<
+ KernelSideFormat<CellFormat<4, 2, CellOrder::DepthMajor>, 3>,
+ KernelSideFormat<CellFormat<4, 2, CellOrder::DepthMajor>, 2> >
+ Format;
+ static void Run(OperandType* lhs_ptr, OperandType* rhs_ptr,
+ AccumulatorType* accum_ptr, int depth) {
+ asm volatile(
+ // Load accumulators
+ "ld.w $w0, (0*16)(%[accum_ptr])\n"
+ "ld.w $w4, (1*16)(%[accum_ptr])\n"
+ "ld.w $w8, (2*16)(%[accum_ptr])\n"
+ "ld.w $w1, (3*16)(%[accum_ptr])\n"
+ "ld.w $w5, (4*16)(%[accum_ptr])\n"
+ "ld.w $w9, (5*16)(%[accum_ptr])\n"
+ "ld.w $w2, (6*16)(%[accum_ptr])\n"
+ "ld.w $w6, (7*16)(%[accum_ptr])\n"
+ "ld.w $w10, (8*16)(%[accum_ptr])\n"
+ "ld.w $w3, (9*16)(%[accum_ptr])\n"
+ "ld.w $w7, (10*16)(%[accum_ptr])\n"
+ "ld.w $w11, (11*16)(%[accum_ptr])\n"
+ "ld.w $w12, (12*16)(%[accum_ptr])\n"
+ "ld.w $w16, (13*16)(%[accum_ptr])\n"
+ "ld.w $w20, (14*16)(%[accum_ptr])\n"
+ "ld.w $w13, (15*16)(%[accum_ptr])\n"
+ "ld.w $w17, (16*16)(%[accum_ptr])\n"
+ "ld.w $w21, (17*16)(%[accum_ptr])\n"
+ "ld.w $w14, (18*16)(%[accum_ptr])\n"
+ "ld.w $w18, (19*16)(%[accum_ptr])\n"
+ "ld.w $w22, (20*16)(%[accum_ptr])\n"
+ "ld.w $w15, (21*16)(%[accum_ptr])\n"
+ "ld.w $w19, (22*16)(%[accum_ptr])\n"
+ "ld.w $w23, (23*16)(%[accum_ptr])\n"
+ // Set a temp to all zeroes.
+ "ldi.b $w31, 0\n"
+
+ GEMMLOWP_LABEL_LOOP ":\n"
+ // Overview of register layout:
+ //
+ // A half of the 2 2x4 cells of Rhs is stored in 16bit in w27-w30
+ // (each register contains 4 replicas of a pair of elements).
+ // A 12x2 block of 3 4x2 cells Lhs is stored in 16bit in w24-w26.
+ // A 12x8 block of accumulators is stored in 32bit in w0-w23.
+ //
+ // +------+------+------+------+
+ // Rhs |w27 |w28 |w29 |w30 |
+ // +------+------+------+------+
+ //
+ // | | | | |
+ //
+ // Lhs | | | | |
+ //
+ // +---+ - - - - +------+------+------+------+
+ // |w24| |w0/12 |w1/13 |w2/14 |w3/15 |
+ // |w24| |w0/12 |w1/13 |w2/14 |w3/15 |
+ // |w24| |w0/12 |w1/13 |w2/14 |w3/15 |
+ // |w24| |w0/12 |w1/13 |w2/14 |w3/15 |
+ // +---+ - - - - +------+------+------+------+
+ // |w25| |w4/16 |w5/17 |w6/18 |w7/19 |
+ // |w25| |w4/16 |w5/17 |w6/18 |w7/19 |
+ // |w25| |w4/16 |w5/17 |w6/18 |w7/19 |
+ // |w25| |w4/16 |w5/17 |w6/18 |w7/19 |
+ // +---+ - - - - +------+------+------+------+
+ // |w26| |w8/20 |w9/21 |w10/22|w11/23|
+ // |w26| |w8/20 |w9/21 |w10/22|w11/23|
+ // |w26| |w8/20 |w9/21 |w10/22|w11/23|
+ // |w26| |w8/20 |w9/21 |w10/22|w11/23|
+ // +---+ - - - - +------+------+------+------+
+ //
+ // Accumulators
+
+ // Load 3 x 8 bytes of lhs[] with 2 16-byte overlapped loads.
+ "ld.b $w24, 0(%[lhs_ptr])\n"
+ "ld.b $w25, 8(%[lhs_ptr])\n"
+
+ // Load 4 bytes of rhs[] for the first half of depth 0.
+ "lbu $a0, 0(%[rhs_ptr])\n"
+ "lbu $a1, 1(%[rhs_ptr])\n"
+ "lbu $a2, 2(%[rhs_ptr])\n"
+ "lbu $a3, 3(%[rhs_ptr])\n"
+ // Load 4 bytes of rhs[] for the first half of depth 1.
+ "lbu $v0, 4(%[rhs_ptr])\n"
+ "lbu $v1, 5(%[rhs_ptr])\n"
+ "lbu $t8, 6(%[rhs_ptr])\n"
+ "lbu $t9, 7(%[rhs_ptr])\n"
+
+ // Zero-extend 8-bit elements of lhs[] to 16 bits.
+ "ilvr.b $w24, $w31, $w24\n"
+ "ilvl.b $w26, $w31, $w25\n"
+ "ilvr.b $w25, $w31, $w25\n"
+ // Interleave depth 0 and depth 1 elements of lhs[] for dpadd_u.w.
+ "ilvl.d $w27, $w31, $w24\n"
+ "ilvl.d $w28, $w31, $w25\n"
+ "ilvl.d $w29, $w31, $w26\n"
+ "ilvr.h $w24, $w27, $w24\n"
+ "ilvr.h $w25, $w28, $w25\n"
+ "ilvr.h $w26, $w29, $w26\n"
+
+ // Combine and interleave depth 0 and depth 1 elements of rhs[] for dpadd_u.w
+ // (for the first half).
+ "ins $a0, $v0, 16, 8\n"
+ "ins $a1, $v1, 16, 8\n"
+ "ins $a2, $t8, 16, 8\n"
+ "ins $a3, $t9, 16, 8\n"
+ // Make 4 replicas of every pair of rhs[] elements.
+ "fill.w $w27, $a0\n"
+ "fill.w $w28, $a1\n"
+ "fill.w $w29, $a2\n"
+ "fill.w $w30, $a3\n"
+
+ // Load 4 bytes of rhs[] for the second half of depth 0.
+ "lbu $a0, 8(%[rhs_ptr])\n"
+ "lbu $a1, 9(%[rhs_ptr])\n"
+ "lbu $a2, 10(%[rhs_ptr])\n"
+ "lbu $a3, 11(%[rhs_ptr])\n"
+ // Load 4 bytes of rhs[] for the second half of depth 1.
+ "lbu $v0, 12(%[rhs_ptr])\n"
+ "lbu $v1, 13(%[rhs_ptr])\n"
+ "lbu $t8, 14(%[rhs_ptr])\n"
+ "lbu $t9, 15(%[rhs_ptr])\n"
+
+ // First half of depths 0 and 1.
+ // Dot-product-(and)-add doubles multiplicand width.
+ "dpadd_u.w $w0, $w24, $w27\n"
+ "dpadd_u.w $w4, $w25, $w27\n"
+ "dpadd_u.w $w8, $w26, $w27\n"
+ "dpadd_u.w $w1, $w24, $w28\n"
+ "dpadd_u.w $w5, $w25, $w28\n"
+ "dpadd_u.w $w9, $w26, $w28\n"
+ "dpadd_u.w $w2, $w24, $w29\n"
+ "dpadd_u.w $w6, $w25, $w29\n"
+ "dpadd_u.w $w10, $w26, $w29\n"
+ "dpadd_u.w $w3, $w24, $w30\n"
+ "dpadd_u.w $w7, $w25, $w30\n"
+ "dpadd_u.w $w11, $w26, $w30\n"
+
+ // Combine and interleave depth 0 and depth 1 elements of rhs[] for dpadd_u.w
+ // (for the second half).
+ "ins $a0, $v0, 16, 8\n"
+ "ins $a1, $v1, 16, 8\n"
+ "ins $a2, $t8, 16, 8\n"
+ "ins $a3, $t9, 16, 8\n"
+ // Make 4 replicas of every pair of rhs[] elements.
+ "fill.w $w27, $a0\n"
+ "fill.w $w28, $a1\n"
+ "fill.w $w29, $a2\n"
+ "fill.w $w30, $a3\n"
+
+ // Second half of depths 0 and 1.
+ // Dot-product-(and)-add doubles multiplicand width.
+ "dpadd_u.w $w12, $w24, $w27\n"
+ "dpadd_u.w $w16, $w25, $w27\n"
+ "dpadd_u.w $w20, $w26, $w27\n"
+ "dpadd_u.w $w13, $w24, $w28\n"
+ "dpadd_u.w $w17, $w25, $w28\n"
+ "dpadd_u.w $w21, $w26, $w28\n"
+ "dpadd_u.w $w14, $w24, $w29\n"
+ "dpadd_u.w $w18, $w25, $w29\n"
+ "dpadd_u.w $w22, $w26, $w29\n"
+ "dpadd_u.w $w15, $w24, $w30\n"
+ "dpadd_u.w $w19, $w25, $w30\n"
+ "dpadd_u.w $w23, $w26, $w30\n"
+
+ "addiu %[depth], -2\n"
+ GEMMLOWP_MIPS_XADDIU " %[lhs_ptr], 24\n"
+ GEMMLOWP_MIPS_XADDIU " %[rhs_ptr], 16\n"
+ "bnez %[depth]," GEMMLOWP_LABEL_LOOP "b\n"
+
+ // Store accumulators.
+ "st.w $w0, (0*16)(%[accum_ptr])\n"
+ "st.w $w4, (1*16)(%[accum_ptr])\n"
+ "st.w $w8, (2*16)(%[accum_ptr])\n"
+ "st.w $w1, (3*16)(%[accum_ptr])\n"
+ "st.w $w5, (4*16)(%[accum_ptr])\n"
+ "st.w $w9, (5*16)(%[accum_ptr])\n"
+ "st.w $w2, (6*16)(%[accum_ptr])\n"
+ "st.w $w6, (7*16)(%[accum_ptr])\n"
+ "st.w $w10, (8*16)(%[accum_ptr])\n"
+ "st.w $w3, (9*16)(%[accum_ptr])\n"
+ "st.w $w7, (10*16)(%[accum_ptr])\n"
+ "st.w $w11, (11*16)(%[accum_ptr])\n"
+ "st.w $w12, (12*16)(%[accum_ptr])\n"
+ "st.w $w16, (13*16)(%[accum_ptr])\n"
+ "st.w $w20, (14*16)(%[accum_ptr])\n"
+ "st.w $w13, (15*16)(%[accum_ptr])\n"
+ "st.w $w17, (16*16)(%[accum_ptr])\n"
+ "st.w $w21, (17*16)(%[accum_ptr])\n"
+ "st.w $w14, (18*16)(%[accum_ptr])\n"
+ "st.w $w18, (19*16)(%[accum_ptr])\n"
+ "st.w $w22, (20*16)(%[accum_ptr])\n"
+ "st.w $w15, (21*16)(%[accum_ptr])\n"
+ "st.w $w19, (22*16)(%[accum_ptr])\n"
+ "st.w $w23, (23*16)(%[accum_ptr])\n"
+ : // outputs
+ [lhs_ptr] "+r"(lhs_ptr), [rhs_ptr] "+r"(rhs_ptr),
+ [depth] "+r"(depth)
+ : // inputs
+ [accum_ptr] "r"(accum_ptr)
+ : // clobbers
+ "memory",
+ "v0", "v1",
+ "a0", "a1", "a2", "a3",
+ "t8", "t9",
+ "$f0", "$f1", "$f2", "$f3", "$f4", "$f5", "$f6", "$f7",
+ "$f8", "$f9", "$f10", "$f11", "$f12", "$f13", "$f14", "$f15",
+ "$f16", "$f17", "$f18", "$f19", "$f20", "$f21", "$f22", "$f23",
+ "$f24", "$f25", "$f26", "$f27", "$f28", "$f29", "$f30", "$f31");
+ }
+};
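+
+// Scalar model of the arithmetic performed by one loop iteration of the MSA
+// kernels above (illustration only: this helper is not used by the benchmark
+// and its name is made up). For each pair of consecutive depth levels, every
+// uint8 lhs value is multiplied by every uint8 rhs value at the same depth
+// and accumulated into a uint32 accumulator. lhs/rhs are indexed here in a
+// plain row/column fashion rather than in the packed cell layout the real
+// kernels consume; the accumulator indexing matches the column-major 12x8
+// layout used by the loads/stores above.
+inline void ReferenceDepthPairUpdate(const std::uint8_t* lhs /* 12 rows x 2 depths */,
+                                     const std::uint8_t* rhs /* 2 depths x 8 cols */,
+                                     std::uint32_t* accum /* 12x8, column-major */) {
+  for (int c = 0; c < 8; c++) {
+    for (int r = 0; r < 12; r++) {
+      std::uint32_t acc = accum[r + 12 * c];
+      for (int d = 0; d < 2; d++) {
+        acc += static_cast<std::uint32_t>(lhs[r + 12 * d]) *
+               static_cast<std::uint32_t>(rhs[c + 8 * d]);
+      }
+      accum[r + 12 * c] = acc;
+    }
+  }
+}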
+#endif // __mips
// BEGIN code copied from gemmlowp/internal/kernel_reference.h
data_ = nullptr;
// Adds a few bytes of padding here, because the 64-bit 'A57' kernel
// reads one iteration past the end the buffer, causing a crash on iOS.
- posix_memalign(reinterpret_cast<void**>(&data_), kCacheLineSize,
- size_ * sizeof(DataType) + 16);
+ int res = posix_memalign(reinterpret_cast<void**>(&data_), kCacheLineSize,
+ size_ * sizeof(DataType) + 16);
+ (void)res;
}
~CacheLineAlignedBuffer() { free(data_); }
const DataType* data() const { return data_; }
DataType* data() { return data_; }
- const std::size_t size() const { return size_; }
+ std::size_t size() const { return size_; }
private:
const std::size_t size_;
#endif
#ifdef __aarch64__
-
BENCHMARK(NEON_64bit_GEMM_Int8Operands_AccumTwoWithin16Bits);
BENCHMARK(NEON_64bit_GEMM_Int8Operands_AccumTwoWithin16Bits_intrinsics);
BENCHMARK(NEON_64bit_GEMM_Uint8Operands_Uint32Accumulators);
BENCHMARK(NEON_64bit_GEMM_Uint8Operands_Uint32Accumulators_intrinsics);
BENCHMARK(NEON_64bit_GEMM_Uint8Operands_Uint32Accumulators_noexpand_A57);
+#ifdef __ARM_FEATURE_DOTPROD
+ BENCHMARK(NEON_64bit_GEMM_Uint8Operands_Uint32Accumulators_dotproduct);
+ BENCHMARK(NEON_64bit_GEMM_Uint8Operands_Uint32Accumulators_dotproduct_A55r1);
+#endif
BENCHMARK(NEON_64bit_GEMM_Int32_WithScalar);
BENCHMARK(NEON_64bit_GEMM_Float32_WithVectorDuplicatingScalar);
BENCHMARK(NEON_64bit_GEMM_Float32_WithScalar);
#ifndef __APPLE__
BENCHMARK(NEON_64bit_GEMM_Float32_WithScalar_A53);
#endif
+ BENCHMARK(NEON_64bit_GEMM_Float32_WithScalar_A55r1);
+#endif
+
+#ifdef __mips
+ BENCHMARK(MSA_GEMM_12x4_Uint8Operands_Uint32Accumulators_intrinsics);
+ BENCHMARK(MSA_GEMM_12x4_Uint8Operands_Uint32Accumulators_assembly);
+ BENCHMARK(MSA_GEMM_12x4_Uint8Operands_Uint32Accumulators_assembly2);
+ BENCHMARK(MSA_GEMM_12x8_Uint8Operands_Uint32Accumulators_intrinsics);
+ BENCHMARK(MSA_GEMM_12x8_Uint8Operands_Uint32Accumulators_assembly);
+ BENCHMARK(MSA_GEMM_12x8_Uint8Operands_Uint32Accumulators_assembly2);
#endif
return 0;
// See the License for the specific language governing permissions and
// limitations under the License.
-#include <unistd.h>
#ifdef __APPLE__
#include <sys/time.h>
#endif
#warning "Building without NEON support on ARM, check your compiler setup!"
#endif
+#if defined(__mips) && !defined(GEMMLOWP_MSA)
+#warning "Building without MSA support on MIPS, check your compiler setup!"
+#endif
+
#if defined(__SSE4_2__) && !defined(GEMMLOWP_SSE4)
#warning \
"Building without SSE4.2 support on SSE4.2 enabled machine, check your compiler setup!"
namespace gemmlowp {
-double time() {
-#ifdef __APPLE__
- timeval t;
- gettimeofday(&t, nullptr);
- return t.tv_sec + 1e-6 * t.tv_usec;
-#else
- timespec t;
- clock_gettime(CLOCK_REALTIME, &t);
- return t.tv_sec + 1e-9 * t.tv_nsec;
-#endif
-}
-
const double min_accurate_duration = 1e-1;
const std::size_t min_working_set_size = 16 * 1024 * 1024;
std::size_t pool_index = 0;
while (true) {
- double starttime = time();
+ double starttime = real_time_in_seconds();
for (int i = 0; i < iters_at_a_time; i++) {
for (size_t j = 0; j < gemms.size(); j++) {
- int k = pool_index * gemms.size() + j;
+ size_t k = pool_index * gemms.size() + j;
Gemm<std::uint8_t, GEMMLOWP_TEST_BIT_DEPTH_PARAMS>(
context, lhs[k].const_map(), rhs[k].const_map(), &result[k].map(),
-75, -91, 74980, 123, 20);
pool_index = 0;
}
}
- double endtime = time();
+ double endtime = real_time_in_seconds();
const float timing = static_cast<float>(endtime - starttime);
gemmlowp::StartProfiling();
#endif
- double starttime = time();
- while (time() < starttime + mintime) {
+ double starttime = real_time_in_seconds();
+ while (real_time_in_seconds() < starttime + mintime) {
gemm_times.push_back(
time_for_gemms<LhsType, RhsType, ResultType>(context, gemms));
}
--- /dev/null
+// Example command line to build on Android ARM64:
+/*
+~/android/toolchains/r15c-aarch64/bin/aarch64-linux-android-clang++ \
+test/benchmark_all_sizes.cc -o /tmp/b -O3 --std=c++11 -fPIE -static \
+-DBENCHMARK_QUICK -DBENCHMARK_8bit
+*/
+
+#include <algorithm>
+#include <cmath>
+#include <cstdint>
+#include <cstdio>
+#include <cstring>
+#include <ctime>
+#include <iostream>
+#include <map>
+#include <random>
+#include <set>
+#include <tuple>
+#include <vector>
+
+#include "../public/gemmlowp.h"
+
+#if defined GEMMLOWP_ANDROID && defined GEMMLOWP_ARM_32
+// Compilation workaround
+namespace std {
+ using ::round;
+}
+#endif
+
+// Minimum duration of each benchmark measurement. Also, duration
+// of sleep time between each two consecutive benchmark measurements to
+// prevent over-heating.
+const double kBenchmarkSecs = 0.1;
+
+// Sleep time before each benchmark.
+const int kCooldownBeforeBenchmarkSecs = 0;
+
+// Number of benchmark passes.
+const int kPasses = 4;
+
+#ifdef BENCHMARK_NUM_THREADS
+const int kNumThreads = BENCHMARK_NUM_THREADS;
+#else
+const int kNumThreads = 1;
+#endif
+
+namespace gemmlowp {
+
+// gemmlowp itself doesn't have a Matrix class, only a MatrixMap class,
+// since it only maps existing data. In tests though, we need to
+// create our own matrices.
+template <typename tScalar, MapOrder tOrder>
+class Matrix : public MatrixMap<tScalar, tOrder> {
+ public:
+ typedef MatrixMap<tScalar, tOrder> Map;
+ typedef MatrixMap<const tScalar, tOrder> ConstMap;
+ typedef typename Map::Scalar Scalar;
+ static const MapOrder Order = tOrder;
+ using Map::cols_;
+ using Map::data_;
+ using Map::kOrder;
+ using Map::rows_;
+ using Map::stride_;
+
+ public:
+ Matrix() : Map(nullptr, 0, 0, 0) {}
+
+ Matrix(int rows, int cols) : Map(nullptr, 0, 0, 0) { Resize(rows, cols); }
+
+ Matrix(const Matrix& other) : Map(nullptr, 0, 0, 0) { *this = other; }
+
+ Matrix& operator=(const Matrix& other) {
+ Resize(other.rows_, other.cols_);
+ std::memcpy(data_, other.data_, size() * sizeof(Scalar));
+ return *this;
+ }
+
+ friend bool operator==(const Matrix& a, const Matrix& b) {
+ return a.rows_ == b.rows_ && a.cols_ == b.cols_ &&
+ !std::memcmp(a.data_, b.data_, a.size() * sizeof(Scalar));
+ }
+
+ void Resize(int rows, int cols) {
+ rows_ = rows;
+ cols_ = cols;
+ stride_ = kOrder == MapOrder::ColMajor ? rows : cols;
+ storage.resize(size());
+ data_ = storage.data();
+ }
+
+ int size() const { return rows_ * cols_; }
+
+ Map& map() { return *static_cast<Map*>(this); }
+
+ ConstMap const_map() const { return ConstMap(data_, rows_, cols_, stride_); }
+
+ protected:
+ std::vector<Scalar> storage;
+};
+
+template <typename MatrixType>
+void MakeZero(MatrixType* m) {
+ for (int c = 0; c < m->cols(); c++) {
+ for (int r = 0; r < m->rows(); r++) {
+ (*m)(r, c) = 128;
+ }
+ }
+}
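+
+// Minimal usage sketch of the Matrix helper above (illustrative only; this
+// function is not called anywhere in the benchmark). It shows the typical
+// flow: construct, fill, then hand a read-only map to the gemmlowp entry
+// points, which consume MatrixMap rather than Matrix.
+template <typename tScalar>
+void ExampleMatrixUsage() {
+  Matrix<tScalar, MapOrder::RowMajor> m(4, 8);  // arbitrary 4x8 shape
+  MakeZero(&m);                                 // fills with the value 128
+  typename Matrix<tScalar, MapOrder::RowMajor>::ConstMap cm = m.const_map();
+  (void)cm;
+}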
+
+} // end namespace gemmlowp
+
+template <typename BitDepthParams>
+float benchmark_8bit(int rows, int depth, int cols) {
+ using namespace gemmlowp;
+ typedef Matrix<std::uint8_t, MapOrder::RowMajor> LhsType;
+ typedef Matrix<std::uint8_t, MapOrder::ColMajor> RhsType;
+ typedef Matrix<std::uint8_t, MapOrder::ColMajor> ResultType;
+
+ LhsType lhs;
+ RhsType rhs;
+ ResultType result;
+ lhs.Resize(rows, depth);
+ rhs.Resize(depth, cols);
+ result.Resize(rows, cols);
+ MakeZero(&lhs);
+ MakeZero(&rhs);
+ MakeZero(&result);
+
+ typedef std::tuple<OutputStageQuantizeDownInt32ByFixedPoint,
+ OutputStageSaturatingCastToUint8>
+ Pipeline;
+ gemmlowp::OutputStageQuantizeDownInt32ByFixedPoint
+ quantize_down_stage;
+ quantize_down_stage.result_offset_after_shift = 128;
+ quantize_down_stage.result_fixedpoint_multiplier = 1234567890;
+ quantize_down_stage.result_shift = 16;
+ gemmlowp::OutputStageSaturatingCastToUint8 saturating_cast_stage;
+ const auto output_pipeline =
+ std::make_tuple(quantize_down_stage, saturating_cast_stage);
+ GemmContext gemm_context;
+ gemm_context.set_max_num_threads(kNumThreads);
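+  // One run before the timed loop below; its cost is not included in the
+  // measurement.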
+ gemmlowp::GemmWithOutputPipeline<std::uint8_t, std::uint8_t, BitDepthParams>(
+ &gemm_context, lhs.const_map(), rhs.const_map(), &result.map(), -128,
+ -128, output_pipeline);
+
+ double time_start = real_time_in_seconds();
+ double t = time_start;
+ int iters = 0;
+ int iters_at_a_time = 1;
+ while (t - time_start < kBenchmarkSecs) {
+ for (int i = 0; i < iters_at_a_time; i++) {
+ gemmlowp::GemmWithOutputPipeline<std::uint8_t, std::uint8_t,
+ BitDepthParams>(
+ &gemm_context, lhs.const_map(), rhs.const_map(), &result.map(), -128,
+ -128, output_pipeline);
+ iters++;
+ }
+ iters_at_a_time *= 2;
+ t = real_time_in_seconds();
+ }
+ return (t - time_start) / iters;
+}
+
+template <typename BitDepthParams>
+float benchmark_8bit_to_32bit(int rows, int depth, int cols) {
+ using namespace gemmlowp;
+ typedef Matrix<std::uint8_t, MapOrder::RowMajor> LhsType;
+ typedef Matrix<std::uint8_t, MapOrder::ColMajor> RhsType;
+ typedef Matrix<std::int32_t, MapOrder::ColMajor> ResultType;
+
+ LhsType lhs;
+ RhsType rhs;
+ ResultType result;
+ lhs.Resize(rows, depth);
+ rhs.Resize(depth, cols);
+ result.Resize(rows, cols);
+ MakeZero(&lhs);
+ MakeZero(&rhs);
+ MakeZero(&result);
+
+ typedef std::tuple<> EmptyPipeline;
+ GemmContext gemm_context;
+ gemm_context.set_max_num_threads(kNumThreads);
+ gemmlowp::GemmWithOutputPipeline<std::uint8_t, std::int32_t, BitDepthParams>(
+ &gemm_context, lhs.const_map(), rhs.const_map(), &result.map(), -128,
+ -128, EmptyPipeline());
+
+ double time_start = real_time_in_seconds();
+ double t = time_start;
+ int iters = 0;
+ int iters_at_a_time = 1;
+ while (t - time_start < kBenchmarkSecs) {
+ for (int i = 0; i < iters_at_a_time; i++) {
+ gemmlowp::GemmWithOutputPipeline<std::uint8_t, std::int32_t,
+ BitDepthParams>(
+ &gemm_context, lhs.const_map(), rhs.const_map(), &result.map(), -128,
+ -128, EmptyPipeline());
+ iters++;
+ }
+ iters_at_a_time *= 2;
+ t = real_time_in_seconds();
+ }
+ return (t - time_start) / iters;
+}
+
+struct Shape {
+ int rows;
+ int depth;
+ int cols;
+};
+
+bool operator==(const Shape& s1, const Shape& s2) {
+ return s1.rows == s2.rows && s1.depth == s2.depth && s1.cols == s2.cols;
+}
+
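+// Order Shapes by (depth, rows, cols) so that the std::map of results in
+// main() is iterated, and hence printed, grouped by depth first.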
+bool operator<(const Shape& shape1, const Shape& shape2) {
+ return shape1.depth < shape2.depth ||
+ (shape1.depth == shape2.depth &&
+ (shape1.rows < shape2.rows ||
+ (shape1.rows == shape2.rows && shape1.cols < shape2.cols)));
+}
+
+#ifdef _WIN32
+#define sleep(t) Sleep(t)
+#endif
+
+float benchmark(const Shape& shape) {
+ if (kCooldownBeforeBenchmarkSecs) {
+ sleep(kCooldownBeforeBenchmarkSecs);
+ }
+#if defined BENCHMARK_8bit
+ // Benchmark the fast 8bit path, using L8R8WithLhsNonzeroBitDepthParams.
+ // This is the recommended thing to default to: it's what most applications
+ // want to use, as it's the fastest.
+ // The contract is that LHS must take values in [1, 255], while RHS can take
+ // any value in [0, 255].
+ return benchmark_8bit<gemmlowp::L8R8WithLhsNonzeroBitDepthParams>(
+ shape.rows, shape.depth, shape.cols);
+#elif defined BENCHMARK_8bit_wide
+ // Variant benchmarking the slower (mostly legacy) DefaultL8R8BitDepthParams.
+ // The only contract difference is that both LHS and RHS can take values in
+ // [0, 255].
+ return benchmark_8bit<gemmlowp::DefaultL8R8BitDepthParams>(
+ shape.rows, shape.depth, shape.cols);
+#elif defined BENCHMARK_8bit_to_32bit
+ // Variant of BENCHMARK_8bit where the user asks for raw int32 accumulators
+ // instead of an 8-bit-downscaled result.
+ return benchmark_8bit_to_32bit<gemmlowp::L8R8WithLhsNonzeroBitDepthParams>(
+ shape.rows, shape.depth, shape.cols);
+#elif defined BENCHMARK_8bit_to_32bit_wide
+ // Variant of BENCHMARK_8bit_wide where the user asks for raw int32
+ // accumulators instead of an 8-bit-downscaled result.
+ return benchmark_8bit_to_32bit<gemmlowp::DefaultL8R8BitDepthParams>(
+ shape.rows, shape.depth, shape.cols);
+#elif defined BENCHMARK_float
+ return benchmark_float(shape.rows, shape.depth, shape.cols);
+#else
+#error What arithmetic path should we benchmark? (Suggestion: #define BENCHMARK_8bit)
+#endif
+}
+
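+// Sizes to benchmark: exact powers of two from 1 to 2048, plus two denser
+// geometric ladders (ratio sqrt(2) from 8 to 2048, ratio 2^(1/4) from 16 to
+// 512), deduplicated and ordered by the std::set.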
+std::set<int> all_sizes() {
+ std::set<int> sizes;
+ for (int i = 1; i <= 2048; i *= 2) {
+ sizes.insert(i);
+ }
+ for (double x = 8; x <= 2048; x *= std::sqrt(2.)) {
+ sizes.insert(static_cast<int>(std::round(x)));
+ }
+ for (double x = 16; x <= 512; x *= std::pow(2., 1. / 4.)) {
+ sizes.insert(static_cast<int>(std::round(x)));
+ }
+ return sizes;
+}
+
+std::mt19937& RandomEngine() {
+ static std::mt19937 engine;
+ return engine;
+}
+
+std::vector<Shape> all_shapes_in_random_order() {
+ std::vector<Shape> shapes;
+ const std::set<int> sizes = all_sizes();
+#if defined BENCHMARK_ROWS
+ // Benchmark one specific shape
+ Shape shape;
+ shape.rows = BENCHMARK_ROWS;
+ shape.depth = BENCHMARK_DEPTH;
+ shape.cols = BENCHMARK_COLS;
+ shapes.push_back(shape);
+#elif defined BENCHMARK_QUICK
+ // Benchmark an assortment of cubic shapes
+ for (int size : sizes) {
+ Shape shape;
+ shape.rows = size;
+ shape.depth = size;
+ shape.cols = size;
+ shapes.push_back(shape);
+ }
+#elif defined BENCHMARK_EXHAUSTIVE
+ // Benchmark all sorts of shapes
+ for (int rows : sizes) {
+ for (int depth : sizes) {
+ for (int cols : sizes) {
+ Shape shape;
+ shape.rows = rows;
+ shape.depth = depth;
+ shape.cols = cols;
+ shapes.push_back(shape);
+ }
+ }
+ }
+#else
+#error What shapes should we benchmark? (Suggestion: #define BENCHMARK_QUICK)
+#endif
+ std::shuffle(std::begin(shapes), std::end(shapes), RandomEngine());
+ return shapes;
+}
+
+void run_benchmarks(std::map<Shape, float>* results) {
+ std::vector<Shape> shapes;
+ for (int pass = 0; pass < kPasses; pass++) {
+ const std::vector<Shape> pass_shapes = all_shapes_in_random_order();
+ shapes.insert(std::end(shapes), std::begin(pass_shapes),
+ std::end(pass_shapes));
+ }
+
+ const double time_start = gemmlowp::real_time_in_seconds();
+ for (std::size_t i = 0; i < shapes.size(); i++) {
+ const double ratio = static_cast<double>(i) / shapes.size();
+ const double elapsed = gemmlowp::real_time_in_seconds() - time_start;
+ const double elapsed_hours = elapsed / 3600.;
+ const double eta_hours = elapsed_hours * (1. - ratio) / ratio;
+ fprintf(stderr,
+ "Benchmarking: %.2f%% done, Elapsed: %.2f hours, ETA: %.2f "
+ "hours... \r",
+ 100. * ratio, elapsed_hours, eta_hours);
+ fflush(stderr);
+ const Shape& shape = shapes[i];
+ float latency = benchmark(shape);
+ if (results->count(shape)) {
+ (*results)[shape] = std::min(latency, (*results)[shape]);
+ } else {
+ (*results)[shape] = latency;
+ }
+ }
+ fprintf(stderr, "\n");
+}
+
+int main() {
+ std::map<Shape, float> results;
+ run_benchmarks(&results);
+ printf("Using %d thread(s)\n", kNumThreads);
+ printf("depth,rows,cols,latency(s),Gop/s\n");
+ for (const auto& result : results) {
+ const Shape& shape = result.first;
+ printf("%d,%d,%d,%.4g,%.4g\n", shape.depth, shape.rows, shape.cols,
+ result.second,
+ 2e-9 * shape.depth * shape.rows * shape.cols / result.second);
+ }
+}
#include "test.h"
-#include <unistd.h>
#include <array>
#include <cstdint>
#include <cstdlib>
}
}
+ // Test a variant of the familiar default pipeline consisting of quantize-down
+ // and clamp-and-cast-to-int16.
+ OutputStageSaturatingCastToInt16 saturating_cast_int16_stage;
+ auto quantize_down_and_saturating_cast_int16_pipeline =
+ std::make_tuple(quantize_down_stage, saturating_cast_int16_stage);
+ Matrix<std::int16_t, ResultOrder> result_quantized_down_saturated_int16(rows,
+ cols);
+ GemmWithOutputPipeline<std::uint8_t, std::int16_t, DefaultL8R8BitDepthParams>(
+ &context, lhs.const_map(), rhs.const_map(),
+ &result_quantized_down_saturated_int16, lhs_offset, rhs_offset,
+ quantize_down_and_saturating_cast_int16_pipeline);
+
+ for (int r = 0; r < rows; r++) {
+ for (int c = 0; c < cols; c++) {
+ std::int32_t quantized = result_quantized_down_int32(r, c);
+ std::int16_t expected = std::min(std::max(quantized, -32768), 32767);
+ Check(expected == result_quantized_down_saturated_int16(r, c));
+ }
+ }
+
// Test a bias-addition with row-vector
std::vector<std::int32_t> row_vector_data(cols);
std::uniform_int_distribution<std::int32_t> uniform_minus_500_plus_500(-500,
result_fixedpoint_shift++;
}
Check(result_fixedpoint_shift >= 0);
- // Now test OutputStageQuantizeDownInt32ToUint8ScaleByFixedPoint
- OutputStageQuantizeDownInt32ToUint8ScaleByFixedPoint
+ // Now test OutputStageQuantizeDownInt32ByFixedPoint
+ OutputStageQuantizeDownInt32ByFixedPoint
quantize_down_by_fixedpoint_stage;
quantize_down_by_fixedpoint_stage.result_offset_after_shift =
static_cast<std::int32_t>(
&result_quantized_down_by_fixedpoint_int32, lhs_offset, rhs_offset,
quantize_down_by_fixedpoint_pipeline);
- std::vector<std::int32_t> diffs_caused_by_fixedpoint;
for (int r = 0; r < rows; r++) {
for (int c = 0; c < cols; c++) {
const std::int32_t actual =
}
}
+ // Test OutputStageScaleInt32ByFixedPointAndExponent
+ for (int exponent = -2; exponent <= 2; exponent++) {
+ OutputStageScaleInt32ByFixedPointAndExponent
+ scale_by_fixedpoint_and_exponent_stage;
+ scale_by_fixedpoint_and_exponent_stage.result_offset_after_shift =
+ static_cast<std::int32_t>(round(static_cast<double>(
+ result_offset * result_mult_int * std::pow(2.0, exponent))));
+ scale_by_fixedpoint_and_exponent_stage.result_fixedpoint_multiplier =
+ result_fixedpoint_multiplier;
+ scale_by_fixedpoint_and_exponent_stage.result_exponent = exponent;
+ auto scale_by_fixedpoint_and_exponent_pipeline =
+ std::make_tuple(scale_by_fixedpoint_and_exponent_stage);
+ Matrix<std::int32_t, ResultOrder>
+ result_scaled_by_fixedpoint_and_exponent_int32(rows, cols);
+ GemmWithOutputPipeline<std::uint8_t, std::int32_t,
+ DefaultL8R8BitDepthParams>(
+ &context, lhs.const_map(), rhs.const_map(),
+ &result_scaled_by_fixedpoint_and_exponent_int32, lhs_offset, rhs_offset,
+ scale_by_fixedpoint_and_exponent_pipeline);
+
+ for (int r = 0; r < rows; r++) {
+ for (int c = 0; c < cols; c++) {
+ const std::int32_t actual =
+ result_scaled_by_fixedpoint_and_exponent_int32(r, c);
+ const std::int32_t raw = result_raw_int32(r, c);
+ int left_shift = std::max(0, exponent);
+ int right_shift = std::max(0, -exponent);
+ const std::int32_t expected =
+ scale_by_fixedpoint_and_exponent_stage.result_offset_after_shift +
+ RoundingDivideByPOT(
+ SaturatingRoundingDoublingHighMul((1 << left_shift) * raw,
+ result_fixedpoint_multiplier),
+ right_shift);
+ Check(actual == expected);
+ }
+ }
+ }
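+  // In real-number terms, the output stage exercised above computes
+  // approximately
+  //   result ~= result_offset_after_shift
+  //             + raw * result_fixedpoint_multiplier * 2^exponent / 2^31,
+  // which is what the expected-value computation in the inner loop spells
+  // out with SaturatingRoundingDoublingHighMul and RoundingDivideByPOT.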
+
// Test the variant of the familiar default pipeline consisting of
// quantize-down and
// clamp-and-cast-to-uint8, where we used fixedpoint multipliers for the
return dist(RandomEngine());
}
+#ifdef _MSC_VER
+// msvc does not support 8bit types in uniform_int_distribution<>.
+// Use the 32-bit Random() helper and reduce its result into the operand range.
+template <typename OperandRange, typename MatrixType>
+void MakeRandom(MatrixType* m) {
+ ScopedProfilingLabel("MakeRandom(matrix)");
+ for (int c = 0; c < m->cols(); c++) {
+ for (int r = 0; r < m->rows(); r++) {
+ (*m)(r, c) = Random() % OperandRange::kMaxValue;
+ }
+ }
+}
+#else
template <typename OperandRange, typename MatrixType>
void MakeRandom(MatrixType* m) {
ScopedProfilingLabel("MakeRandom(matrix)");
}
}
}
+#endif
template <typename MatrixType>
void MakeConstant(MatrixType* m, typename MatrixType::Scalar val) {
// See the License for the specific language governing permissions and
// limitations under the License.
-#include "../internal/allocator.h"
#include "test.h"
+#include "../internal/allocator.h"
namespace gemmlowp {
// limitations under the License.
#include "test.h"
+#include "../profiling/pthread_everywhere.h"
-#include <pthread.h>
#include <vector>
#include "../internal/multi_thread_gemm.h"
Thread(BlockingCounter* blocking_counter, int number_of_times_to_decrement)
: blocking_counter_(blocking_counter),
number_of_times_to_decrement_(number_of_times_to_decrement),
+ finished_(false),
made_the_last_decrement_(false) {
pthread_create(&thread_, nullptr, ThreadFunc, this);
}
~Thread() { Join(); }
bool Join() const {
- pthread_join(thread_, nullptr);
+ if (!finished_) {
+ pthread_join(thread_, nullptr);
+ }
return made_the_last_decrement_;
}
Check(!made_the_last_decrement_);
made_the_last_decrement_ = blocking_counter_->DecrementCount();
}
+ finished_ = true;
}
static void* ThreadFunc(void* ptr) {
BlockingCounter* const blocking_counter_;
const int number_of_times_to_decrement_;
pthread_t thread_;
+ bool finished_;
bool made_the_last_decrement_;
};
#include <algorithm>
#include <cmath>
+#include <cstdio>
+#include <cinttypes>
#include <random>
#include <vector>
#include "test.h"
namespace {
-// Explanation of SimdVector type and associated functions
-// (LoadSimdVector, StoreSimdVector):
-// The fixedpoint stuff being tested here is generic in an underlying
-// integer type which may be either scalar (int32_t) or SIMD (e.g.
-// NEON int32x4_t). We want to write uniform tests that can test
-// both the scalar and SIMD paths. We achieve this by having this
-// generic SimdVector abstraction, local to this test.
-
+template <typename T>
+T Load(const typename FixedPointRawTypeTraits<T>::ScalarRawType* src) {
+ return *src;
+}
+template <typename T>
+void Store(typename FixedPointRawTypeTraits<T>::ScalarRawType* dst, T v) {
+ *dst = v;
+}
#ifdef GEMMLOWP_NEON
-using SimdVector = int32x4_t;
-constexpr std::size_t SimdVectorSize = 4;
-SimdVector LoadSimdVector(const std::int32_t* src) { return vld1q_s32(src); }
-void StoreSimdVector(std::int32_t* dst, SimdVector v) { vst1q_s32(dst, v); }
-#elif defined(GEMMLOWP_SSE4)
-using SimdVector = __m128i;
-constexpr std::size_t SimdVectorSize = 4;
-SimdVector LoadSimdVector(const std::int32_t* src) {
+template <>
+int32x4_t Load<int32x4_t>(const std::int32_t* src) {
+ return vld1q_s32(src);
+}
+template <>
+int16x8_t Load<int16x8_t>(const std::int16_t* src) {
+ return vld1q_s16(src);
+}
+template <>
+void Store<int32x4_t>(std::int32_t* dst, int32x4_t v) {
+ vst1q_s32(dst, v);
+}
+template <>
+void Store<int16x8_t>(std::int16_t* dst, int16x8_t v) {
+ vst1q_s16(dst, v);
+}
+#endif
+#ifdef GEMMLOWP_SSE4
+template <>
+__m128i Load<__m128i>(const std::int32_t* src) {
return _mm_loadu_si128(reinterpret_cast<const __m128i*>(src));
}
-void StoreSimdVector(std::int32_t* dst, SimdVector v) {
+template <>
+void Store<__m128i>(std::int32_t* dst, __m128i v) {
_mm_storeu_si128(reinterpret_cast<__m128i*>(dst), v);
}
-#else
-using SimdVector = std::int32_t;
-constexpr std::size_t SimdVectorSize = 1;
-SimdVector LoadSimdVector(const std::int32_t* src) { return *src; }
-void StoreSimdVector(std::int32_t* dst, SimdVector v) { *dst = v; }
+template <>
+int16x8_m128i Load<int16x8_m128i>(const std::int16_t* src) {
+ return int16x8_m128i(_mm_loadu_si128(reinterpret_cast<const __m128i*>(src)));
+}
+template <>
+void Store<int16x8_m128i>(std::int16_t* dst, int16x8_m128i v) {
+ _mm_storeu_si128(reinterpret_cast<__m128i*>(dst), v.v);
+}
+#endif
+#ifdef GEMMLOWP_MSA
+template <>
+v4i32 Load<v4i32>(const std::int32_t* src) {
+ return __builtin_msa_ld_w(const_cast<std::int32_t*>(src), 0);
+}
+template <>
+v8i16 Load<v8i16>(const std::int16_t* src) {
+ return __builtin_msa_ld_h(const_cast<std::int16_t*>(src), 0);
+}
+template <>
+void Store<v4i32>(std::int32_t* dst, v4i32 v) {
+ __builtin_msa_st_w(v, dst, 0);
+}
+template <>
+void Store<v8i16>(std::int16_t* dst, v8i16 v) {
+ __builtin_msa_st_h(v, dst, 0);
+}
#endif
-// Explanation of UnaryOpBase, its *Op subclasses below, and TestUnaryOp:
-// Most (though not all) of the fixedpoint functionality being tested
-// consists of functions taking one fixedpoint value and returning one
-// fixedpoint value, e.g. "exp" or "tanh". We call them "unary operators".
-// We factor a lot of testing boilerplate into a common TestUnaryOp function
-// taking a "unary op" object that fully describes the function to be tested.
-// These objects inherit UnaryOpBase mostly as a means to share some default
-// values for some properties.
-//
-// An important design element here is that the fixed-point values are passed
-// around as raw integers (e.g. int32_t or SIMD types such as int32x4_t), not
-// as higher-level FixedPoint objects. The motivation for this design is 1) to
-// avoid having to templatize everything in the tIntegerBits parameter of
-// class FixedPoint, and 2) to allow directly testing low-level functions
-// operating on raw types (e.g. RoundingDivideByPOT) without needlessly
-// requiring
-// wrapping raw values in FixedPoint objects.
-class UnaryOpBase {
+template <typename tSimdType>
+class TestFixedPoint {
public:
- // Min bound of the input range of this op. For example, an op only handling
- // nonnegative values would return 0.
- std::int32_t MinInput() const {
- return std::numeric_limits<std::int32_t>::min();
- }
- // Max bound of the input range of this op. For example, an op only handling
- // nonpositive values would return 0.
- std::int32_t MaxInput() const {
- return std::numeric_limits<std::int32_t>::max();
- }
- // Tolerated difference between actual and reference int32 values.
- // Note that the corresponding real-numbers tolerance depends on the number
- // of integer bits of the fixed-point representation of the results of this
- // op.
- // For example, for an op returning fixed-point values with 0 integer bits,
- // the correspondence between real-number values and raw values is
- // real_number = (2^31) * raw_value.
- std::int32_t Tolerance() const { return 0; }
-};
+ using SimdType = tSimdType;
+ using SimdTypeTraits = FixedPointRawTypeTraits<SimdType>;
+ using ScalarType = typename SimdTypeTraits::ScalarRawType;
+ static constexpr int kSimdLanes = SimdTypeTraits::kLanes;
+ static constexpr int kScalarTypeBits = 8 * sizeof(ScalarType);
+
+ // Explanation of UnaryOpBase, its *Op subclasses below, and TestUnaryOp:
+ // Most (though not all) of the fixedpoint functionality being tested
+ // consists of functions taking one fixedpoint value and returning one
+ // fixedpoint value, e.g. "exp" or "tanh". We call them "unary operators".
+ // We factor a lot of testing boilerplate into a common TestUnaryOp function
+ // taking a "unary op" object that fully describes the function to be tested.
+ // These objects inherit UnaryOpBase mostly as a means to share some default
+ // values for some properties.
+ //
+ // An important design element here is that the fixed-point values are passed
+ // around as raw integers (e.g. int32_t or SIMD types such as int32x4_t), not
+ // as higher-level FixedPoint objects. The motivation for this design is 1) to
+ // avoid having to templatize everything in the tIntegerBits parameter of
+ // class FixedPoint, and 2) to allow directly testing low-level functions
+ // operating on raw types (e.g. RoundingDivideByPOT) without needlessly
+ // requiring
+ // wrapping raw values in FixedPoint objects.
+ class UnaryOpBase {
+ public:
+ // Min bound of the input range of this op. For example, an op only handling
+ // nonnegative values would return 0.
+ ScalarType MinInput() const {
+ return std::numeric_limits<ScalarType>::min();
+ }
+ // Max bound of the input range of this op. For example, an op only handling
+ // nonpositive values would return 0.
+ ScalarType MaxInput() const {
+ return std::numeric_limits<ScalarType>::max();
+ }
+ // Tolerated difference between actual and reference ScalarType values.
+ // Note that the corresponding real-numbers tolerance depends on the number
+ // of integer bits of the fixed-point representation of the results of this
+ // op.
+ // For example, for an op returning fixed-point values with 0 integer bits,
+ // the correspondence between real-number values and raw values is
+ // real_number = raw_value / 2^(kScalarTypeBits - 1).
+ ScalarType Tolerance() const { return 0; }
+ };
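+
+  // Purely illustrative example of the "unary op" pattern described above
+  // (not part of the gemmlowp test suite): the smallest possible op is the
+  // identity, whose reference and actual implementations trivially agree.
+  // It could be exercised as TestUnaryOp(IdentityOp(), testvals).
+  class IdentityOp final : public UnaryOpBase {
+   public:
+    ScalarType ReferenceOp(ScalarType x) const { return x; }
+    template <typename RawType>
+    RawType Op(RawType x) const {
+      return x;
+    }
+  };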
+
+ // Op wrapping RoundingDivideByPOT
+ class RoundingDivideByPOTOp final : public UnaryOpBase {
+ public:
+ RoundingDivideByPOTOp(int exponent) : exponent_(exponent) {}
+ ScalarType ReferenceOp(ScalarType x) const {
+ const double d = static_cast<double>(x) / (1ll << exponent_);
+ return static_cast<ScalarType>(std::round(d));
+ }
+ template <typename RawType>
+ RawType Op(RawType x) const {
+ return RoundingDivideByPOT(x, exponent_);
+ }
-// Op wrapping RoundingDivideByPOT
-class RoundingDivideByPOTOp final : public UnaryOpBase {
- public:
- RoundingDivideByPOTOp(int exponent) : exponent_(exponent) {}
- std::int32_t ReferenceOp(std::int32_t x) const {
- const double d = static_cast<double>(x) / (1ll << exponent_);
- return static_cast<std::int32_t>(std::round(d));
- }
- template <typename tRawType>
- tRawType Op(tRawType x) const {
- return RoundingDivideByPOT(x, exponent_);
+ private:
+ const int exponent_;
+ };
+
+ // Op wrapping SaturatingRoundingMultiplyByPOT
+ template <int tExponent>
+ class SaturatingRoundingMultiplyByPOTOp final : public UnaryOpBase {
+ public:
+ ScalarType ReferenceOp(ScalarType x) const {
+ const double d = static_cast<double>(x) * std::pow(2., tExponent);
+ const double clamp_min = std::numeric_limits<ScalarType>::min();
+ const double clamp_max = std::numeric_limits<ScalarType>::max();
+ const double clamped = std::min(clamp_max, std::max(clamp_min, d));
+ return static_cast<ScalarType>(std::round(clamped));
+ }
+ template <typename RawType>
+ RawType Op(RawType x) const {
+ return SaturatingRoundingMultiplyByPOT<tExponent>(x);
+ }
+ };
+
+ // Op wrapping exp_on_interval_between_negative_one_quarter_and_0_excl
+ class ExpOnIntervalBetweenNegativeOneQuarterAnd0ExclOp final
+ : public UnaryOpBase {
+ public:
+ ScalarType MinInput() const { return -(1 << (kScalarTypeBits - 3)); }
+ ScalarType MaxInput() const { return 0; }
+ ScalarType Tolerance() const { return kScalarTypeBits == 32 ? 500 : 1; }
+ ScalarType ReferenceOp(ScalarType x) const {
+ using F = FixedPoint<ScalarType, 0>;
+ const double d = ToDouble(F::FromRaw(x));
+ const double e = std::exp(d);
+ return F::FromDouble(e).raw();
+ }
+ template <typename RawType>
+ RawType Op(RawType x) const {
+ using F = FixedPoint<RawType, 0>;
+ const F f = F::FromRaw(x);
+ const F e = exp_on_interval_between_negative_one_quarter_and_0_excl(f);
+ return e.raw();
+ }
+ };
+
+ // Op wrapping exp_on_negative_values
+ template <int tIntegerBits>
+ class ExpOnNegativeValuesOp final : public UnaryOpBase {
+ public:
+ ScalarType MaxInput() const { return 0; }
+ ScalarType Tolerance() const { return kScalarTypeBits == 32 ? 500 : 2; }
+ ScalarType ReferenceOp(ScalarType x) const {
+ using F = FixedPoint<ScalarType, tIntegerBits>;
+ using F0 = FixedPoint<ScalarType, 0>;
+ const double d = ToDouble(F::FromRaw(x));
+ const double e = std::exp(d);
+ return F0::FromDouble(e).raw();
+ }
+ template <typename RawType>
+ RawType Op(RawType x) const {
+ using F = FixedPoint<RawType, tIntegerBits>;
+ const F f = F::FromRaw(x);
+ return exp_on_negative_values(f).raw();
+ }
+ };
+
+ // Op wrapping one_minus_x_over_one_plus_x_for_x_in_0_1
+ class OneMinusXOverOnePlusXForXIn01Op final : public UnaryOpBase {
+ public:
+ ScalarType MinInput() const { return 0; }
+ ScalarType Tolerance() const { return kScalarTypeBits == 32 ? 12 : 11; }
+ ScalarType ReferenceOp(ScalarType x) const {
+ using F = FixedPoint<ScalarType, 0>;
+ const double d = ToDouble(F::FromRaw(x));
+ const double e = (1 - d) / (1 + d);
+ return F::FromDouble(e).raw();
+ }
+ template <typename RawType>
+ RawType Op(RawType x) const {
+ using F = FixedPoint<RawType, 0>;
+ const F f = F::FromRaw(x);
+ return one_minus_x_over_one_plus_x_for_x_in_0_1(f).raw();
+ }
+ };
+
+ // Op wrapping tanh
+ template <int tIntegerBits>
+ class TanhOp final : public UnaryOpBase {
+ public:
+ ScalarType Tolerance() const { return kScalarTypeBits == 32 ? 310 : 12; }
+ ScalarType ReferenceOp(ScalarType x) const {
+ using F = FixedPoint<ScalarType, tIntegerBits>;
+ using F0 = FixedPoint<ScalarType, 0>;
+ const double d = ToDouble(F::FromRaw(x));
+ const double e = std::tanh(d);
+ return F0::FromDouble(e).raw();
+ }
+ template <typename RawType>
+ RawType Op(RawType x) const {
+ using F = FixedPoint<RawType, tIntegerBits>;
+ const F f = F::FromRaw(x);
+ return tanh(f).raw();
+ }
+ };
+
+ // Op wrapping one_over_one_plus_x_for_x_in_0_1
+ class OneOverOnePlusXForXIn01Op final : public UnaryOpBase {
+ public:
+ ScalarType MinInput() const { return 0; }
+ ScalarType Tolerance() const { return kScalarTypeBits == 32 ? 6 : 5; }
+ ScalarType ReferenceOp(ScalarType x) const {
+ using F = FixedPoint<ScalarType, 0>;
+ const double d = ToDouble(F::FromRaw(x));
+ const double e = 1 / (1 + d);
+ return F::FromDouble(e).raw();
+ }
+ template <typename RawType>
+ RawType Op(RawType x) const {
+ using F = FixedPoint<RawType, 0>;
+ const F f = F::FromRaw(x);
+ return one_over_one_plus_x_for_x_in_0_1(f).raw();
+ }
+ };
+
+ // Op wrapping logistic
+ template <int tIntegerBits>
+ class LogisticOp final : public UnaryOpBase {
+ public:
+ ScalarType Tolerance() const { return kScalarTypeBits == 32 ? 155 : 6; }
+ ScalarType ReferenceOp(ScalarType x) const {
+ using F = FixedPoint<ScalarType, tIntegerBits>;
+ using F0 = FixedPoint<ScalarType, 0>;
+ const double d = ToDouble(F::FromRaw(x));
+ const double e = 1 / (1 + std::exp(-d));
+ return F0::FromDouble(e).raw();
+ }
+ template <typename RawType>
+ RawType Op(RawType x) const {
+ using F = FixedPoint<RawType, tIntegerBits>;
+ const F f = F::FromRaw(x);
+ return logistic(f).raw();
+ }
+ };
+
+ // Tests a given op, on a given list of int32 input values.
+ template <typename tUnaryOpType>
+ void TestUnaryOp(const tUnaryOpType& unary_op,
+ const std::vector<ScalarType>& testvals) {
+ Check(0 == (testvals.size() % kSimdLanes));
+ for (std::size_t i = 0; i < testvals.size(); i += kSimdLanes) {
+ // First, clamp input values according to the MinInput() and MaxInput()
+ // bounds returned by the op.
+ ScalarType input[kSimdLanes] = {0};
+ for (std::size_t j = 0; j < kSimdLanes; j++) {
+ const ScalarType raw_input = testvals[i + j];
+ input[j] = std::min(unary_op.MaxInput(),
+ std::max(unary_op.MinInput(), raw_input));
+ }
+ // Compute reference results and check that the actual results on
+ // scalar inputs agree with them, to the Tolerance() returned by the op.
+ ScalarType reference[kSimdLanes] = {0};
+ ScalarType actual_scalar[kSimdLanes] = {0};
+ for (std::size_t j = 0; j < kSimdLanes; j++) {
+ reference[j] = unary_op.ReferenceOp(input[j]);
+ actual_scalar[j] = unary_op.Op(input[j]);
+ const std::int64_t diff = static_cast<std::int64_t>(actual_scalar[j]) -
+ static_cast<std::int64_t>(reference[j]);
+ if (std::abs(diff) > unary_op.Tolerance()) {
+ fprintf(stderr, "abs(diff) (%" PRId64 ") > tolerance (%d)\n", diff,
+ unary_op.Tolerance());
+ }
+ Check(std::abs(diff) <= unary_op.Tolerance());
+ }
+ // Check that the actual results on SIMD inputs agree *exactly* with the
+ // actual results on scalar inputs. I.e. SIMD must make absolutely no
+ // difference
+ // to the results, regardless of the fact that both scalar and SIMD
+ // results may differ from the reference results.
+ ScalarType actual_simd[kSimdLanes] = {0};
+ Store<SimdType>(actual_simd, unary_op.Op(Load<SimdType>(input)));
+ for (std::size_t j = 0; j < kSimdLanes; j++) {
+ if (actual_simd[j] != actual_scalar[j]) {
+ fprintf(stderr, "SIMD (%d) != scalar (%d)\n", actual_simd[j],
+ actual_scalar[j]);
+ }
+ Check(actual_simd[j] == actual_scalar[j]);
+ }
+ }
}
- private:
- const int exponent_;
-};
-
-// Op wrapping SaturatingRoundingMultiplyByPOT
-template <int tExponent>
-class SaturatingRoundingMultiplyByPOTOp final : public UnaryOpBase {
- public:
- std::int32_t ReferenceOp(std::int32_t x) const {
- const double d = static_cast<double>(x) * std::pow(2., tExponent);
- const double clamp_min = std::numeric_limits<std::int32_t>::min();
- const double clamp_max = std::numeric_limits<std::int32_t>::max();
- const double clamped = std::min(clamp_max, std::max(clamp_min, d));
- return static_cast<std::int32_t>(std::round(clamped));
- }
- template <typename tRawType>
- tRawType Op(tRawType x) const {
- return SaturatingRoundingMultiplyByPOT<tExponent>(x);
+ template <int tIntegerBits>
+ void test_convert(FixedPoint<ScalarType, tIntegerBits> x) {
+ typedef FixedPoint<ScalarType, tIntegerBits> F;
+ F y = F::FromDouble(ToDouble(x));
+ Check(y == x);
}
-};
-// Op wrapping exp_on_interval_between_negative_one_quarter_and_0_excl
-class ExpOnIntervalBetweenNegativeOneQuarterAnd0ExclOp final
- : public UnaryOpBase {
- public:
- std::int32_t MinInput() const { return -(1 << 29); }
- std::int32_t MaxInput() const { return 0; }
- std::int32_t Tolerance() const { return 500; }
- std::int32_t ReferenceOp(std::int32_t x) const {
- using F = FixedPoint<std::int32_t, 0>;
- const double d = ToDouble(F::FromRaw(x));
- const double e = std::exp(d);
- return F::FromDouble(e).raw();
- }
- template <typename tRawType>
- tRawType Op(tRawType x) const {
- using F = FixedPoint<tRawType, 0>;
- const F f = F::FromRaw(x);
- const F e = exp_on_interval_between_negative_one_quarter_and_0_excl(f);
- return e.raw();
+ template <int tIntegerBits_a, int tIntegerBits_b>
+ void test_Rescale(FixedPoint<ScalarType, tIntegerBits_a> a) {
+ FixedPoint<ScalarType, tIntegerBits_b> actual = Rescale<tIntegerBits_b>(a);
+ FixedPoint<ScalarType, tIntegerBits_b> expected =
+ FixedPoint<ScalarType, tIntegerBits_b>::FromDouble(ToDouble(a));
+ Check(actual == expected);
}
-};
-// Op wrapping exp_on_negative_values
-template <int tIntegerBits>
-class ExpOnNegativeValuesOp final : public UnaryOpBase {
- public:
- std::int32_t MaxInput() const { return 0; }
- std::int32_t Tolerance() const { return 500; }
- std::int32_t ReferenceOp(std::int32_t x) const {
- using F = FixedPoint<std::int32_t, tIntegerBits>;
- using F0 = FixedPoint<std::int32_t, 0>;
- const double d = ToDouble(F::FromRaw(x));
- const double e = std::exp(d);
- return F0::FromDouble(e).raw();
- }
- template <typename tRawType>
- tRawType Op(tRawType x) const {
- using F = FixedPoint<tRawType, tIntegerBits>;
- const F f = F::FromRaw(x);
- return exp_on_negative_values(f).raw();
+ template <int tIntegerBits_a, int tIntegerBits_b>
+ void test_Rescale(const std::vector<ScalarType>& testvals) {
+ for (auto a : testvals) {
+ FixedPoint<ScalarType, tIntegerBits_a> aq;
+ aq.raw() = a;
+ test_Rescale<tIntegerBits_a, tIntegerBits_b>(aq);
+ }
}
-};
-// Op wrapping one_minus_x_over_one_plus_x_for_x_in_0_1
-class OneMinusXOverOnePlusXForXIn01Op final : public UnaryOpBase {
- public:
- std::int32_t MinInput() const { return 0; }
- std::int32_t Tolerance() const { return 12; }
- std::int32_t ReferenceOp(std::int32_t x) const {
- using F = FixedPoint<std::int32_t, 0>;
- const double d = ToDouble(F::FromRaw(x));
- const double e = (1 - d) / (1 + d);
- return F::FromDouble(e).raw();
- }
- template <typename tRawType>
- tRawType Op(tRawType x) const {
- using F = FixedPoint<tRawType, 0>;
- const F f = F::FromRaw(x);
- return one_minus_x_over_one_plus_x_for_x_in_0_1(f).raw();
+ template <int tIntegerBits_a, int tIntegerBits_b>
+ void test_mul(FixedPoint<ScalarType, tIntegerBits_a> a,
+ FixedPoint<ScalarType, tIntegerBits_b> b) {
+ static const int ProductIntegerBits = tIntegerBits_a + tIntegerBits_b;
+ using ProductFixedPoint = FixedPoint<ScalarType, ProductIntegerBits>;
+ ProductFixedPoint ab;
+ ab = a * b;
+ double a_double = ToDouble(a);
+ double b_double = ToDouble(b);
+ double ab_double = a_double * b_double;
+ ProductFixedPoint expected = ProductFixedPoint::FromDouble(ab_double);
+ std::int64_t diff = std::int64_t(ab.raw()) - std::int64_t(expected.raw());
+ Check(std::abs(diff) <= 1);
}
-};
-// Op wrapping tanh
-template <int tIntegerBits>
-class TanhOp final : public UnaryOpBase {
- public:
- std::int32_t Tolerance() const { return 310; }
- std::int32_t ReferenceOp(std::int32_t x) const {
- using F = FixedPoint<std::int32_t, tIntegerBits>;
- using F0 = FixedPoint<std::int32_t, 0>;
- const double d = ToDouble(F::FromRaw(x));
- const double e = std::tanh(d);
- return F0::FromDouble(e).raw();
- }
- template <typename tRawType>
- tRawType Op(tRawType x) const {
- using F = FixedPoint<tRawType, tIntegerBits>;
- const F f = F::FromRaw(x);
- return tanh(f).raw();
+ template <int tIntegerBits_a, int tIntegerBits_b>
+ void test_mul(const std::vector<ScalarType>& testvals) {
+ for (auto a : testvals) {
+ for (auto b : testvals) {
+ FixedPoint<ScalarType, tIntegerBits_a> aq;
+ FixedPoint<ScalarType, tIntegerBits_b> bq;
+ aq.raw() = a;
+ bq.raw() = b;
+ test_mul(aq, bq);
+ }
+ }
}
-};
-// Op wrapping one_over_one_plus_x_for_x_in_0_1
-class OneOverOnePlusXForXIn01Op final : public UnaryOpBase {
- public:
- std::int32_t MinInput() const { return 0; }
- std::int32_t Tolerance() const { return 6; }
- std::int32_t ReferenceOp(std::int32_t x) const {
- using F = FixedPoint<std::int32_t, 0>;
- const double d = ToDouble(F::FromRaw(x));
- const double e = 1 / (1 + d);
- return F::FromDouble(e).raw();
+ template <int tExponent, int tIntegerBits_a>
+ void test_ExactMulByPot(FixedPoint<ScalarType, tIntegerBits_a> a) {
+ double x = ToDouble(a) * std::pow(2.0, tExponent);
+ double y = ToDouble(ExactMulByPot<tExponent>(a));
+ Check(x == y);
}
- template <typename tRawType>
- tRawType Op(tRawType x) const {
- using F = FixedPoint<tRawType, 0>;
- const F f = F::FromRaw(x);
- return one_over_one_plus_x_for_x_in_0_1(f).raw();
- }
-};
-// Op wrapping logistic
-template <int tIntegerBits>
-class LogisticOp final : public UnaryOpBase {
- public:
- std::int32_t Tolerance() const { return 155; }
- std::int32_t ReferenceOp(std::int32_t x) const {
- using F = FixedPoint<std::int32_t, tIntegerBits>;
- using F0 = FixedPoint<std::int32_t, 0>;
- const double d = ToDouble(F::FromRaw(x));
- const double e = 1 / (1 + std::exp(-d));
- return F0::FromDouble(e).raw();
- }
- template <typename tRawType>
- tRawType Op(tRawType x) const {
- using F = FixedPoint<tRawType, tIntegerBits>;
- const F f = F::FromRaw(x);
- return logistic(f).raw();
+ template <int tExponent, int tIntegerBits_a>
+ void test_ExactMulByPot(const std::vector<ScalarType>& testvals) {
+ for (auto a : testvals) {
+ FixedPoint<ScalarType, tIntegerBits_a> aq;
+ aq.raw() = a;
+ test_ExactMulByPot<tExponent, tIntegerBits_a>(aq);
+ }
}
-};
-// Tests a given op, on a given list of int32 input values.
-template <typename tUnaryOpType>
-void TestUnaryOp(const tUnaryOpType& unary_op,
- const std::vector<std::int32_t>& testvals_int32) {
- Check(0 == (testvals_int32.size() % SimdVectorSize));
- for (std::size_t i = 0; i < testvals_int32.size(); i += SimdVectorSize) {
- // First, clamp input int32 values accoding to the MinInput() and MaxInput()
- // bounds returned by the op.
- std::int32_t input[SimdVectorSize] = {0};
- for (std::size_t j = 0; j < SimdVectorSize; j++) {
- const std::int32_t raw_input = testvals_int32[i + j];
- input[j] = std::min(unary_op.MaxInput(),
- std::max(unary_op.MinInput(), raw_input));
- }
- // Compute reference results and check that the actual results on
- // scalar inputs agree with them, to the Tolerance() returned by the op.
- std::int32_t reference[SimdVectorSize] = {0};
- std::int32_t actual_scalar[SimdVectorSize] = {0};
- for (std::size_t j = 0; j < SimdVectorSize; j++) {
- reference[j] = unary_op.ReferenceOp(input[j]);
- actual_scalar[j] = unary_op.Op(input[j]);
- const std::int64_t diff = static_cast<std::int64_t>(actual_scalar[j]) -
- static_cast<std::int64_t>(reference[j]);
- Check(std::abs(diff) <= unary_op.Tolerance());
+ // Make the list of test values to test each op against.
+ std::vector<ScalarType> MakeTestVals() {
+ std::vector<ScalarType> testvals;
+
+ for (int i = 0; i < kScalarTypeBits - 1; i++) {
+ testvals.push_back((1 << i) - 2);
+ testvals.push_back((1 << i) - 1);
+ testvals.push_back((1 << i));
+ testvals.push_back((1 << i) + 1);
+ testvals.push_back((1 << i) + 2);
+ testvals.push_back(-(1 << i) - 2);
+ testvals.push_back(-(1 << i) - 1);
+ testvals.push_back(-(1 << i));
+ testvals.push_back(-(1 << i) + 1);
+ testvals.push_back(-(1 << i) + 2);
}
- // Check that the actual results on SIMD inputs agree *exactly* with the
- // actual results on scalar inputs. I.e. SIMD must make absolutely no
- // difference
- // to the results, regardless of the fact that both scalar and SIMD results
- // may differ from the reference results.
- std::int32_t actual_simd[SimdVectorSize] = {0};
- StoreSimdVector(actual_simd, unary_op.Op(LoadSimdVector(input)));
- for (std::size_t j = 0; j < SimdVectorSize; j++) {
- Check(actual_simd[j] == actual_scalar[j]);
+ testvals.push_back(std::numeric_limits<ScalarType>::min());
+ testvals.push_back(std::numeric_limits<ScalarType>::min() + 1);
+ testvals.push_back(std::numeric_limits<ScalarType>::min() + 2);
+ testvals.push_back(std::numeric_limits<ScalarType>::max() - 2);
+ testvals.push_back(std::numeric_limits<ScalarType>::max() - 1);
+ testvals.push_back(std::numeric_limits<ScalarType>::max());
+
+ std::mt19937 random_engine;
+ std::uniform_int_distribution<ScalarType> uniform_distribution(
+ std::numeric_limits<ScalarType>::min(),
+ std::numeric_limits<ScalarType>::max());
+ for (int i = 0; i < 1000; i++) {
+ testvals.push_back(uniform_distribution(random_engine));
}
- }
-}
-
-template <int tIntegerBits>
-void test_convert(FixedPoint<std::int32_t, tIntegerBits> x) {
- typedef FixedPoint<std::int32_t, tIntegerBits> F;
- F y = F::FromDouble(ToDouble(x));
- Check(y == x);
-}
-template <int tIntegerBits_a, int tIntegerBits_b>
-void test_Rescale(FixedPoint<std::int32_t, tIntegerBits_a> a) {
- FixedPoint<std::int32_t, tIntegerBits_b> actual = Rescale<tIntegerBits_b>(a);
- FixedPoint<std::int32_t, tIntegerBits_b> expected =
- FixedPoint<std::int32_t, tIntegerBits_b>::FromDouble(ToDouble(a));
- Check(actual == expected);
-}
+ // SIMD tests will require the length of testvals to be a multiple
+ // of SIMD vector size.
+ while (testvals.size() % kSimdLanes) {
+ testvals.push_back(0);
+ }
-template <int tIntegerBits_a, int tIntegerBits_b>
-void test_Rescale(const std::vector<std::int32_t>& testvals_int32) {
- for (auto a : testvals_int32) {
- FixedPoint<std::int32_t, tIntegerBits_a> aq;
- aq.raw() = a;
- test_Rescale<tIntegerBits_a, tIntegerBits_b>(aq);
+ std::sort(testvals.begin(), testvals.end());
+ return testvals;
}
-}
-template <int tIntegerBits_a, int tIntegerBits_b>
-void test_mul(FixedPoint<std::int32_t, tIntegerBits_a> a,
- FixedPoint<std::int32_t, tIntegerBits_b> b) {
- static const int ProductIntegerBits = tIntegerBits_a + tIntegerBits_b;
- using ProductFixedPoint = FixedPoint<std::int32_t, ProductIntegerBits>;
- ProductFixedPoint ab;
- ab = a * b;
- double a_double = ToDouble(a);
- double b_double = ToDouble(b);
- double ab_double = a_double * b_double;
- ProductFixedPoint expected = ProductFixedPoint::FromDouble(ab_double);
- std::int64_t diff = std::int64_t(ab.raw()) - std::int64_t(expected.raw());
- Check(std::abs(diff) <= 1);
-}
+ void RunTests(const char* msg) {
+ const std::vector<ScalarType> testvals = MakeTestVals();
-template <int tIntegerBits_a, int tIntegerBits_b>
-void test_mul(const std::vector<std::int32_t>& testvals_int32) {
- for (auto a : testvals_int32) {
- for (auto b : testvals_int32) {
- FixedPoint<std::int32_t, tIntegerBits_a> aq;
- FixedPoint<std::int32_t, tIntegerBits_b> bq;
- aq.raw() = a;
- bq.raw() = b;
- test_mul(aq, bq);
+ for (int s = 0; s < kScalarTypeBits; s++) {
+ TestUnaryOp(RoundingDivideByPOTOp(s), testvals);
}
- }
-}
-template <int tExponent, int tIntegerBits_a>
-void test_ExactMulByPot(FixedPoint<std::int32_t, tIntegerBits_a> a) {
- double x = ToDouble(a) * std::pow(2.0, tExponent);
- double y = ToDouble(ExactMulByPot<tExponent>(a));
- Check(x == y);
-}
-
-template <int tExponent, int tIntegerBits_a>
-void test_ExactMulByPot(const std::vector<std::int32_t>& testvals_int32) {
- for (auto a : testvals_int32) {
- FixedPoint<std::int32_t, tIntegerBits_a> aq;
- aq.raw() = a;
- test_ExactMulByPot<tExponent, tIntegerBits_a>(aq);
- }
-}
-
-// Make the list of test values to test each op against.
-std::vector<std::int32_t> MakeTestValsInt32() {
- std::vector<std::int32_t> testvals_int32;
-
- for (int i = 0; i < 31; i++) {
- testvals_int32.push_back((1 << i) - 2);
- testvals_int32.push_back((1 << i) - 1);
- testvals_int32.push_back((1 << i));
- testvals_int32.push_back((1 << i) + 1);
- testvals_int32.push_back((1 << i) + 2);
- testvals_int32.push_back(-(1 << i) - 2);
- testvals_int32.push_back(-(1 << i) - 1);
- testvals_int32.push_back(-(1 << i));
- testvals_int32.push_back(-(1 << i) + 1);
- testvals_int32.push_back(-(1 << i) + 2);
- }
- testvals_int32.push_back(std::numeric_limits<std::int32_t>::min());
- testvals_int32.push_back(std::numeric_limits<std::int32_t>::min() + 1);
- testvals_int32.push_back(std::numeric_limits<std::int32_t>::min() + 2);
- testvals_int32.push_back(std::numeric_limits<std::int32_t>::max() - 2);
- testvals_int32.push_back(std::numeric_limits<std::int32_t>::max() - 1);
- testvals_int32.push_back(std::numeric_limits<std::int32_t>::max());
-
- std::mt19937 random_engine;
- std::uniform_int_distribution<std::int32_t> uniform_distribution(
- std::numeric_limits<std::int32_t>::min(),
- std::numeric_limits<std::int32_t>::max());
- for (int i = 0; i < 1000; i++) {
- testvals_int32.push_back(uniform_distribution(random_engine));
- }
+ TestUnaryOp(SaturatingRoundingMultiplyByPOTOp<1 - kScalarTypeBits>(),
+ testvals);
+ TestUnaryOp(SaturatingRoundingMultiplyByPOTOp<2 - kScalarTypeBits>(),
+ testvals);
+ TestUnaryOp(SaturatingRoundingMultiplyByPOTOp<3 - kScalarTypeBits>(),
+ testvals);
+ TestUnaryOp(SaturatingRoundingMultiplyByPOTOp<14 - kScalarTypeBits>(),
+ testvals);
+ TestUnaryOp(SaturatingRoundingMultiplyByPOTOp<15 - kScalarTypeBits>(),
+ testvals);
+ TestUnaryOp(SaturatingRoundingMultiplyByPOTOp<-15>(), testvals);
+ TestUnaryOp(SaturatingRoundingMultiplyByPOTOp<-4>(), testvals);
+ TestUnaryOp(SaturatingRoundingMultiplyByPOTOp<-3>(), testvals);
+ TestUnaryOp(SaturatingRoundingMultiplyByPOTOp<-2>(), testvals);
+ TestUnaryOp(SaturatingRoundingMultiplyByPOTOp<-1>(), testvals);
+ TestUnaryOp(SaturatingRoundingMultiplyByPOTOp<0>(), testvals);
+ TestUnaryOp(SaturatingRoundingMultiplyByPOTOp<1>(), testvals);
+ TestUnaryOp(SaturatingRoundingMultiplyByPOTOp<2>(), testvals);
+ TestUnaryOp(SaturatingRoundingMultiplyByPOTOp<3>(), testvals);
+ TestUnaryOp(SaturatingRoundingMultiplyByPOTOp<4>(), testvals);
+ TestUnaryOp(SaturatingRoundingMultiplyByPOTOp<15>(), testvals);
+ TestUnaryOp(SaturatingRoundingMultiplyByPOTOp<kScalarTypeBits - 15>(),
+ testvals);
+ TestUnaryOp(SaturatingRoundingMultiplyByPOTOp<kScalarTypeBits - 14>(),
+ testvals);
+ TestUnaryOp(SaturatingRoundingMultiplyByPOTOp<kScalarTypeBits - 3>(),
+ testvals);
+ TestUnaryOp(SaturatingRoundingMultiplyByPOTOp<kScalarTypeBits - 2>(),
+ testvals);
+ TestUnaryOp(SaturatingRoundingMultiplyByPOTOp<kScalarTypeBits - 1>(),
+ testvals);
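+ // The exponents above probe both saturation extremes (1 - kScalarTypeBits up to kScalarTypeBits - 1) as well as small shifts around zero.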
+
+ TestUnaryOp(ExpOnIntervalBetweenNegativeOneQuarterAnd0ExclOp(), testvals);
+ TestUnaryOp(ExpOnNegativeValuesOp<0>(), testvals);
+ TestUnaryOp(ExpOnNegativeValuesOp<1>(), testvals);
+ TestUnaryOp(ExpOnNegativeValuesOp<2>(), testvals);
+ TestUnaryOp(ExpOnNegativeValuesOp<3>(), testvals);
+ TestUnaryOp(ExpOnNegativeValuesOp<4>(), testvals);
+ TestUnaryOp(ExpOnNegativeValuesOp<5>(), testvals);
+ TestUnaryOp(ExpOnNegativeValuesOp<6>(), testvals);
+
+ TestUnaryOp(OneMinusXOverOnePlusXForXIn01Op(), testvals);
+ TestUnaryOp(TanhOp<0>(), testvals);
+ TestUnaryOp(TanhOp<1>(), testvals);
+ TestUnaryOp(TanhOp<2>(), testvals);
+ TestUnaryOp(TanhOp<3>(), testvals);
+ TestUnaryOp(TanhOp<4>(), testvals);
+ TestUnaryOp(TanhOp<5>(), testvals);
+ TestUnaryOp(TanhOp<6>(), testvals);
+
+ TestUnaryOp(OneOverOnePlusXForXIn01Op(), testvals);
+ TestUnaryOp(LogisticOp<0>(), testvals);
+ TestUnaryOp(LogisticOp<1>(), testvals);
+ TestUnaryOp(LogisticOp<2>(), testvals);
+ TestUnaryOp(LogisticOp<3>(), testvals);
+ TestUnaryOp(LogisticOp<4>(), testvals);
+ TestUnaryOp(LogisticOp<5>(), testvals);
+ TestUnaryOp(LogisticOp<6>(), testvals);
+
+ for (auto a : testvals) {
+ FixedPoint<ScalarType, 4> x;
+ x.raw() = a;
+ test_convert(x);
+ }
- // SIMD tests will require the length of testvals_int32 to be a multiple
- // of SIMD vector size.
- while (testvals_int32.size() % SimdVectorSize) {
- testvals_int32.push_back(0);
+ test_mul<0, 0>(testvals);
+ test_mul<0, 1>(testvals);
+ test_mul<2, 0>(testvals);
+ test_mul<1, 1>(testvals);
+ test_mul<4, 4>(testvals);
+ test_mul<3, 5>(testvals);
+ test_mul<7, 2>(testvals);
+ test_mul<kScalarTypeBits / 2 - 1, kScalarTypeBits / 2 - 2>(testvals);
+
+ test_Rescale<0, 0>(testvals);
+ test_Rescale<0, 1>(testvals);
+ test_Rescale<2, 0>(testvals);
+ test_Rescale<4, 4>(testvals);
+ test_Rescale<4, 5>(testvals);
+ test_Rescale<6, 3>(testvals);
+ test_Rescale<13, 9>(testvals);
+
+ test_ExactMulByPot<0, 0>(testvals);
+ test_ExactMulByPot<0, 4>(testvals);
+ test_ExactMulByPot<1, 4>(testvals);
+ test_ExactMulByPot<3, 2>(testvals);
+ test_ExactMulByPot<-4, 5>(testvals);
+ test_ExactMulByPot<-2, 6>(testvals);
+
+ fprintf(stderr, "PASS (%s)\n", msg);
}
-
- std::sort(testvals_int32.begin(), testvals_int32.end());
- return testvals_int32;
-}
+};
} // end anonymous namespace
} // end namespace gemmlowp
int main() {
- using namespace gemmlowp;
-
- const std::vector<std::int32_t> testvals_int32 = MakeTestValsInt32();
-
- for (int s = 0; s < 32; s++) {
- TestUnaryOp(RoundingDivideByPOTOp(s), testvals_int32);
- }
-
- TestUnaryOp(SaturatingRoundingMultiplyByPOTOp<-31>(), testvals_int32);
- TestUnaryOp(SaturatingRoundingMultiplyByPOTOp<-30>(), testvals_int32);
- TestUnaryOp(SaturatingRoundingMultiplyByPOTOp<-29>(), testvals_int32);
- TestUnaryOp(SaturatingRoundingMultiplyByPOTOp<-17>(), testvals_int32);
- TestUnaryOp(SaturatingRoundingMultiplyByPOTOp<-16>(), testvals_int32);
- TestUnaryOp(SaturatingRoundingMultiplyByPOTOp<-15>(), testvals_int32);
- TestUnaryOp(SaturatingRoundingMultiplyByPOTOp<-4>(), testvals_int32);
- TestUnaryOp(SaturatingRoundingMultiplyByPOTOp<-3>(), testvals_int32);
- TestUnaryOp(SaturatingRoundingMultiplyByPOTOp<-2>(), testvals_int32);
- TestUnaryOp(SaturatingRoundingMultiplyByPOTOp<-1>(), testvals_int32);
- TestUnaryOp(SaturatingRoundingMultiplyByPOTOp<0>(), testvals_int32);
- TestUnaryOp(SaturatingRoundingMultiplyByPOTOp<1>(), testvals_int32);
- TestUnaryOp(SaturatingRoundingMultiplyByPOTOp<2>(), testvals_int32);
- TestUnaryOp(SaturatingRoundingMultiplyByPOTOp<3>(), testvals_int32);
- TestUnaryOp(SaturatingRoundingMultiplyByPOTOp<4>(), testvals_int32);
- TestUnaryOp(SaturatingRoundingMultiplyByPOTOp<15>(), testvals_int32);
- TestUnaryOp(SaturatingRoundingMultiplyByPOTOp<16>(), testvals_int32);
- TestUnaryOp(SaturatingRoundingMultiplyByPOTOp<17>(), testvals_int32);
- TestUnaryOp(SaturatingRoundingMultiplyByPOTOp<29>(), testvals_int32);
- TestUnaryOp(SaturatingRoundingMultiplyByPOTOp<30>(), testvals_int32);
- TestUnaryOp(SaturatingRoundingMultiplyByPOTOp<31>(), testvals_int32);
-
- TestUnaryOp(ExpOnIntervalBetweenNegativeOneQuarterAnd0ExclOp(),
- testvals_int32);
- TestUnaryOp(ExpOnNegativeValuesOp<0>(), testvals_int32);
- TestUnaryOp(ExpOnNegativeValuesOp<1>(), testvals_int32);
- TestUnaryOp(ExpOnNegativeValuesOp<2>(), testvals_int32);
- TestUnaryOp(ExpOnNegativeValuesOp<3>(), testvals_int32);
- TestUnaryOp(ExpOnNegativeValuesOp<4>(), testvals_int32);
- TestUnaryOp(ExpOnNegativeValuesOp<5>(), testvals_int32);
- TestUnaryOp(ExpOnNegativeValuesOp<6>(), testvals_int32);
-
- TestUnaryOp(OneMinusXOverOnePlusXForXIn01Op(), testvals_int32);
- TestUnaryOp(TanhOp<0>(), testvals_int32);
- TestUnaryOp(TanhOp<1>(), testvals_int32);
- TestUnaryOp(TanhOp<2>(), testvals_int32);
- TestUnaryOp(TanhOp<3>(), testvals_int32);
- TestUnaryOp(TanhOp<4>(), testvals_int32);
- TestUnaryOp(TanhOp<5>(), testvals_int32);
- TestUnaryOp(TanhOp<6>(), testvals_int32);
-
- TestUnaryOp(OneOverOnePlusXForXIn01Op(), testvals_int32);
- TestUnaryOp(LogisticOp<0>(), testvals_int32);
- TestUnaryOp(LogisticOp<1>(), testvals_int32);
- TestUnaryOp(LogisticOp<2>(), testvals_int32);
- TestUnaryOp(LogisticOp<3>(), testvals_int32);
- TestUnaryOp(LogisticOp<4>(), testvals_int32);
- TestUnaryOp(LogisticOp<5>(), testvals_int32);
- TestUnaryOp(LogisticOp<6>(), testvals_int32);
-
- for (auto a : testvals_int32) {
- FixedPoint<std::int32_t, 4> x;
- x.raw() = a;
- test_convert(x);
- }
-
- test_mul<0, 0>(testvals_int32);
- test_mul<0, 1>(testvals_int32);
- test_mul<2, 0>(testvals_int32);
- test_mul<1, 1>(testvals_int32);
- test_mul<4, 4>(testvals_int32);
- test_mul<3, 5>(testvals_int32);
- test_mul<7, 2>(testvals_int32);
- test_mul<14, 15>(testvals_int32);
-
- test_Rescale<0, 0>(testvals_int32);
- test_Rescale<0, 1>(testvals_int32);
- test_Rescale<2, 0>(testvals_int32);
- test_Rescale<4, 4>(testvals_int32);
- test_Rescale<4, 5>(testvals_int32);
- test_Rescale<6, 3>(testvals_int32);
- test_Rescale<13, 9>(testvals_int32);
-
- test_ExactMulByPot<0, 0>(testvals_int32);
- test_ExactMulByPot<0, 4>(testvals_int32);
- test_ExactMulByPot<1, 4>(testvals_int32);
- test_ExactMulByPot<3, 2>(testvals_int32);
- test_ExactMulByPot<-4, 5>(testvals_int32);
- test_ExactMulByPot<-2, 6>(testvals_int32);
-
- std::cerr << "All tests passed." << std::endl;
+ gemmlowp::TestFixedPoint<std::int32_t>().RunTests("Scalar int32");
+ gemmlowp::TestFixedPoint<std::int16_t>().RunTests("Scalar int16");
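+ // The same suite runs for each raw scalar type; the SIMD instantiations below are compiled only when the corresponding instruction-set macro is defined.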
+#ifdef GEMMLOWP_SSE4
+ gemmlowp::TestFixedPoint<__m128i>().RunTests("SSE4 __m128i = int32x4");
+ gemmlowp::TestFixedPoint<gemmlowp::int16x8_m128i>().RunTests(
+ "SSE4 __m128i = int16x8");
+#endif
+#ifdef GEMMLOWP_NEON
+ gemmlowp::TestFixedPoint<int32x4_t>().RunTests("NEON int32x4_t");
+ gemmlowp::TestFixedPoint<int16x8_t>().RunTests("NEON int16x8_t");
+#endif
+#ifdef GEMMLOWP_MSA
+ gemmlowp::TestFixedPoint<v4i32>().RunTests("MSA v4i32");
+ gemmlowp::TestFixedPoint<v8i16>().RunTests("MSA v8i16");
+#endif
}
{
//no signed average in x86 SIMD, go to unsigned
__m128i c128, au, bu, sum;
- c128 = _mm_set1_epi8(0x80); //-128
+ c128 = _mm_set1_epi8((int8_t)0x80); //-128
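+ //the explicit cast keeps the intended value -128; 0x80 does not fit in int8_t, so the bare literal can trigger implicit-conversion warnings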
au = _mm_sub_epi8(a, c128); //add 128
bu = _mm_sub_epi8(b, c128); //add 128
sum = _mm_avg_epu8(au, bu);
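+ //biasing both inputs by 128 maps the signed range onto [0, 255], so the unsigned rounding average can be used; the bias is removed again afterwards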
{
//no signed average in x86 SIMD, go to unsigned
__m128i cx8000, au, bu, sum;
- cx8000 = _mm_set1_epi16(0x8000); // - 32768
+ cx8000 = _mm_set1_epi16((int16_t)0x8000); // - 32768
au = _mm_sub_epi16(a, cx8000); //add 32768
bu = _mm_sub_epi16(b, cx8000); //add 32768
sum = _mm_avg_epu16(au, bu);
{
 //need to deal with the possibility of internal overflow
__m128i c128, au,bu;
- c128 = _mm_set1_epi8 (128);
+ c128 = _mm_set1_epi8((int8_t)128);
au = _mm_add_epi8( a, c128);
bu = _mm_add_epi8( b, c128);
return vhsubq_u8(au,bu);
{
//need to deal with the possibility of internal overflow
__m128i c8000, au,bu;
- c8000 = _mm_set1_epi16(0x8000);
+ c8000 = _mm_set1_epi16((int16_t)0x8000);
au = _mm_add_epi16( a, c8000);
bu = _mm_add_epi16( b, c8000);
return vhsubq_u16(au,bu);
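+ //adding the same bias to both operands leaves their difference unchanged, so the signed halving subtract can reuse the unsigned implementation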
return _mm_cmpeq_epi16(cmp, a); //a>=b
#else
__m128i c8000, as, bs, m1, m2;
- c8000 = _mm_set1_epi16 (0x8000);
+ c8000 = _mm_set1_epi16 ((int16_t)0x8000);
as = _mm_sub_epi16(a,c8000);
bs = _mm_sub_epi16(b,c8000);
m1 = _mm_cmpgt_epi16(as, bs);
{
 //no unsigned chars comparison, only signed available, so need the trick
__m128i c128, as, bs;
- c128 = _mm_set1_epi8 (128);
+ c128 = _mm_set1_epi8 ((int8_t)128);
as = _mm_sub_epi8(a,c128);
bs = _mm_sub_epi8(b,c128);
return _mm_cmpgt_epi8 (as, bs);
{
 //no unsigned short comparison, only signed available, so need the trick
__m128i c8000, as, bs;
- c8000 = _mm_set1_epi16 (0x8000);
+ c8000 = _mm_set1_epi16 ((int16_t)0x8000);
as = _mm_sub_epi16(a,c8000);
bs = _mm_sub_epi16(b,c8000);
return _mm_cmpgt_epi16 ( as, bs);
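+ //subtracting 0x80 / 0x8000 is an order-preserving mapping from the unsigned range onto the signed range, so the signed compare yields the unsigned comparison result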
uint16x4_t res64;
__m128i c32767, cfffe, as, bs, res;
c32767 = _mm_set1_epi16 (32767);
- cfffe = _mm_set1_epi16 (0xfffe);
+ cfffe = _mm_set1_epi16 ((int16_t)0xfffe);
as = _mm_sub_epi16 (_pM128i(a), c32767);
bs = _mm_sub_epi16 (_pM128i(b), c32767);
res = _mm_hadd_epi16 (as, bs);
 // a manual saturation solution looks better than one based on 32-bit conversion
__m128i cb, c8000, a_signed, saturation_mask, shift_res;
cb = _mm_set1_epi16((1 << (16 - b)) - 1 - 0x8000 );
- c8000 = _mm_set1_epi16 (0x8000);
+ c8000 = _mm_set1_epi16 ((int16_t)0x8000);
//no unsigned shorts comparison in SSE, only signed available, so need the trick
a_signed = _mm_sub_epi16(a, c8000); //go to signed
saturation_mask = _mm_cmpgt_epi16 (a_signed, cb);
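+ //the mask marks lanes with value >= 1 << (16 - b), i.e. lanes whose left shift by b would overflow 16 bits; the comparison is done in the signed-biased domain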
// it loads a 32-byte block aligned on a 16-byte boundary and extracts the 16 bytes corresponding to the unaligned access
 //If the ptr is aligned then __m128i _mm_load_si128 ((__m128i*) ptr) could be used instead;
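+ //uintptr_t is pointer-sized on every platform, unlike unsigned long, which is only 32 bits on LLP64 targets such as 64-bit Windows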
#define LOAD_SI128(ptr) \
- ( ((unsigned long)(ptr) & 15) == 0 ) ? _mm_load_si128((__m128i*)(ptr)) : _mm_loadu_si128((__m128i*)(ptr))
+ ( ((uintptr_t)(ptr) & 15) == 0 ) ? _mm_load_si128((__m128i*)(ptr)) : _mm_loadu_si128((__m128i*)(ptr))
uint8x16_t vld1q_u8(__transfersize(16) uint8_t const * ptr); // VLD1.8 {d0, d1}, [r0]
#define vld1q_u8 LOAD_SI128
float32x4_t vld1q_f32(__transfersize(4) float32_t const * ptr); // VLD1.32 {d0, d1}, [r0]
_NEON2SSE_INLINE float32x4_t vld1q_f32(__transfersize(4) float32_t const * ptr)
{
- if( (((unsigned long)(ptr)) & 15 ) == 0 ) //16 bits aligned
+ if( (((uintptr_t)(ptr)) & 15 ) == 0 ) //16 bytes aligned
return _mm_load_ps(ptr);
else
return _mm_loadu_ps(ptr);
float64x2_t vld1q_f64(__transfersize(4) float64_t const * ptr); // VLD1.64 {d0, d1}, [r0]
_NEON2SSE_INLINE float64x2_t vld1q_f64(__transfersize(4) float64_t const * ptr)
{
- if ((((unsigned long)(ptr)) & 15) == 0) //16 bits aligned
+ if ((((uintptr_t)(ptr)) & 15) == 0) //16 bytes aligned
return _mm_load_pd(ptr);
else
return _mm_loadu_pd(ptr);
// If ptr is 16-byte aligned and you need to store data without cache pollution then use void _mm_stream_si128 ((__m128i*)ptr, val);
//here we assume that a ptr which is NOT 16-byte aligned is possible. If it is aligned we could use _mm_store_si128 as shown in the following macro
#define STORE_SI128(ptr, val) \
- (((unsigned long)(ptr) & 15) == 0 ) ? _mm_store_si128 ((__m128i*)(ptr), val) : _mm_storeu_si128 ((__m128i*)(ptr), val);
+ (((uintptr_t)(ptr) & 15) == 0 ) ? _mm_store_si128 ((__m128i*)(ptr), val) : _mm_storeu_si128 ((__m128i*)(ptr), val);
void vst1q_u8(__transfersize(16) uint8_t * ptr, uint8x16_t val); // VST1.8 {d0, d1}, [r0]
#define vst1q_u8 STORE_SI128
void vst1q_f32(__transfersize(4) float32_t * ptr, float32x4_t val); // VST1.32 {d0, d1}, [r0]
_NEON2SSE_INLINE void vst1q_f32(__transfersize(4) float32_t * ptr, float32x4_t val)
{
- if( ((unsigned long)(ptr) & 15) == 0 ) //16 bits aligned
+ if( ((uintptr_t)(ptr) & 15) == 0 ) //16 bytes aligned
_mm_store_ps (ptr, val);
else
_mm_storeu_ps (ptr, val);
//***********Store a lane of a vector into memory (extract given lane) *********************
//******************************************************************************************
void vst1q_lane_u8(__transfersize(1) uint8_t * ptr, uint8x16_t val, __constrange(0,15) int lane); // VST1.8 {d0[0]}, [r0]
-#define vst1q_lane_u8(ptr, val, lane) *(ptr) = _MM_EXTRACT_EPI8 (val, lane)
+#define vst1q_lane_u8(ptr, val, lane) *(ptr) = (uint8_t) _MM_EXTRACT_EPI8 (val, lane)
void vst1q_lane_u16(__transfersize(1) uint16_t * ptr, uint16x8_t val, __constrange(0,7) int lane); // VST1.16 {d0[0]}, [r0]
-#define vst1q_lane_u16(ptr, val, lane) *(ptr) = _MM_EXTRACT_EPI16 (val, lane)
+#define vst1q_lane_u16(ptr, val, lane) *(ptr) = (uint16_t) _MM_EXTRACT_EPI16 (val, lane)
void vst1q_lane_u32(__transfersize(1) uint32_t * ptr, uint32x4_t val, __constrange(0,3) int lane); // VST1.32 {d0[0]}, [r0]
-#define vst1q_lane_u32(ptr, val, lane) *(ptr) = _MM_EXTRACT_EPI32 (val, lane)
+#define vst1q_lane_u32(ptr, val, lane) *(ptr) = (uint32_t) _MM_EXTRACT_EPI32 (val, lane)
void vst1q_lane_u64(__transfersize(1) uint64_t * ptr, uint64x2_t val, __constrange(0,1) int lane); // VST1.64 {d0}, [r0]
-#define vst1q_lane_u64(ptr, val, lane) *(ptr) = _MM_EXTRACT_EPI64 (val, lane)
+#define vst1q_lane_u64(ptr, val, lane) *(ptr) = (uint64_t) _MM_EXTRACT_EPI64 (val, lane)
void vst1q_lane_s8(__transfersize(1) int8_t * ptr, int8x16_t val, __constrange(0,15) int lane); // VST1.8 {d0[0]}, [r0]
-#define vst1q_lane_s8(ptr, val, lane) *(ptr) = _MM_EXTRACT_EPI8 (val, lane)
+#define vst1q_lane_s8(ptr, val, lane) *(ptr) = (int8_t) _MM_EXTRACT_EPI8 (val, lane)
void vst1q_lane_s16(__transfersize(1) int16_t * ptr, int16x8_t val, __constrange(0,7) int lane); // VST1.16 {d0[0]}, [r0]
-#define vst1q_lane_s16(ptr, val, lane) *(ptr) = _MM_EXTRACT_EPI16 (val, lane)
+#define vst1q_lane_s16(ptr, val, lane) *(ptr) = (int16_t) _MM_EXTRACT_EPI16 (val, lane)
void vst1q_lane_s32(__transfersize(1) int32_t * ptr, int32x4_t val, __constrange(0,3) int lane); // VST1.32 {d0[0]}, [r0]
#define vst1q_lane_s32(ptr, val, lane) *(ptr) = _MM_EXTRACT_EPI32 (val, lane)
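+//_MM_EXTRACT_EPI8/16 return the selected lane zero-extended in an int; the casts added above narrow it back to the destination element type and avoid conversion warnings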
#define vget_lane_f32(vec, lane) vec.m64_f32[lane]
uint8_t vgetq_lane_u8(uint8x16_t vec, __constrange(0,15) int lane); // VMOV.U8 r0, d0[0]
-#define vgetq_lane_u8 _MM_EXTRACT_EPI8
+#define vgetq_lane_u8 (uint8_t) _MM_EXTRACT_EPI8
uint16_t vgetq_lane_u16(uint16x8_t vec, __constrange(0,7) int lane); // VMOV.s16 r0, d0[0]
-#define vgetq_lane_u16 _MM_EXTRACT_EPI16
+#define vgetq_lane_u16 (uint16_t) _MM_EXTRACT_EPI16
uint32_t vgetq_lane_u32(uint32x4_t vec, __constrange(0,3) int lane); // VMOV.32 r0, d0[0]
-#define vgetq_lane_u32 _MM_EXTRACT_EPI32
+#define vgetq_lane_u32 (uint32_t) _MM_EXTRACT_EPI32
int8_t vgetq_lane_s8(int8x16_t vec, __constrange(0,15) int lane); // VMOV.S8 r0, d0[0]
-#define vgetq_lane_s8 vgetq_lane_u8
+#define vgetq_lane_s8 _MM_EXTRACT_EPI8
int16_t vgetq_lane_s16(int16x8_t vec, __constrange(0,7) int lane); // VMOV.S16 r0, d0[0]
-#define vgetq_lane_s16 vgetq_lane_u16
+#define vgetq_lane_s16 _MM_EXTRACT_EPI16
int32_t vgetq_lane_s32(int32x4_t vec, __constrange(0,3) int lane); // VMOV.32 r0, d0[0]
-#define vgetq_lane_s32 vgetq_lane_u32
+#define vgetq_lane_s32 _MM_EXTRACT_EPI32
poly8_t vgetq_lane_p8(poly8x16_t vec, __constrange(0,15) int lane); // VMOV.U8 r0, d0[0]
#define vgetq_lane_p8 vgetq_lane_u8
int64_t vgetq_lane_s64(int64x2_t vec, __constrange(0,1) int lane); // VMOV r0,r0,d0
-#define vgetq_lane_s64 (int64_t) vgetq_lane_u64
+#define vgetq_lane_s64 _MM_EXTRACT_EPI64
uint64_t vgetq_lane_u64(uint64x2_t vec, __constrange(0,1) int lane); // VMOV r0,r0,d0
-#define vgetq_lane_u64 _MM_EXTRACT_EPI64
+#define vgetq_lane_u64 (uint64_t) _MM_EXTRACT_EPI64
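+//the unsigned lane getters narrow the int returned by _MM_EXTRACT_* explicitly; the signed getters return it as-is, leaving any narrowing to the conversion at the point of use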
// ***************** Set lanes within a vector ********************************************
// **************************************************************************************
_NEON2SSE_INLINE int8x16_t vqabsq_s8(int8x16_t a) // VQABS.S8 q0,q0
{
__m128i c_128, abs, abs_cmp;
- c_128 = _mm_set1_epi8 (0x80); //-128
+ c_128 = _mm_set1_epi8 ((int8_t)0x80); //-128
abs = _mm_abs_epi8 (a);
abs_cmp = _mm_cmpeq_epi8 (abs, c_128);
return _mm_xor_si128 (abs, abs_cmp);
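+ //_mm_abs_epi8 maps -128 back to 0x80, and the cmpeq/xor pair turns exactly that case into 127 (analogously 0x8000 -> 32767 in the 16-bit version below), giving the saturating absolute value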
_NEON2SSE_INLINE int16x8_t vqabsq_s16(int16x8_t a) // VQABS.S16 q0,q0
{
__m128i c_32768, abs, abs_cmp;
- c_32768 = _mm_set1_epi16 (0x8000); //-32768
+ c_32768 = _mm_set1_epi16 ((int16_t)0x8000); //-32768
abs = _mm_abs_epi16 (a);
abs_cmp = _mm_cmpeq_epi16 (abs, c_32768);
return _mm_xor_si128 (abs, abs_cmp);
{
__m128i cff, c80, c1, a_mask, a_neg, a_pos, a_comb;
cff = _mm_cmpeq_epi8 (a,a); //0xff
- c80 = _mm_set1_epi8(0x80);
+ c80 = _mm_set1_epi8((int8_t)0x80);
c1 = _mm_set1_epi8(1);
a_mask = _mm_and_si128(a, c80);
a_mask = _mm_cmpeq_epi8(a_mask, c80); //0xff if negative input and 0 if positive