--- /dev/null
+# Build matrix / environment variables are explained on:
+# https://www.appveyor.com/docs/appveyor-yml/
+# This file can be validated on: https://ci.appveyor.com/tools/validate-yaml
+
+version: "{build}"
+
+environment:
+ matrix:
+ # AppVeyor currently has no custom job name feature.
+ # http://help.appveyor.com/discussions/questions/1623-can-i-provide-a-friendly-name-for-jobs
+ - JOB: Visual Studio 2017
+ APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017
+ CMAKE_GENERATOR: Visual Studio 15 2017
+
+platform:
+ - x86
+ - x64
+
+configuration:
+ - RelWithDebInfo
+ - Debug
+
+build_script:
+ - git submodule update --init --recursive
+ - mkdir build
+ - cd build
+ - if "%platform%"=="x64" set CMAKE_GENERATOR=%CMAKE_GENERATOR% Win64
+ - cmake --version
+ - cmake .. -G "%CMAKE_GENERATOR%"
+ -DCMAKE_CONFIGURATION_TYPES="%CONFIGURATION%"
+ - cmake --build . --config "%CONFIGURATION%"
+ - cd ..
+
+test_script:
+ - cd build && ctest --verbose --build-config "%CONFIGURATION%" && cd ..
-build_config.mk
-*.a
-*.o
-*.dylib*
-*.so
-*.so.*
-*_test
-db_bench
-leveldbutil
+# Editors.
+*.sw*
+.vscode
+.DS_Store
+
+# Build directory.
+build/
+out/
+# Build matrix / environment variables are explained on:
+# http://about.travis-ci.org/docs/user/build-configuration/
+# This file can be validated on: http://lint.travis-ci.org/
+
+dist: xenial
language: cpp
+
compiler:
-- clang
-- gcc
+ - gcc
+ - clang
os:
-- linux
-- osx
-sudo: false
+ - linux
+ - osx
+
+env:
+ - BUILD_TYPE=Debug
+ - BUILD_TYPE=RelWithDebInfo
+
+matrix:
+ exclude:
+ # GCC fails on recent Travis OSX images.
+ # https://github.com/travis-ci/travis-ci/issues/9640
+ - compiler: gcc
+ os: osx
+
+addons:
+ apt:
+ sources:
+ - llvm-toolchain-xenial-7
+ - ubuntu-toolchain-r-test
+ packages:
+ - clang-7
+ - cmake
+ - gcc-8
+ - g++-8
+ - libgoogle-perftools-dev
+ - libkyotocabinet-dev
+ - libsnappy-dev
+ - libsqlite3-dev
+ - ninja-build
+ homebrew:
+ packages:
+ - crc32c
+ - gperftools
+ - kyotocabinet
+ - gcc@7
+ - ninja
+ - snappy
+ - sqlite3
+
before_install:
-- echo $LANG
-- echo $LC_ALL
+# The Travis VM image for Mac already has a link at /usr/local/include/c++,
+# causing Homebrew's gcc installation to error out. This was reported to
+# Homebrew maintainers at https://github.com/Homebrew/brew/issues/1742 and
+# removing the link emerged as a workaround.
+- if [ "$TRAVIS_OS_NAME" == "osx" ]; then rm -f /usr/local/include/c++ ; fi
+
+install:
+# /usr/bin/gcc is stuck to old versions on both Linux and OSX.
+- if [ "$CXX" = "g++" ]; then export CXX="g++-8" CC="gcc-8"; fi
+- echo ${CC}
+- echo ${CXX}
+- ${CXX} --version
+- cmake --version
+
+before_script:
+- mkdir -p build && cd build
+- cmake .. -G Ninja -DCMAKE_BUILD_TYPE=$BUILD_TYPE
+- cmake --build .
+- cd ..
+
script:
-- make -j 4 check
+- cd build && ctest --verbose && cd ..
+- "if [ -f build/db_bench ] ; then build/db_bench ; fi"
+- "if [ -f build/db_bench_sqlite3 ] ; then build/db_bench_sqlite3 ; fi"
+- "if [ -f build/db_bench_tree_db ] ; then build/db_bench_tree_db ; fi"
--- /dev/null
+# Copyright 2017 The LevelDB Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+cmake_minimum_required(VERSION 3.9)
+# Keep the version below in sync with the one in db.h
+project(leveldb VERSION 1.21.0 LANGUAGES C CXX)
+
+# This project can use C11, but will gracefully decay down to C89.
+set(CMAKE_C_STANDARD 11)
+set(CMAKE_C_STANDARD_REQUIRED OFF)
+set(CMAKE_C_EXTENSIONS OFF)
+
+# This project requires C++11.
+set(CMAKE_CXX_STANDARD 11)
+set(CMAKE_CXX_STANDARD_REQUIRED ON)
+set(CMAKE_CXX_EXTENSIONS OFF)
+
+if (WIN32)
+ set(LEVELDB_PLATFORM_NAME LEVELDB_PLATFORM_WINDOWS)
+ # TODO(cmumford): Make UNICODE configurable for Windows.
+ add_definitions(-D_UNICODE -DUNICODE)
+else (WIN32)
+ set(LEVELDB_PLATFORM_NAME LEVELDB_PLATFORM_POSIX)
+endif (WIN32)
+
+option(LEVELDB_BUILD_TESTS "Build LevelDB's unit tests" ON)
+option(LEVELDB_BUILD_BENCHMARKS "Build LevelDB's benchmarks" ON)
+option(LEVELDB_INSTALL "Install LevelDB's header and library" ON)
+
+include(TestBigEndian)
+test_big_endian(LEVELDB_IS_BIG_ENDIAN)
+
+include(CheckIncludeFile)
+check_include_file("unistd.h" HAVE_UNISTD_H)
+
+include(CheckLibraryExists)
+check_library_exists(crc32c crc32c_value "" HAVE_CRC32C)
+check_library_exists(snappy snappy_compress "" HAVE_SNAPPY)
+check_library_exists(tcmalloc malloc "" HAVE_TCMALLOC)
+
+include(CheckCXXSymbolExists)
+# Using check_cxx_symbol_exists() instead of check_c_symbol_exists() because
+# we're including the header from C++, and feature detection should use the same
+# compiler language that the project will use later. Principles aside, some
+# platforms do not expose fdatasync() in <unistd.h> in standard C mode
+# (-std=c11), but do expose the function in standard C++ mode (-std=c++11).
+check_cxx_symbol_exists(fdatasync "unistd.h" HAVE_FDATASYNC)
+check_cxx_symbol_exists(F_FULLFSYNC "fcntl.h" HAVE_FULLFSYNC)
+
+include(CheckCXXSourceCompiles)
+
+# Test whether -Wthread-safety is available. See
+# https://clang.llvm.org/docs/ThreadSafetyAnalysis.html
+# -Werror is necessary because unknown attributes only generate warnings.
+set(OLD_CMAKE_REQUIRED_FLAGS ${CMAKE_REQUIRED_FLAGS})
+list(APPEND CMAKE_REQUIRED_FLAGS -Werror -Wthread-safety)
+check_cxx_source_compiles("
+struct __attribute__((lockable)) Lock {
+ void Acquire() __attribute__((exclusive_lock_function()));
+ void Release() __attribute__((unlock_function()));
+};
+struct ThreadSafeType {
+ Lock lock_;
+ int data_ __attribute__((guarded_by(lock_)));
+};
+int main() { return 0; }
+" HAVE_CLANG_THREAD_SAFETY)
+set(CMAKE_REQUIRED_FLAGS ${OLD_CMAKE_REQUIRED_FLAGS})
+
+# Test whether C++17 __has_include is available.
+check_cxx_source_compiles("
+#if defined(__has_include) && __has_include(<string>)
+#include <string>
+#endif
+int main() { std::string str; return 0; }
+" HAVE_CXX17_HAS_INCLUDE)
+
+set(LEVELDB_PUBLIC_INCLUDE_DIR "include/leveldb")
+set(LEVELDB_PORT_CONFIG_DIR "include/port")
+
+configure_file(
+ "${PROJECT_SOURCE_DIR}/port/port_config.h.in"
+ "${PROJECT_BINARY_DIR}/${LEVELDB_PORT_CONFIG_DIR}/port_config.h"
+)
+
+include_directories(
+ "${PROJECT_BINARY_DIR}/include"
+ "${PROJECT_SOURCE_DIR}"
+)
+
+if(BUILD_SHARED_LIBS)
+ # Only export LEVELDB_EXPORT symbols from the shared library.
+ add_compile_options(-fvisibility=hidden)
+endif(BUILD_SHARED_LIBS)
+
+add_library(leveldb "")
+target_sources(leveldb
+ PRIVATE
+ "${PROJECT_BINARY_DIR}/${LEVELDB_PORT_CONFIG_DIR}/port_config.h"
+ "${PROJECT_SOURCE_DIR}/db/builder.cc"
+ "${PROJECT_SOURCE_DIR}/db/builder.h"
+ "${PROJECT_SOURCE_DIR}/db/c.cc"
+ "${PROJECT_SOURCE_DIR}/db/db_impl.cc"
+ "${PROJECT_SOURCE_DIR}/db/db_impl.h"
+ "${PROJECT_SOURCE_DIR}/db/db_iter.cc"
+ "${PROJECT_SOURCE_DIR}/db/db_iter.h"
+ "${PROJECT_SOURCE_DIR}/db/dbformat.cc"
+ "${PROJECT_SOURCE_DIR}/db/dbformat.h"
+ "${PROJECT_SOURCE_DIR}/db/dumpfile.cc"
+ "${PROJECT_SOURCE_DIR}/db/filename.cc"
+ "${PROJECT_SOURCE_DIR}/db/filename.h"
+ "${PROJECT_SOURCE_DIR}/db/log_format.h"
+ "${PROJECT_SOURCE_DIR}/db/log_reader.cc"
+ "${PROJECT_SOURCE_DIR}/db/log_reader.h"
+ "${PROJECT_SOURCE_DIR}/db/log_writer.cc"
+ "${PROJECT_SOURCE_DIR}/db/log_writer.h"
+ "${PROJECT_SOURCE_DIR}/db/memtable.cc"
+ "${PROJECT_SOURCE_DIR}/db/memtable.h"
+ "${PROJECT_SOURCE_DIR}/db/repair.cc"
+ "${PROJECT_SOURCE_DIR}/db/skiplist.h"
+ "${PROJECT_SOURCE_DIR}/db/snapshot.h"
+ "${PROJECT_SOURCE_DIR}/db/table_cache.cc"
+ "${PROJECT_SOURCE_DIR}/db/table_cache.h"
+ "${PROJECT_SOURCE_DIR}/db/version_edit.cc"
+ "${PROJECT_SOURCE_DIR}/db/version_edit.h"
+ "${PROJECT_SOURCE_DIR}/db/version_set.cc"
+ "${PROJECT_SOURCE_DIR}/db/version_set.h"
+ "${PROJECT_SOURCE_DIR}/db/write_batch_internal.h"
+ "${PROJECT_SOURCE_DIR}/db/write_batch.cc"
+ "${PROJECT_SOURCE_DIR}/port/port_stdcxx.h"
+ "${PROJECT_SOURCE_DIR}/port/port.h"
+ "${PROJECT_SOURCE_DIR}/port/thread_annotations.h"
+ "${PROJECT_SOURCE_DIR}/table/block_builder.cc"
+ "${PROJECT_SOURCE_DIR}/table/block_builder.h"
+ "${PROJECT_SOURCE_DIR}/table/block.cc"
+ "${PROJECT_SOURCE_DIR}/table/block.h"
+ "${PROJECT_SOURCE_DIR}/table/filter_block.cc"
+ "${PROJECT_SOURCE_DIR}/table/filter_block.h"
+ "${PROJECT_SOURCE_DIR}/table/format.cc"
+ "${PROJECT_SOURCE_DIR}/table/format.h"
+ "${PROJECT_SOURCE_DIR}/table/iterator_wrapper.h"
+ "${PROJECT_SOURCE_DIR}/table/iterator.cc"
+ "${PROJECT_SOURCE_DIR}/table/merger.cc"
+ "${PROJECT_SOURCE_DIR}/table/merger.h"
+ "${PROJECT_SOURCE_DIR}/table/table_builder.cc"
+ "${PROJECT_SOURCE_DIR}/table/table.cc"
+ "${PROJECT_SOURCE_DIR}/table/two_level_iterator.cc"
+ "${PROJECT_SOURCE_DIR}/table/two_level_iterator.h"
+ "${PROJECT_SOURCE_DIR}/util/arena.cc"
+ "${PROJECT_SOURCE_DIR}/util/arena.h"
+ "${PROJECT_SOURCE_DIR}/util/bloom.cc"
+ "${PROJECT_SOURCE_DIR}/util/cache.cc"
+ "${PROJECT_SOURCE_DIR}/util/coding.cc"
+ "${PROJECT_SOURCE_DIR}/util/coding.h"
+ "${PROJECT_SOURCE_DIR}/util/comparator.cc"
+ "${PROJECT_SOURCE_DIR}/util/crc32c.cc"
+ "${PROJECT_SOURCE_DIR}/util/crc32c.h"
+ "${PROJECT_SOURCE_DIR}/util/env.cc"
+ "${PROJECT_SOURCE_DIR}/util/filter_policy.cc"
+ "${PROJECT_SOURCE_DIR}/util/hash.cc"
+ "${PROJECT_SOURCE_DIR}/util/hash.h"
+ "${PROJECT_SOURCE_DIR}/util/logging.cc"
+ "${PROJECT_SOURCE_DIR}/util/logging.h"
+ "${PROJECT_SOURCE_DIR}/util/mutexlock.h"
+ "${PROJECT_SOURCE_DIR}/util/no_destructor.h"
+ "${PROJECT_SOURCE_DIR}/util/options.cc"
+ "${PROJECT_SOURCE_DIR}/util/random.h"
+ "${PROJECT_SOURCE_DIR}/util/status.cc"
+
+ # Only CMake 3.3+ supports PUBLIC sources in targets exported by "install".
+ $<$<VERSION_GREATER:CMAKE_VERSION,3.2>:PUBLIC>
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/c.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/cache.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/comparator.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/db.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/dumpfile.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/env.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/export.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/filter_policy.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/iterator.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/options.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/slice.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/status.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/table_builder.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/table.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/write_batch.h"
+)
+
+if (WIN32)
+ target_sources(leveldb
+ PRIVATE
+ "${PROJECT_SOURCE_DIR}/util/env_windows.cc"
+ "${PROJECT_SOURCE_DIR}/util/windows_logger.h"
+ )
+else (WIN32)
+ target_sources(leveldb
+ PRIVATE
+ "${PROJECT_SOURCE_DIR}/util/env_posix.cc"
+ "${PROJECT_SOURCE_DIR}/util/posix_logger.h"
+ )
+endif (WIN32)
+
+# MemEnv is not part of the interface and could be pulled to a separate library.
+target_sources(leveldb
+ PRIVATE
+ "${PROJECT_SOURCE_DIR}/helpers/memenv/memenv.cc"
+ "${PROJECT_SOURCE_DIR}/helpers/memenv/memenv.h"
+)
+
+target_include_directories(leveldb
+ PUBLIC
+ $<BUILD_INTERFACE:${PROJECT_SOURCE_DIR}/include>
+ $<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>
+)
+target_compile_definitions(leveldb
+ PRIVATE
+ # Used by include/export.h when building shared libraries.
+ LEVELDB_COMPILE_LIBRARY
+ # Used by port/port.h.
+ ${LEVELDB_PLATFORM_NAME}=1
+)
+if (NOT HAVE_CXX17_HAS_INCLUDE)
+ target_compile_definitions(leveldb
+ PRIVATE
+ LEVELDB_HAS_PORT_CONFIG_H=1
+ )
+endif(NOT HAVE_CXX17_HAS_INCLUDE)
+
+if(BUILD_SHARED_LIBS)
+ target_compile_definitions(leveldb
+ PUBLIC
+ # Used by include/export.h.
+ LEVELDB_SHARED_LIBRARY
+ )
+endif(BUILD_SHARED_LIBS)
+
+if(HAVE_CLANG_THREAD_SAFETY)
+ target_compile_options(leveldb
+ PUBLIC
+ -Werror -Wthread-safety)
+endif(HAVE_CLANG_THREAD_SAFETY)
+
+if(HAVE_CRC32C)
+ target_link_libraries(leveldb crc32c)
+endif(HAVE_CRC32C)
+if(HAVE_SNAPPY)
+ target_link_libraries(leveldb snappy)
+endif(HAVE_SNAPPY)
+if(HAVE_TCMALLOC)
+ target_link_libraries(leveldb tcmalloc)
+endif(HAVE_TCMALLOC)
+
+# Needed by port_stdcxx.h
+find_package(Threads REQUIRED)
+target_link_libraries(leveldb Threads::Threads)
+
+add_executable(leveldbutil
+ "${PROJECT_SOURCE_DIR}/db/leveldbutil.cc"
+)
+target_link_libraries(leveldbutil leveldb)
+
+if(LEVELDB_BUILD_TESTS)
+ enable_testing()
+
+ function(leveldb_test test_file)
+ get_filename_component(test_target_name "${test_file}" NAME_WE)
+
+ add_executable("${test_target_name}" "")
+ target_sources("${test_target_name}"
+ PRIVATE
+ "${PROJECT_BINARY_DIR}/${LEVELDB_PORT_CONFIG_DIR}/port_config.h"
+ "${PROJECT_SOURCE_DIR}/util/testharness.cc"
+ "${PROJECT_SOURCE_DIR}/util/testharness.h"
+ "${PROJECT_SOURCE_DIR}/util/testutil.cc"
+ "${PROJECT_SOURCE_DIR}/util/testutil.h"
+
+ "${test_file}"
+ )
+ target_link_libraries("${test_target_name}" leveldb)
+ target_compile_definitions("${test_target_name}"
+ PRIVATE
+ ${LEVELDB_PLATFORM_NAME}=1
+ )
+ if (NOT HAVE_CXX17_HAS_INCLUDE)
+ target_compile_definitions("${test_target_name}"
+ PRIVATE
+ LEVELDB_HAS_PORT_CONFIG_H=1
+ )
+ endif(NOT HAVE_CXX17_HAS_INCLUDE)
+
+ add_test(NAME "${test_target_name}" COMMAND "${test_target_name}")
+ endfunction(leveldb_test)
+
+ leveldb_test("${PROJECT_SOURCE_DIR}/db/c_test.c")
+ leveldb_test("${PROJECT_SOURCE_DIR}/db/fault_injection_test.cc")
+
+ leveldb_test("${PROJECT_SOURCE_DIR}/issues/issue178_test.cc")
+ leveldb_test("${PROJECT_SOURCE_DIR}/issues/issue200_test.cc")
+
+ leveldb_test("${PROJECT_SOURCE_DIR}/util/env_test.cc")
+ leveldb_test("${PROJECT_SOURCE_DIR}/util/status_test.cc")
+ leveldb_test("${PROJECT_SOURCE_DIR}/util/no_destructor_test.cc")
+
+ if(NOT BUILD_SHARED_LIBS)
+ leveldb_test("${PROJECT_SOURCE_DIR}/db/autocompact_test.cc")
+ leveldb_test("${PROJECT_SOURCE_DIR}/db/corruption_test.cc")
+ leveldb_test("${PROJECT_SOURCE_DIR}/db/db_test.cc")
+ leveldb_test("${PROJECT_SOURCE_DIR}/db/dbformat_test.cc")
+ leveldb_test("${PROJECT_SOURCE_DIR}/db/filename_test.cc")
+ leveldb_test("${PROJECT_SOURCE_DIR}/db/log_test.cc")
+ leveldb_test("${PROJECT_SOURCE_DIR}/db/recovery_test.cc")
+ leveldb_test("${PROJECT_SOURCE_DIR}/db/skiplist_test.cc")
+ leveldb_test("${PROJECT_SOURCE_DIR}/db/version_edit_test.cc")
+ leveldb_test("${PROJECT_SOURCE_DIR}/db/version_set_test.cc")
+ leveldb_test("${PROJECT_SOURCE_DIR}/db/write_batch_test.cc")
+
+ leveldb_test("${PROJECT_SOURCE_DIR}/helpers/memenv/memenv_test.cc")
+
+ leveldb_test("${PROJECT_SOURCE_DIR}/table/filter_block_test.cc")
+ leveldb_test("${PROJECT_SOURCE_DIR}/table/table_test.cc")
+
+ leveldb_test("${PROJECT_SOURCE_DIR}/util/arena_test.cc")
+ leveldb_test("${PROJECT_SOURCE_DIR}/util/bloom_test.cc")
+ leveldb_test("${PROJECT_SOURCE_DIR}/util/cache_test.cc")
+ leveldb_test("${PROJECT_SOURCE_DIR}/util/coding_test.cc")
+ leveldb_test("${PROJECT_SOURCE_DIR}/util/crc32c_test.cc")
+ leveldb_test("${PROJECT_SOURCE_DIR}/util/hash_test.cc")
+ leveldb_test("${PROJECT_SOURCE_DIR}/util/logging_test.cc")
+
+ # TODO(costan): This test also uses
+ # "${PROJECT_SOURCE_DIR}/util/env_{posix|windows}_test_helper.h"
+ if (WIN32)
+ leveldb_test("${PROJECT_SOURCE_DIR}/util/env_windows_test.cc")
+ else (WIN32)
+ leveldb_test("${PROJECT_SOURCE_DIR}/util/env_posix_test.cc")
+ endif (WIN32)
+ endif(NOT BUILD_SHARED_LIBS)
+endif(LEVELDB_BUILD_TESTS)
+
+if(LEVELDB_BUILD_BENCHMARKS)
+ function(leveldb_benchmark bench_file)
+ get_filename_component(bench_target_name "${bench_file}" NAME_WE)
+
+ add_executable("${bench_target_name}" "")
+ target_sources("${bench_target_name}"
+ PRIVATE
+ "${PROJECT_BINARY_DIR}/${LEVELDB_PORT_CONFIG_DIR}/port_config.h"
+ "${PROJECT_SOURCE_DIR}/util/histogram.cc"
+ "${PROJECT_SOURCE_DIR}/util/histogram.h"
+ "${PROJECT_SOURCE_DIR}/util/testharness.cc"
+ "${PROJECT_SOURCE_DIR}/util/testharness.h"
+ "${PROJECT_SOURCE_DIR}/util/testutil.cc"
+ "${PROJECT_SOURCE_DIR}/util/testutil.h"
+
+ "${bench_file}"
+ )
+ target_link_libraries("${bench_target_name}" leveldb)
+ target_compile_definitions("${bench_target_name}"
+ PRIVATE
+ ${LEVELDB_PLATFORM_NAME}=1
+ )
+ if (NOT HAVE_CXX17_HAS_INCLUDE)
+ target_compile_definitions("${bench_target_name}"
+ PRIVATE
+ LEVELDB_HAS_PORT_CONFIG_H=1
+ )
+ endif(NOT HAVE_CXX17_HAS_INCLUDE)
+ endfunction(leveldb_benchmark)
+
+ if(NOT BUILD_SHARED_LIBS)
+ leveldb_benchmark("${PROJECT_SOURCE_DIR}/db/db_bench.cc")
+ endif(NOT BUILD_SHARED_LIBS)
+
+ check_library_exists(sqlite3 sqlite3_open "" HAVE_SQLITE3)
+ if(HAVE_SQLITE3)
+ leveldb_benchmark("${PROJECT_SOURCE_DIR}/doc/bench/db_bench_sqlite3.cc")
+ target_link_libraries(db_bench_sqlite3 sqlite3)
+ endif(HAVE_SQLITE3)
+
+ # check_library_exists is insufficient here because the library names have
+ # different manglings when compiled with clang or gcc, at least when installed
+ # with Homebrew on Mac.
+ set(OLD_CMAKE_REQUIRED_LIBRARIES ${CMAKE_REQUIRED_LIBRARIES})
+ list(APPEND CMAKE_REQUIRED_LIBRARIES kyotocabinet)
+ check_cxx_source_compiles("
+#include <kcpolydb.h>
+
+int main() {
+ kyotocabinet::TreeDB* db = new kyotocabinet::TreeDB();
+ delete db;
+ return 0;
+}
+ " HAVE_KYOTOCABINET)
+ set(CMAKE_REQUIRED_LIBRARIES ${OLD_CMAKE_REQUIRED_LIBRARIES})
+ if(HAVE_KYOTOCABINET)
+ leveldb_benchmark("${PROJECT_SOURCE_DIR}/doc/bench/db_bench_tree_db.cc")
+ target_link_libraries(db_bench_tree_db kyotocabinet)
+ endif(HAVE_KYOTOCABINET)
+endif(LEVELDB_BUILD_BENCHMARKS)
+
+if(LEVELDB_INSTALL)
+ include(GNUInstallDirs)
+ install(TARGETS leveldb
+ EXPORT leveldbTargets
+ RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}
+ LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
+ ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
+ )
+ install(
+ FILES
+ "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/c.h"
+ "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/cache.h"
+ "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/comparator.h"
+ "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/db.h"
+ "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/dumpfile.h"
+ "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/env.h"
+ "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/export.h"
+ "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/filter_policy.h"
+ "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/iterator.h"
+ "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/options.h"
+ "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/slice.h"
+ "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/status.h"
+ "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/table_builder.h"
+ "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/table.h"
+ "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/write_batch.h"
+ DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/leveldb
+ )
+
+ include(CMakePackageConfigHelpers)
+ write_basic_package_version_file(
+ "${PROJECT_BINARY_DIR}/leveldbConfigVersion.cmake"
+ COMPATIBILITY SameMajorVersion
+ )
+ install(
+ EXPORT leveldbTargets
+ NAMESPACE leveldb::
+ DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/leveldb"
+ )
+ install(
+ FILES
+ "${PROJECT_SOURCE_DIR}/cmake/leveldbConfig.cmake"
+ "${PROJECT_BINARY_DIR}/leveldbConfigVersion.cmake"
+ DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/leveldb"
+ )
+endif(LEVELDB_INSTALL)
## Writing Code ##
-If your contribution contains code, please make sure that it follows
-[the style guide](http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml).
+If your contribution contains code, please make sure that it follows
+[the style guide](http://google.github.io/styleguide/cppguide.html).
Otherwise we will have to ask you to make changes, and that's no fun for anyone.
+++ /dev/null
-# Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#-----------------------------------------------
-# Uncomment exactly one of the lines labelled (A), (B), and (C) below
-# to switch between compilation modes.
-
-# (A) Production use (optimized mode)
-OPT ?= -O2 -DNDEBUG
-# (B) Debug mode, w/ full line-level debugging symbols
-# OPT ?= -g2
-# (C) Profiling mode: opt, but w/debugging symbols
-# OPT ?= -O2 -g2 -DNDEBUG
-#-----------------------------------------------
-
-# detect what platform we're building on
-$(shell CC="$(CC)" CXX="$(CXX)" TARGET_OS="$(TARGET_OS)" \
- ./build_detect_platform build_config.mk ./)
-# this file is generated by the previous line to set build flags and sources
-include build_config.mk
-
-TESTS = \
- db/autocompact_test \
- db/c_test \
- db/corruption_test \
- db/db_test \
- db/dbformat_test \
- db/fault_injection_test \
- db/filename_test \
- db/log_test \
- db/recovery_test \
- db/skiplist_test \
- db/version_edit_test \
- db/version_set_test \
- db/write_batch_test \
- helpers/memenv/memenv_test \
- issues/issue178_test \
- issues/issue200_test \
- table/filter_block_test \
- table/table_test \
- util/arena_test \
- util/bloom_test \
- util/cache_test \
- util/coding_test \
- util/crc32c_test \
- util/env_posix_test \
- util/env_test \
- util/hash_test
-
-UTILS = \
- db/db_bench \
- db/leveldbutil
-
-# Put the object files in a subdirectory, but the application at the top of the object dir.
-PROGNAMES := $(notdir $(TESTS) $(UTILS))
-
-# On Linux may need libkyotocabinet-dev for dependency.
-BENCHMARKS = \
- doc/bench/db_bench_sqlite3 \
- doc/bench/db_bench_tree_db
-
-CFLAGS += -I. -I./include $(PLATFORM_CCFLAGS) $(OPT)
-CXXFLAGS += -I. -I./include $(PLATFORM_CXXFLAGS) $(OPT)
-
-LDFLAGS += $(PLATFORM_LDFLAGS)
-LIBS += $(PLATFORM_LIBS)
-
-SIMULATOR_OUTDIR=out-ios-x86
-DEVICE_OUTDIR=out-ios-arm
-
-ifeq ($(PLATFORM), IOS)
-# Note: iOS should probably be using libtool, not ar.
-AR=xcrun ar
-SIMULATORSDK=$(shell xcrun -sdk iphonesimulator --show-sdk-path)
-DEVICESDK=$(shell xcrun -sdk iphoneos --show-sdk-path)
-DEVICE_CFLAGS = -isysroot "$(DEVICESDK)" -arch armv6 -arch armv7 -arch armv7s -arch arm64
-SIMULATOR_CFLAGS = -isysroot "$(SIMULATORSDK)" -arch i686 -arch x86_64
-STATIC_OUTDIR=out-ios-universal
-else
-STATIC_OUTDIR=out-static
-SHARED_OUTDIR=out-shared
-STATIC_PROGRAMS := $(addprefix $(STATIC_OUTDIR)/, $(PROGNAMES))
-SHARED_PROGRAMS := $(addprefix $(SHARED_OUTDIR)/, db_bench)
-endif
-
-STATIC_LIBOBJECTS := $(addprefix $(STATIC_OUTDIR)/, $(SOURCES:.cc=.o))
-STATIC_MEMENVOBJECTS := $(addprefix $(STATIC_OUTDIR)/, $(MEMENV_SOURCES:.cc=.o))
-
-DEVICE_LIBOBJECTS := $(addprefix $(DEVICE_OUTDIR)/, $(SOURCES:.cc=.o))
-DEVICE_MEMENVOBJECTS := $(addprefix $(DEVICE_OUTDIR)/, $(MEMENV_SOURCES:.cc=.o))
-
-SIMULATOR_LIBOBJECTS := $(addprefix $(SIMULATOR_OUTDIR)/, $(SOURCES:.cc=.o))
-SIMULATOR_MEMENVOBJECTS := $(addprefix $(SIMULATOR_OUTDIR)/, $(MEMENV_SOURCES:.cc=.o))
-
-SHARED_LIBOBJECTS := $(addprefix $(SHARED_OUTDIR)/, $(SOURCES:.cc=.o))
-SHARED_MEMENVOBJECTS := $(addprefix $(SHARED_OUTDIR)/, $(MEMENV_SOURCES:.cc=.o))
-
-TESTUTIL := $(STATIC_OUTDIR)/util/testutil.o
-TESTHARNESS := $(STATIC_OUTDIR)/util/testharness.o $(TESTUTIL)
-
-STATIC_TESTOBJS := $(addprefix $(STATIC_OUTDIR)/, $(addsuffix .o, $(TESTS)))
-STATIC_UTILOBJS := $(addprefix $(STATIC_OUTDIR)/, $(addsuffix .o, $(UTILS)))
-STATIC_ALLOBJS := $(STATIC_LIBOBJECTS) $(STATIC_MEMENVOBJECTS) $(STATIC_TESTOBJS) $(STATIC_UTILOBJS) $(TESTHARNESS)
-DEVICE_ALLOBJS := $(DEVICE_LIBOBJECTS) $(DEVICE_MEMENVOBJECTS)
-SIMULATOR_ALLOBJS := $(SIMULATOR_LIBOBJECTS) $(SIMULATOR_MEMENVOBJECTS)
-
-default: all
-
-# Should we build shared libraries?
-ifneq ($(PLATFORM_SHARED_EXT),)
-
-# Many leveldb test apps use non-exported API's. Only build a subset for testing.
-SHARED_ALLOBJS := $(SHARED_LIBOBJECTS) $(SHARED_MEMENVOBJECTS) $(TESTHARNESS)
-
-ifneq ($(PLATFORM_SHARED_VERSIONED),true)
-SHARED_LIB1 = libleveldb.$(PLATFORM_SHARED_EXT)
-SHARED_LIB2 = $(SHARED_LIB1)
-SHARED_LIB3 = $(SHARED_LIB1)
-SHARED_LIBS = $(SHARED_LIB1)
-SHARED_MEMENVLIB = $(SHARED_OUTDIR)/libmemenv.a
-else
-# Update db.h if you change these.
-SHARED_VERSION_MAJOR = 1
-SHARED_VERSION_MINOR = 20
-SHARED_LIB1 = libleveldb.$(PLATFORM_SHARED_EXT)
-SHARED_LIB2 = $(SHARED_LIB1).$(SHARED_VERSION_MAJOR)
-SHARED_LIB3 = $(SHARED_LIB1).$(SHARED_VERSION_MAJOR).$(SHARED_VERSION_MINOR)
-SHARED_LIBS = $(SHARED_OUTDIR)/$(SHARED_LIB1) $(SHARED_OUTDIR)/$(SHARED_LIB2) $(SHARED_OUTDIR)/$(SHARED_LIB3)
-$(SHARED_OUTDIR)/$(SHARED_LIB1): $(SHARED_OUTDIR)/$(SHARED_LIB3)
- ln -fs $(SHARED_LIB3) $(SHARED_OUTDIR)/$(SHARED_LIB1)
-$(SHARED_OUTDIR)/$(SHARED_LIB2): $(SHARED_OUTDIR)/$(SHARED_LIB3)
- ln -fs $(SHARED_LIB3) $(SHARED_OUTDIR)/$(SHARED_LIB2)
-SHARED_MEMENVLIB = $(SHARED_OUTDIR)/libmemenv.a
-endif
-
-$(SHARED_OUTDIR)/$(SHARED_LIB3): $(SHARED_LIBOBJECTS)
- $(CXX) $(LDFLAGS) $(PLATFORM_SHARED_LDFLAGS)$(SHARED_LIB2) $(SHARED_LIBOBJECTS) -o $(SHARED_OUTDIR)/$(SHARED_LIB3) $(LIBS)
-
-endif # PLATFORM_SHARED_EXT
-
-all: $(SHARED_LIBS) $(SHARED_PROGRAMS) $(STATIC_OUTDIR)/libleveldb.a $(STATIC_OUTDIR)/libmemenv.a $(STATIC_PROGRAMS)
-
-check: $(STATIC_PROGRAMS)
- for t in $(notdir $(TESTS)); do echo "***** Running $$t"; $(STATIC_OUTDIR)/$$t || exit 1; done
-
-clean:
- -rm -rf out-static out-shared out-ios-x86 out-ios-arm out-ios-universal
- -rm -f build_config.mk
- -rm -rf ios-x86 ios-arm
-
-$(STATIC_OUTDIR):
- mkdir $@
-
-$(STATIC_OUTDIR)/db: | $(STATIC_OUTDIR)
- mkdir $@
-
-$(STATIC_OUTDIR)/helpers/memenv: | $(STATIC_OUTDIR)
- mkdir -p $@
-
-$(STATIC_OUTDIR)/port: | $(STATIC_OUTDIR)
- mkdir $@
-
-$(STATIC_OUTDIR)/table: | $(STATIC_OUTDIR)
- mkdir $@
-
-$(STATIC_OUTDIR)/util: | $(STATIC_OUTDIR)
- mkdir $@
-
-.PHONY: STATIC_OBJDIRS
-STATIC_OBJDIRS: \
- $(STATIC_OUTDIR)/db \
- $(STATIC_OUTDIR)/port \
- $(STATIC_OUTDIR)/table \
- $(STATIC_OUTDIR)/util \
- $(STATIC_OUTDIR)/helpers/memenv
-
-$(SHARED_OUTDIR):
- mkdir $@
-
-$(SHARED_OUTDIR)/db: | $(SHARED_OUTDIR)
- mkdir $@
-
-$(SHARED_OUTDIR)/helpers/memenv: | $(SHARED_OUTDIR)
- mkdir -p $@
-
-$(SHARED_OUTDIR)/port: | $(SHARED_OUTDIR)
- mkdir $@
-
-$(SHARED_OUTDIR)/table: | $(SHARED_OUTDIR)
- mkdir $@
-
-$(SHARED_OUTDIR)/util: | $(SHARED_OUTDIR)
- mkdir $@
-
-.PHONY: SHARED_OBJDIRS
-SHARED_OBJDIRS: \
- $(SHARED_OUTDIR)/db \
- $(SHARED_OUTDIR)/port \
- $(SHARED_OUTDIR)/table \
- $(SHARED_OUTDIR)/util \
- $(SHARED_OUTDIR)/helpers/memenv
-
-$(DEVICE_OUTDIR):
- mkdir $@
-
-$(DEVICE_OUTDIR)/db: | $(DEVICE_OUTDIR)
- mkdir $@
-
-$(DEVICE_OUTDIR)/helpers/memenv: | $(DEVICE_OUTDIR)
- mkdir -p $@
-
-$(DEVICE_OUTDIR)/port: | $(DEVICE_OUTDIR)
- mkdir $@
-
-$(DEVICE_OUTDIR)/table: | $(DEVICE_OUTDIR)
- mkdir $@
-
-$(DEVICE_OUTDIR)/util: | $(DEVICE_OUTDIR)
- mkdir $@
-
-.PHONY: DEVICE_OBJDIRS
-DEVICE_OBJDIRS: \
- $(DEVICE_OUTDIR)/db \
- $(DEVICE_OUTDIR)/port \
- $(DEVICE_OUTDIR)/table \
- $(DEVICE_OUTDIR)/util \
- $(DEVICE_OUTDIR)/helpers/memenv
-
-$(SIMULATOR_OUTDIR):
- mkdir $@
-
-$(SIMULATOR_OUTDIR)/db: | $(SIMULATOR_OUTDIR)
- mkdir $@
-
-$(SIMULATOR_OUTDIR)/helpers/memenv: | $(SIMULATOR_OUTDIR)
- mkdir -p $@
-
-$(SIMULATOR_OUTDIR)/port: | $(SIMULATOR_OUTDIR)
- mkdir $@
-
-$(SIMULATOR_OUTDIR)/table: | $(SIMULATOR_OUTDIR)
- mkdir $@
-
-$(SIMULATOR_OUTDIR)/util: | $(SIMULATOR_OUTDIR)
- mkdir $@
-
-.PHONY: SIMULATOR_OBJDIRS
-SIMULATOR_OBJDIRS: \
- $(SIMULATOR_OUTDIR)/db \
- $(SIMULATOR_OUTDIR)/port \
- $(SIMULATOR_OUTDIR)/table \
- $(SIMULATOR_OUTDIR)/util \
- $(SIMULATOR_OUTDIR)/helpers/memenv
-
-$(STATIC_ALLOBJS): | STATIC_OBJDIRS
-$(DEVICE_ALLOBJS): | DEVICE_OBJDIRS
-$(SIMULATOR_ALLOBJS): | SIMULATOR_OBJDIRS
-$(SHARED_ALLOBJS): | SHARED_OBJDIRS
-
-ifeq ($(PLATFORM), IOS)
-$(DEVICE_OUTDIR)/libleveldb.a: $(DEVICE_LIBOBJECTS)
- rm -f $@
- $(AR) -rs $@ $(DEVICE_LIBOBJECTS)
-
-$(SIMULATOR_OUTDIR)/libleveldb.a: $(SIMULATOR_LIBOBJECTS)
- rm -f $@
- $(AR) -rs $@ $(SIMULATOR_LIBOBJECTS)
-
-$(DEVICE_OUTDIR)/libmemenv.a: $(DEVICE_MEMENVOBJECTS)
- rm -f $@
- $(AR) -rs $@ $(DEVICE_MEMENVOBJECTS)
-
-$(SIMULATOR_OUTDIR)/libmemenv.a: $(SIMULATOR_MEMENVOBJECTS)
- rm -f $@
- $(AR) -rs $@ $(SIMULATOR_MEMENVOBJECTS)
-
-# For iOS, create universal object libraries to be used on both the simulator and
-# a device.
-$(STATIC_OUTDIR)/libleveldb.a: $(STATIC_OUTDIR) $(DEVICE_OUTDIR)/libleveldb.a $(SIMULATOR_OUTDIR)/libleveldb.a
- lipo -create $(DEVICE_OUTDIR)/libleveldb.a $(SIMULATOR_OUTDIR)/libleveldb.a -output $@
-
-$(STATIC_OUTDIR)/libmemenv.a: $(STATIC_OUTDIR) $(DEVICE_OUTDIR)/libmemenv.a $(SIMULATOR_OUTDIR)/libmemenv.a
- lipo -create $(DEVICE_OUTDIR)/libmemenv.a $(SIMULATOR_OUTDIR)/libmemenv.a -output $@
-else
-$(STATIC_OUTDIR)/libleveldb.a:$(STATIC_LIBOBJECTS)
- rm -f $@
- $(AR) -rs $@ $(STATIC_LIBOBJECTS)
-
-$(STATIC_OUTDIR)/libmemenv.a:$(STATIC_MEMENVOBJECTS)
- rm -f $@
- $(AR) -rs $@ $(STATIC_MEMENVOBJECTS)
-endif
-
-$(SHARED_MEMENVLIB):$(SHARED_MEMENVOBJECTS)
- rm -f $@
- $(AR) -rs $@ $(SHARED_MEMENVOBJECTS)
-
-$(STATIC_OUTDIR)/db_bench:db/db_bench.cc $(STATIC_LIBOBJECTS) $(TESTUTIL)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) db/db_bench.cc $(STATIC_LIBOBJECTS) $(TESTUTIL) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/db_bench_sqlite3:doc/bench/db_bench_sqlite3.cc $(STATIC_LIBOBJECTS) $(TESTUTIL)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) doc/bench/db_bench_sqlite3.cc $(STATIC_LIBOBJECTS) $(TESTUTIL) -o $@ -lsqlite3 $(LIBS)
-
-$(STATIC_OUTDIR)/db_bench_tree_db:doc/bench/db_bench_tree_db.cc $(STATIC_LIBOBJECTS) $(TESTUTIL)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) doc/bench/db_bench_tree_db.cc $(STATIC_LIBOBJECTS) $(TESTUTIL) -o $@ -lkyotocabinet $(LIBS)
-
-$(STATIC_OUTDIR)/leveldbutil:db/leveldbutil.cc $(STATIC_LIBOBJECTS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) db/leveldbutil.cc $(STATIC_LIBOBJECTS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/arena_test:util/arena_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) util/arena_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/autocompact_test:db/autocompact_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) db/autocompact_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/bloom_test:util/bloom_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) util/bloom_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/c_test:$(STATIC_OUTDIR)/db/c_test.o $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(STATIC_OUTDIR)/db/c_test.o $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/cache_test:util/cache_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) util/cache_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/coding_test:util/coding_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) util/coding_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/corruption_test:db/corruption_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) db/corruption_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/crc32c_test:util/crc32c_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) util/crc32c_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/db_test:db/db_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) db/db_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/dbformat_test:db/dbformat_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) db/dbformat_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/env_posix_test:util/env_posix_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) util/env_posix_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/env_test:util/env_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) util/env_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/fault_injection_test:db/fault_injection_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) db/fault_injection_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/filename_test:db/filename_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) db/filename_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/filter_block_test:table/filter_block_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) table/filter_block_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/hash_test:util/hash_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) util/hash_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/issue178_test:issues/issue178_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) issues/issue178_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/issue200_test:issues/issue200_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) issues/issue200_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/log_test:db/log_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) db/log_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/recovery_test:db/recovery_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) db/recovery_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/table_test:table/table_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) table/table_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/skiplist_test:db/skiplist_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) db/skiplist_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/version_edit_test:db/version_edit_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) db/version_edit_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/version_set_test:db/version_set_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) db/version_set_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/write_batch_test:db/write_batch_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS)
- $(CXX) $(LDFLAGS) $(CXXFLAGS) db/write_batch_test.cc $(STATIC_LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)
-
-$(STATIC_OUTDIR)/memenv_test:$(STATIC_OUTDIR)/helpers/memenv/memenv_test.o $(STATIC_OUTDIR)/libmemenv.a $(STATIC_OUTDIR)/libleveldb.a $(TESTHARNESS)
- $(XCRUN) $(CXX) $(LDFLAGS) $(STATIC_OUTDIR)/helpers/memenv/memenv_test.o $(STATIC_OUTDIR)/libmemenv.a $(STATIC_OUTDIR)/libleveldb.a $(TESTHARNESS) -o $@ $(LIBS)
-
-$(SHARED_OUTDIR)/db_bench:$(SHARED_OUTDIR)/db/db_bench.o $(SHARED_LIBS) $(TESTUTIL)
- $(XCRUN) $(CXX) $(LDFLAGS) $(CXXFLAGS) $(PLATFORM_SHARED_CFLAGS) $(SHARED_OUTDIR)/db/db_bench.o $(TESTUTIL) $(SHARED_OUTDIR)/$(SHARED_LIB3) -o $@ $(LIBS)
-
-.PHONY: run-shared
-run-shared: $(SHARED_OUTDIR)/db_bench
- LD_LIBRARY_PATH=$(SHARED_OUTDIR) $(SHARED_OUTDIR)/db_bench
-
-$(SIMULATOR_OUTDIR)/%.o: %.cc
- xcrun -sdk iphonesimulator $(CXX) $(CXXFLAGS) $(SIMULATOR_CFLAGS) -c $< -o $@
-
-$(DEVICE_OUTDIR)/%.o: %.cc
- xcrun -sdk iphoneos $(CXX) $(CXXFLAGS) $(DEVICE_CFLAGS) -c $< -o $@
-
-$(SIMULATOR_OUTDIR)/%.o: %.c
- xcrun -sdk iphonesimulator $(CC) $(CFLAGS) $(SIMULATOR_CFLAGS) -c $< -o $@
-
-$(DEVICE_OUTDIR)/%.o: %.c
- xcrun -sdk iphoneos $(CC) $(CFLAGS) $(DEVICE_CFLAGS) -c $< -o $@
-
-$(STATIC_OUTDIR)/%.o: %.cc
- $(CXX) $(CXXFLAGS) -c $< -o $@
-
-$(STATIC_OUTDIR)/%.o: %.c
- $(CC) $(CFLAGS) -c $< -o $@
-
-$(SHARED_OUTDIR)/%.o: %.cc
- $(CXX) $(CXXFLAGS) $(PLATFORM_SHARED_CFLAGS) -c $< -o $@
-
-$(SHARED_OUTDIR)/%.o: %.c
- $(CC) $(CFLAGS) $(PLATFORM_SHARED_CFLAGS) -c $< -o $@
-
-$(STATIC_OUTDIR)/port/port_posix_sse.o: port/port_posix_sse.cc
- $(CXX) $(CXXFLAGS) $(PLATFORM_SSEFLAGS) -c $< -o $@
-
-$(SHARED_OUTDIR)/port/port_posix_sse.o: port/port_posix_sse.cc
- $(CXX) $(CXXFLAGS) $(PLATFORM_SHARED_CFLAGS) $(PLATFORM_SSEFLAGS) -c $< -o $@
**LevelDB is a fast key-value storage library written at Google that provides an ordered mapping from string keys to string values.**
[](https://travis-ci.org/google/leveldb)
+[](https://ci.appveyor.com/project/pwnall/leveldb)
Authors: Sanjay Ghemawat (sanjay@google.com) and Jeff Dean (jeff@google.com)
# Features
+
* Keys and values are arbitrary byte arrays.
* Data is stored sorted by key.
 * Callers can provide a custom comparison function to override the sort order (see the example after this list).
* External activity (file system operations etc.) is relayed through a virtual interface so users can customize the operating system interactions.
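+
+As a brief illustration of the custom-comparator feature noted above, here is a
+minimal sketch built on the public headers; the `ReverseComparator` class and
+the `/tmp/reverse_db` path are purely illustrative:
+
+```cpp
+#include <string>
+
+#include "leveldb/comparator.h"
+#include "leveldb/db.h"
+#include "leveldb/options.h"
+#include "leveldb/slice.h"
+
+// Orders keys in reverse of the default bytewise order.
+class ReverseComparator : public leveldb::Comparator {
+ public:
+  int Compare(const leveldb::Slice& a, const leveldb::Slice& b) const override {
+    return -a.compare(b);  // Slice::compare() is a bytewise three-way comparison.
+  }
+  // The comparator name is stored in the database; reopening with a
+  // different name (or a different ordering) will fail.
+  const char* Name() const override { return "leveldb.ReverseComparator-example"; }
+  // No-op implementations are legal; they only affect index key sizes.
+  void FindShortestSeparator(std::string*, const leveldb::Slice&) const override {}
+  void FindShortSuccessor(std::string*) const override {}
+};
+
+int main() {
+  ReverseComparator cmp;
+  leveldb::Options options;
+  options.create_if_missing = true;
+  options.comparator = &cmp;  // Must outlive the DB and be passed on every open.
+  leveldb::DB* db;
+  leveldb::Status status = leveldb::DB::Open(options, "/tmp/reverse_db", &db);
+  if (!status.ok()) return 1;
+  db->Put(leveldb::WriteOptions(), "key", "value");
+  delete db;
+  return 0;
+}
+```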
# Documentation
- [LevelDB library documentation](https://github.com/google/leveldb/blob/master/doc/index.md) is online and bundled with the source code.
+ [LevelDB library documentation](https://github.com/google/leveldb/blob/master/doc/index.md) is online and bundled with the source code.
# Limitations
+
* This is not a SQL database. It does not have a relational data model, it does not support SQL queries, and it has no support for indexes.
* Only a single process (possibly multi-threaded) can access a particular database at a time.
 * There is no client-server support built in to the library. An application that needs such support will have to wrap its own server around the library.
+# Building
+
+This project supports [CMake](https://cmake.org/) out of the box.
+
+### Building for POSIX
+
+Quick start:
+
+```bash
+mkdir -p build && cd build
+cmake -DCMAKE_BUILD_TYPE=Release .. && cmake --build .
+```
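+
+If the test and benchmark options are left at their defaults (`LEVELDB_BUILD_TESTS`
+and `LEVELDB_BUILD_BENCHMARKS` are both `ON`), the suite can then be run with
+CTest. A minimal sketch from the top of the source tree, mirroring the project's
+Travis CI script:
+
+```bash
+cd build
+# Runs every test registered through add_test() in CMakeLists.txt.
+ctest --verbose
+# Runs the benchmark driver if it was built (static builds only).
+[ -f db_bench ] && ./db_bench
+cd ..
+```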
+
+### Building for Windows
+
+First generate the Visual Studio 2017 project/solution files:
+
+```bash
+mkdir -p build
+cd build
+cmake -G "Visual Studio 15" ..
+```
+The default configuration builds for x86. For a 64-bit build, run:
+
+```bash
+cmake -G "Visual Studio 15 Win64" ..
+```
+
+To compile the Windows solution from the command-line:
+
+```bash
+devenv /build Debug leveldb.sln
+```
+
+or open leveldb.sln in Visual Studio and build from within.
+
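+Alternatively, the generated solution can be driven through CMake itself, as the
+project's AppVeyor configuration does; for example, from the `build` directory:
+
+```bash
+# Equivalent to building the Debug configuration inside Visual Studio.
+cmake --build . --config Debug
+```
+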
+Please see the CMake documentation and `CMakeLists.txt` for more advanced usage.
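+
+When `LEVELDB_INSTALL` is on, `cmake --build . --target install` also installs a
+CMake package, so another project can locate the installed library with
+`find_package`. A minimal sketch of such a consumer's `CMakeLists.txt` (the
+`my_app` target and `main.cc` source are placeholders):
+
+```cmake
+cmake_minimum_required(VERSION 3.9)
+project(my_app CXX)
+
+# Finds the leveldbConfig.cmake installed under <prefix>/lib/cmake/leveldb.
+find_package(leveldb REQUIRED)
+
+add_executable(my_app main.cc)
+# The target name comes from the install(EXPORT ... NAMESPACE leveldb::) rule.
+target_link_libraries(my_app leveldb::leveldb)
+```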
+
# Contributing to the leveldb Project
+
The leveldb project welcomes contributions. leveldb's primary goal is to be
a reliable and fast key/value store. Changes that are in line with the
features/limitations outlined above, and meet the requirements below, will be considered.
Contribution requirements:
-1. **POSIX only**. We _generally_ will only accept changes that are both
- compiled, and tested on a POSIX platform - usually Linux. Very small
- changes will sometimes be accepted, but consider that more of an
- exception than the rule.
+1. **Tested platforms only**. We _generally_ will only accept changes for
+ platforms that are compiled and tested. This means POSIX (for Linux and
+ macOS) or Windows. Very small changes will sometimes be accepted, but
+ consider that more of an exception than the rule.
2. **Stable API**. We strive very hard to maintain a stable API. Changes that
   require changes for projects using leveldb _might_ be rejected without
   sufficient benefit to the project.
3. **Tests**: All changes must be accompanied by a new (or changed) test, or
   a sufficient explanation as to why a new (or changed) test is not required.
## Submitting a Pull Request
+
Before any pull request will be accepted the author must first sign a
Contributor License Agreement (CLA) at https://cla.developers.google.com/.
+++ /dev/null
-#!/bin/sh
-#
-# Detects OS we're compiling on and outputs a file specified by the first
-# argument, which in turn gets read while processing Makefile.
-#
-# The output will set the following variables:
-# CC C Compiler path
-# CXX C++ Compiler path
-# PLATFORM_LDFLAGS Linker flags
-# PLATFORM_LIBS Libraries flags
-# PLATFORM_SHARED_EXT Extension for shared libraries
-# PLATFORM_SHARED_LDFLAGS Flags for building shared library
-# This flag is embedded just before the name
-# of the shared library without intervening spaces
-# PLATFORM_SHARED_CFLAGS Flags for compiling objects for shared library
-# PLATFORM_CCFLAGS C compiler flags
-# PLATFORM_CXXFLAGS C++ compiler flags. Will contain:
-# PLATFORM_SHARED_VERSIONED Set to 'true' if platform supports versioned
-# shared libraries, empty otherwise.
-#
-# The PLATFORM_CCFLAGS and PLATFORM_CXXFLAGS might include the following:
-#
-# -DLEVELDB_ATOMIC_PRESENT if <atomic> is present
-# -DLEVELDB_PLATFORM_POSIX for Posix-based platforms
-# -DSNAPPY if the Snappy library is present
-#
-
-OUTPUT=$1
-PREFIX=$2
-if test -z "$OUTPUT" || test -z "$PREFIX"; then
- echo "usage: $0 <output-filename> <directory_prefix>" >&2
- exit 1
-fi
-
-# Delete existing output, if it exists
-rm -f $OUTPUT
-touch $OUTPUT
-
-if test -z "$CC"; then
- CC=cc
-fi
-
-if test -z "$CXX"; then
- CXX=g++
-fi
-
-if test -z "$TMPDIR"; then
- TMPDIR=/tmp
-fi
-
-# Detect OS
-if test -z "$TARGET_OS"; then
- TARGET_OS=`uname -s`
-fi
-
-COMMON_FLAGS=
-CROSS_COMPILE=
-PLATFORM_CCFLAGS=
-PLATFORM_CXXFLAGS=
-PLATFORM_LDFLAGS=
-PLATFORM_LIBS=
-PLATFORM_SHARED_EXT="so"
-PLATFORM_SHARED_LDFLAGS="-shared -Wl,-soname -Wl,"
-PLATFORM_SHARED_CFLAGS="-fPIC"
-PLATFORM_SHARED_VERSIONED=true
-PLATFORM_SSEFLAGS=
-
-MEMCMP_FLAG=
-if [ "$CXX" = "g++" ]; then
- # Use libc's memcmp instead of GCC's memcmp. This results in ~40%
- # performance improvement on readrandom under gcc 4.4.3 on Linux/x86.
- MEMCMP_FLAG="-fno-builtin-memcmp"
-fi
-
-case "$TARGET_OS" in
- CYGWIN_*)
- PLATFORM=OS_LINUX
- COMMON_FLAGS="$MEMCMP_FLAG -lpthread -DOS_LINUX -DCYGWIN"
- PLATFORM_LDFLAGS="-lpthread"
- PORT_FILE=port/port_posix.cc
- PORT_SSE_FILE=port/port_posix_sse.cc
- ;;
- Darwin)
- PLATFORM=OS_MACOSX
- COMMON_FLAGS="$MEMCMP_FLAG -DOS_MACOSX"
- PLATFORM_SHARED_EXT=dylib
- [ -z "$INSTALL_PATH" ] && INSTALL_PATH=`pwd`
- PLATFORM_SHARED_LDFLAGS="-dynamiclib -install_name $INSTALL_PATH/"
- PORT_FILE=port/port_posix.cc
- PORT_SSE_FILE=port/port_posix_sse.cc
- ;;
- Linux)
- PLATFORM=OS_LINUX
- COMMON_FLAGS="$MEMCMP_FLAG -pthread -DOS_LINUX"
- PLATFORM_LDFLAGS="-pthread"
- PORT_FILE=port/port_posix.cc
- PORT_SSE_FILE=port/port_posix_sse.cc
- ;;
- SunOS)
- PLATFORM=OS_SOLARIS
- COMMON_FLAGS="$MEMCMP_FLAG -D_REENTRANT -DOS_SOLARIS"
- PLATFORM_LIBS="-lpthread -lrt"
- PORT_FILE=port/port_posix.cc
- PORT_SSE_FILE=port/port_posix_sse.cc
- ;;
- FreeBSD)
- PLATFORM=OS_FREEBSD
- COMMON_FLAGS="$MEMCMP_FLAG -D_REENTRANT -DOS_FREEBSD"
- PLATFORM_LIBS="-lpthread"
- PORT_FILE=port/port_posix.cc
- PORT_SSE_FILE=port/port_posix_sse.cc
- ;;
- NetBSD)
- PLATFORM=OS_NETBSD
- COMMON_FLAGS="$MEMCMP_FLAG -D_REENTRANT -DOS_NETBSD"
- PLATFORM_LIBS="-lpthread -lgcc_s"
- PORT_FILE=port/port_posix.cc
- PORT_SSE_FILE=port/port_posix_sse.cc
- ;;
- OpenBSD)
- PLATFORM=OS_OPENBSD
- COMMON_FLAGS="$MEMCMP_FLAG -D_REENTRANT -DOS_OPENBSD"
- PLATFORM_LDFLAGS="-pthread"
- PORT_FILE=port/port_posix.cc
- PORT_SSE_FILE=port/port_posix_sse.cc
- ;;
- DragonFly)
- PLATFORM=OS_DRAGONFLYBSD
- COMMON_FLAGS="$MEMCMP_FLAG -D_REENTRANT -DOS_DRAGONFLYBSD"
- PLATFORM_LIBS="-lpthread"
- PORT_FILE=port/port_posix.cc
- PORT_SSE_FILE=port/port_posix_sse.cc
- ;;
- OS_ANDROID_CROSSCOMPILE)
- PLATFORM=OS_ANDROID
- COMMON_FLAGS="$MEMCMP_FLAG -D_REENTRANT -DOS_ANDROID -DLEVELDB_PLATFORM_POSIX"
- PLATFORM_LDFLAGS="" # All pthread features are in the Android C library
- PORT_FILE=port/port_posix.cc
- PORT_SSE_FILE=port/port_posix_sse.cc
- CROSS_COMPILE=true
- ;;
- HP-UX)
- PLATFORM=OS_HPUX
- COMMON_FLAGS="$MEMCMP_FLAG -D_REENTRANT -DOS_HPUX"
- PLATFORM_LDFLAGS="-pthread"
- PORT_FILE=port/port_posix.cc
- PORT_SSE_FILE=port/port_posix_sse.cc
- # man ld: +h internal_name
- PLATFORM_SHARED_LDFLAGS="-shared -Wl,+h -Wl,"
- ;;
- IOS)
- PLATFORM=IOS
- COMMON_FLAGS="$MEMCMP_FLAG -DOS_MACOSX"
- [ -z "$INSTALL_PATH" ] && INSTALL_PATH=`pwd`
- PORT_FILE=port/port_posix.cc
- PORT_SSE_FILE=port/port_posix_sse.cc
- PLATFORM_SHARED_EXT=
- PLATFORM_SHARED_LDFLAGS=
- PLATFORM_SHARED_CFLAGS=
- PLATFORM_SHARED_VERSIONED=
- ;;
- *)
- echo "Unknown platform!" >&2
- exit 1
-esac
-
-# We want to make a list of all cc files within util, db, table, and helpers
-# except for the test and benchmark files. By default, find will output a list
-# of all files matching either rule, so we need to append -print to make the
-# prune take effect.
-DIRS="$PREFIX/db $PREFIX/util $PREFIX/table"
-
-set -f # temporarily disable globbing so that our patterns aren't expanded
-PRUNE_TEST="-name *test*.cc -prune"
-PRUNE_BENCH="-name *_bench.cc -prune"
-PRUNE_TOOL="-name leveldbutil.cc -prune"
-PORTABLE_FILES=`find $DIRS $PRUNE_TEST -o $PRUNE_BENCH -o $PRUNE_TOOL -o -name '*.cc' -print | sort | sed "s,^$PREFIX/,," | tr "\n" " "`
-
-set +f # re-enable globbing
-
-# The sources consist of the portable files, plus the platform-specific port
-# file.
-echo "SOURCES=$PORTABLE_FILES $PORT_FILE $PORT_SSE_FILE" >> $OUTPUT
-echo "MEMENV_SOURCES=helpers/memenv/memenv.cc" >> $OUTPUT
-
-if [ "$CROSS_COMPILE" = "true" ]; then
- # Cross-compiling; do not try any compilation tests.
- true
-else
- CXXOUTPUT="${TMPDIR}/leveldb_build_detect_platform-cxx.$$"
-
- # If -std=c++0x works, use <atomic> as fallback for when memory barriers
- # are not available.
- $CXX $CXXFLAGS -std=c++0x -x c++ - -o $CXXOUTPUT 2>/dev/null <<EOF
- #include <atomic>
- int main() {}
-EOF
- if [ "$?" = 0 ]; then
- COMMON_FLAGS="$COMMON_FLAGS -DLEVELDB_PLATFORM_POSIX -DLEVELDB_ATOMIC_PRESENT"
- PLATFORM_CXXFLAGS="-std=c++0x"
- else
- COMMON_FLAGS="$COMMON_FLAGS -DLEVELDB_PLATFORM_POSIX"
- fi
-
- # Test whether Snappy library is installed
- # http://code.google.com/p/snappy/
- $CXX $CXXFLAGS -x c++ - -o $CXXOUTPUT 2>/dev/null <<EOF
- #include <snappy.h>
- int main() {}
-EOF
- if [ "$?" = 0 ]; then
- COMMON_FLAGS="$COMMON_FLAGS -DSNAPPY"
- PLATFORM_LIBS="$PLATFORM_LIBS -lsnappy"
- fi
-
- # Test whether tcmalloc is available
- $CXX $CXXFLAGS -x c++ - -o $CXXOUTPUT -ltcmalloc 2>/dev/null <<EOF
- int main() {}
-EOF
- if [ "$?" = 0 ]; then
- PLATFORM_LIBS="$PLATFORM_LIBS -ltcmalloc"
- fi
-
- rm -f $CXXOUTPUT 2>/dev/null
-
- # Test if gcc SSE 4.2 is supported
- $CXX $CXXFLAGS -x c++ - -o $CXXOUTPUT -msse4.2 2>/dev/null <<EOF
- int main() {}
-EOF
- if [ "$?" = 0 ]; then
- PLATFORM_SSEFLAGS="-msse4.2"
- fi
-
- rm -f $CXXOUTPUT 2>/dev/null
-fi
-
-# Use the SSE 4.2 CRC32C intrinsics iff runtime checks indicate compiler supports them.
-if [ -n "$PLATFORM_SSEFLAGS" ]; then
- PLATFORM_SSEFLAGS="$PLATFORM_SSEFLAGS -DLEVELDB_PLATFORM_POSIX_SSE"
-fi
-
-PLATFORM_CCFLAGS="$PLATFORM_CCFLAGS $COMMON_FLAGS"
-PLATFORM_CXXFLAGS="$PLATFORM_CXXFLAGS $COMMON_FLAGS"
-
-echo "CC=$CC" >> $OUTPUT
-echo "CXX=$CXX" >> $OUTPUT
-echo "PLATFORM=$PLATFORM" >> $OUTPUT
-echo "PLATFORM_LDFLAGS=$PLATFORM_LDFLAGS" >> $OUTPUT
-echo "PLATFORM_LIBS=$PLATFORM_LIBS" >> $OUTPUT
-echo "PLATFORM_CCFLAGS=$PLATFORM_CCFLAGS" >> $OUTPUT
-echo "PLATFORM_CXXFLAGS=$PLATFORM_CXXFLAGS" >> $OUTPUT
-echo "PLATFORM_SSEFLAGS=$PLATFORM_SSEFLAGS" >> $OUTPUT
-echo "PLATFORM_SHARED_CFLAGS=$PLATFORM_SHARED_CFLAGS" >> $OUTPUT
-echo "PLATFORM_SHARED_EXT=$PLATFORM_SHARED_EXT" >> $OUTPUT
-echo "PLATFORM_SHARED_LDFLAGS=$PLATFORM_SHARED_LDFLAGS" >> $OUTPUT
-echo "PLATFORM_SHARED_VERSIONED=$PLATFORM_SHARED_VERSIONED" >> $OUTPUT
--- /dev/null
+include("${CMAKE_CURRENT_LIST_DIR}/leveldbTargets.cmake")
}
// Finish and check for builder errors
+ s = builder->Finish();
if (s.ok()) {
- s = builder->Finish();
- if (s.ok()) {
- meta->file_size = builder->FileSize();
- assert(meta->file_size > 0);
- }
- } else {
- builder->Abandon();
+ meta->file_size = builder->FileSize();
+ assert(meta->file_size > 0);
}
delete builder;
s = file->Close();
}
delete file;
- file = NULL;
+ file = nullptr;
if (s.ok()) {
// Verify that the table is usable
// *meta will be filled with metadata about the generated table.
// If no data is present in *iter, meta->file_size will be set to
// zero, and no Table file will be produced.
-extern Status BuildTable(const std::string& dbname,
- Env* env,
- const Options& options,
- TableCache* table_cache,
- Iterator* iter,
- FileMetaData* meta);
+Status BuildTable(const std::string& dbname,
+ Env* env,
+ const Options& options,
+ TableCache* table_cache,
+ Iterator* iter,
+ FileMetaData* meta);
} // namespace leveldb
#include "leveldb/c.h"
#include <stdlib.h>
-#include <unistd.h>
#include "leveldb/cache.h"
#include "leveldb/comparator.h"
#include "leveldb/db.h"
};
static bool SaveError(char** errptr, const Status& s) {
- assert(errptr != NULL);
+ assert(errptr != nullptr);
if (s.ok()) {
return false;
- } else if (*errptr == NULL) {
+ } else if (*errptr == nullptr) {
*errptr = strdup(s.ToString().c_str());
} else {
// TODO(sanjay): Merge with existing error?
char** errptr) {
DB* db;
if (SaveError(errptr, DB::Open(options->rep, std::string(name), &db))) {
- return NULL;
+ return nullptr;
}
leveldb_t* result = new leveldb_t;
result->rep = db;
const char* key, size_t keylen,
size_t* vallen,
char** errptr) {
- char* result = NULL;
+ char* result = nullptr;
std::string tmp;
Status s = db->rep->Get(options->rep, Slice(key, keylen), &tmp);
if (s.ok()) {
// We use strdup() since we expect human readable output.
return strdup(tmp.c_str());
} else {
- return NULL;
+ return nullptr;
}
}
const char* limit_key, size_t limit_key_len) {
Slice a, b;
db->rep->CompactRange(
- // Pass NULL Slice if corresponding "const char*" is NULL
- (start_key ? (a = Slice(start_key, start_key_len), &a) : NULL),
- (limit_key ? (b = Slice(limit_key, limit_key_len), &b) : NULL));
+ // Pass null Slice if corresponding "const char*" is null
+ (start_key ? (a = Slice(start_key, start_key_len), &a) : nullptr),
+ (limit_key ? (b = Slice(limit_key, limit_key_len), &b) : nullptr));
}
void leveldb_destroy_db(
}
void leveldb_writebatch_iterate(
- leveldb_writebatch_t* b,
+ const leveldb_writebatch_t* b,
void* state,
void (*put)(void*, const char* k, size_t klen, const char* v, size_t vlen),
void (*deleted)(void*, const char* k, size_t klen)) {
b->rep.Iterate(&handler);
}
+void leveldb_writebatch_append(leveldb_writebatch_t *destination,
+ const leveldb_writebatch_t *source) {
+ destination->rep.Append(source->rep);
+}
+
leveldb_options_t* leveldb_options_create() {
return new leveldb_options_t;
}
}
void leveldb_options_set_env(leveldb_options_t* opt, leveldb_env_t* env) {
- opt->rep.env = (env ? env->rep : NULL);
+ opt->rep.env = (env ? env->rep : nullptr);
}
void leveldb_options_set_info_log(leveldb_options_t* opt, leveldb_logger_t* l) {
- opt->rep.info_log = (l ? l->rep : NULL);
+ opt->rep.info_log = (l ? l->rep : nullptr);
}
void leveldb_options_set_write_buffer_size(leveldb_options_t* opt, size_t s) {
opt->rep.block_restart_interval = n;
}
+void leveldb_options_set_max_file_size(leveldb_options_t* opt, size_t s) {
+ opt->rep.max_file_size = s;
+}
+
void leveldb_options_set_compression(leveldb_options_t* opt, int t) {
opt->rep.compression = static_cast<CompressionType>(t);
}
};
Wrapper* wrapper = new Wrapper;
wrapper->rep_ = NewBloomFilterPolicy(bits_per_key);
- wrapper->state_ = NULL;
+ wrapper->state_ = nullptr;
wrapper->destructor_ = &Wrapper::DoNothing;
return wrapper;
}
void leveldb_readoptions_set_snapshot(
leveldb_readoptions_t* opt,
const leveldb_snapshot_t* snap) {
- opt->rep.snapshot = (snap ? snap->rep : NULL);
+ opt->rep.snapshot = (snap ? snap->rep : nullptr);
}
leveldb_writeoptions_t* leveldb_writeoptions_create() {
delete env;
}
+char* leveldb_env_get_test_directory(leveldb_env_t* env) {
+ std::string result;
+ if (!env->rep->GetTestDirectory(&result).ok()) {
+ return nullptr;
+ }
+
+ char* buffer = static_cast<char*>(malloc(result.size() + 1));
+ memcpy(buffer, result.data(), result.size());
+ buffer[result.size()] = '\0';
+ return buffer;
+}
+
void leveldb_free(void* ptr) {
free(ptr);
}
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
-#include <sys/types.h>
-#include <unistd.h>
const char* phase = "";
-static char dbname[200];
static void StartPhase(const char* name) {
fprintf(stderr, "=== Test %s\n", name);
phase = name;
}
-static const char* GetTempDir(void) {
- const char* ret = getenv("TEST_TMPDIR");
- if (ret == NULL || ret[0] == '\0')
- ret = "/tmp";
- return ret;
-}
-
#define CheckNoError(err) \
if ((err) != NULL) { \
fprintf(stderr, "%s:%d: %s: %s\n", __FILE__, __LINE__, phase, (err)); \
leveldb_options_t* options;
leveldb_readoptions_t* roptions;
leveldb_writeoptions_t* woptions;
+ char* dbname;
char* err = NULL;
int run = -1;
CheckCondition(leveldb_major_version() >= 1);
CheckCondition(leveldb_minor_version() >= 1);
- snprintf(dbname, sizeof(dbname),
- "%s/leveldb_c_test-%d",
- GetTempDir(),
- ((int) geteuid()));
-
StartPhase("create_objects");
cmp = leveldb_comparator_create(NULL, CmpDestroy, CmpCompare, CmpName);
env = leveldb_create_default_env();
cache = leveldb_cache_create_lru(100000);
+ dbname = leveldb_env_get_test_directory(env);
+ CheckCondition(dbname != NULL);
options = leveldb_options_create();
leveldb_options_set_comparator(options, cmp);
leveldb_options_set_max_open_files(options, 10);
leveldb_options_set_block_size(options, 1024);
leveldb_options_set_block_restart_interval(options, 8);
+ leveldb_options_set_max_file_size(options, 3 << 20);
leveldb_options_set_compression(options, leveldb_no_compression);
roptions = leveldb_readoptions_create();
leveldb_writebatch_clear(wb);
leveldb_writebatch_put(wb, "bar", 3, "b", 1);
leveldb_writebatch_put(wb, "box", 3, "c", 1);
- leveldb_writebatch_delete(wb, "bar", 3);
+
+ leveldb_writebatch_t* wb2 = leveldb_writebatch_create();
+ leveldb_writebatch_delete(wb2, "bar", 3);
+ leveldb_writebatch_append(wb, wb2);
+ leveldb_writebatch_destroy(wb2);
+
leveldb_write(db, woptions, wb, &err);
CheckNoError(err);
CheckGet(db, roptions, "foo", "hello");
CheckGet(db, roptions, "bar", NULL);
CheckGet(db, roptions, "box", "c");
+
int pos = 0;
leveldb_writebatch_iterate(wb, &pos, CheckPut, CheckDel);
CheckCondition(pos == 3);
leveldb_options_destroy(options);
leveldb_readoptions_destroy(roptions);
leveldb_writeoptions_destroy(woptions);
+ leveldb_free(dbname);
leveldb_cache_destroy(cache);
leveldb_comparator_destroy(cmp);
leveldb_env_destroy(env);
#include "leveldb/db.h"
-#include <errno.h>
-#include <fcntl.h>
-#include <sys/stat.h>
#include <sys/types.h>
#include "leveldb/cache.h"
-#include "leveldb/env.h"
#include "leveldb/table.h"
#include "leveldb/write_batch.h"
#include "db/db_impl.h"
tiny_cache_ = NewLRUCache(100);
options_.env = &env_;
options_.block_cache = tiny_cache_;
- dbname_ = test::TmpDir() + "/corruption_test";
+ dbname_ = "/memenv/corruption_test";
DestroyDB(dbname_, options_);
- db_ = NULL;
+ db_ = nullptr;
options_.create_if_missing = true;
Reopen();
options_.create_if_missing = false;
~CorruptionTest() {
delete db_;
- DestroyDB(dbname_, Options());
delete tiny_cache_;
}
Status TryReopen() {
delete db_;
- db_ = NULL;
+ db_ = nullptr;
return DB::Open(options_, dbname_, &db_);
}
void RepairDB() {
delete db_;
- db_ = NULL;
+ db_ = nullptr;
ASSERT_OK(::leveldb::RepairDB(dbname_, options_));
}
void Corrupt(FileType filetype, int offset, int bytes_to_corrupt) {
// Pick file to corrupt
std::vector<std::string> filenames;
- ASSERT_OK(env_.GetChildren(dbname_, &filenames));
+ ASSERT_OK(env_.target()->GetChildren(dbname_, &filenames));
uint64_t number;
FileType type;
std::string fname;
}
ASSERT_TRUE(!fname.empty()) << filetype;
- struct stat sbuf;
- if (stat(fname.c_str(), &sbuf) != 0) {
- const char* msg = strerror(errno);
- ASSERT_TRUE(false) << fname << ": " << msg;
- }
+ uint64_t file_size;
+ ASSERT_OK(env_.target()->GetFileSize(fname, &file_size));
if (offset < 0) {
// Relative to end of file; make it absolute
- if (-offset > sbuf.st_size) {
+ if (-offset > file_size) {
offset = 0;
} else {
- offset = sbuf.st_size + offset;
+ offset = file_size + offset;
}
}
- if (offset > sbuf.st_size) {
- offset = sbuf.st_size;
+ if (offset > file_size) {
+ offset = file_size;
}
- if (offset + bytes_to_corrupt > sbuf.st_size) {
- bytes_to_corrupt = sbuf.st_size - offset;
+ if (offset + bytes_to_corrupt > file_size) {
+ bytes_to_corrupt = file_size - offset;
}
// Do it
std::string contents;
- Status s = ReadFileToString(Env::Default(), fname, &contents);
+ Status s = ReadFileToString(env_.target(), fname, &contents);
ASSERT_TRUE(s.ok()) << s.ToString();
for (int i = 0; i < bytes_to_corrupt; i++) {
contents[i + offset] ^= 0x80;
}
- s = WriteStringToFile(Env::Default(), contents, fname);
+ s = WriteStringToFile(env_.target(), contents, fname);
ASSERT_TRUE(s.ok()) << s.ToString();
}
Build(100);
DBImpl* dbi = reinterpret_cast<DBImpl*>(db_);
dbi->TEST_CompactMemTable();
- dbi->TEST_CompactRange(0, NULL, NULL);
- dbi->TEST_CompactRange(1, NULL, NULL);
+ dbi->TEST_CompactRange(0, nullptr, nullptr);
+ dbi->TEST_CompactRange(1, nullptr, nullptr);
Corrupt(kTableFile, 100, 1);
Check(90, 99);
Build(100);
DBImpl* dbi = reinterpret_cast<DBImpl*>(db_);
dbi->TEST_CompactMemTable();
- dbi->TEST_CompactRange(0, NULL, NULL);
- dbi->TEST_CompactRange(1, NULL, NULL);
+ dbi->TEST_CompactRange(0, nullptr, nullptr);
+ dbi->TEST_CompactRange(1, nullptr, nullptr);
Corrupt(kTableFile, 100, 1);
RepairDB();
ASSERT_OK(db_->Put(WriteOptions(), "foo", "hello"));
DBImpl* dbi = reinterpret_cast<DBImpl*>(db_);
dbi->TEST_CompactMemTable();
- dbi->TEST_CompactRange(0, NULL, NULL);
+ dbi->TEST_CompactRange(0, nullptr, nullptr);
Corrupt(kDescriptorFile, 0, 1000);
Status s = TryReopen();
Corrupt(kTableFile, 100, 1);
env_.SleepForMicroseconds(100000);
}
- dbi->CompactRange(NULL, NULL);
+ dbi->CompactRange(nullptr, nullptr);
// Write must fail because of corrupted table
std::string tmp1, tmp2;
#include <sys/types.h>
#include <stdio.h>
#include <stdlib.h>
-#include "db/db_impl.h"
-#include "db/version_set.h"
#include "leveldb/cache.h"
#include "leveldb/db.h"
#include "leveldb/env.h"
+#include "leveldb/filter_policy.h"
#include "leveldb/write_batch.h"
#include "port/port.h"
#include "util/crc32c.h"
// seekrandom -- N random seeks
// open -- cost of opening a DB
// crc32c -- repeated crc32c of 4K of data
-// acquireload -- load N*1000 times
// Meta operations:
// compact -- Compact the entire DB
// stats -- Print DB stats
"crc32c,"
"snappycomp,"
"snappyuncomp,"
- "acquireload,"
;
// Number of key/values to place in database
static bool FLAGS_reuse_logs = false;
// Use the db with the following name.
-static const char* FLAGS_db = NULL;
+static const char* FLAGS_db = nullptr;
namespace leveldb {
namespace {
-leveldb::Env* g_env = NULL;
+leveldb::Env* g_env = nullptr;
// Helper for quickly generating random data.
class RandomGenerator {
// State shared by all concurrent executions of the same benchmark.
struct SharedState {
port::Mutex mu;
- port::CondVar cv;
- int total;
+ port::CondVar cv GUARDED_BY(mu);
+ int total GUARDED_BY(mu);
// Each thread goes through the following states:
// (1) initializing
// (3) running
// (4) done
- int num_initialized;
- int num_done;
- bool start;
+ int num_initialized GUARDED_BY(mu);
+ int num_done GUARDED_BY(mu);
+ bool start GUARDED_BY(mu);
- SharedState() : cv(&mu) { }
+ SharedState(int total)
+ : cv(&mu), total(total), num_initialized(0), num_done(0), start(false) { }
};
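The GUARDED_BY annotations added above (and used throughout this patch) are Clang thread-safety annotations; LevelDB's macros live in port/thread_annotations.h. As a rough standalone sketch, not part of this patch and with invented names, the following shows the kind of compile-time checking these annotations enable when built with clang -Wthread-safety; on other compilers the macros degrade to no-ops, much like the real header.

#include <mutex>

// Hypothetical stand-ins for the macros in port/thread_annotations.h.
#if defined(__clang__)
#define CAPABILITY(x) __attribute__((capability(x)))
#define ACQUIRE(...) __attribute__((acquire_capability(__VA_ARGS__)))
#define RELEASE(...) __attribute__((release_capability(__VA_ARGS__)))
#define GUARDED_BY(x) __attribute__((guarded_by(x)))
#else
#define CAPABILITY(x)
#define ACQUIRE(...)
#define RELEASE(...)
#define GUARDED_BY(x)
#endif

// Annotated wrapper so Clang knows the guard below is a lockable capability.
class CAPABILITY("mutex") Mutex {
 public:
  void Lock() ACQUIRE() { mu_.lock(); }
  void Unlock() RELEASE() { mu_.unlock(); }

 private:
  std::mutex mu_;
};

class Counter {
 public:
  void Add(int n) {
    mu_.Lock();
    value_ += n;  // OK: mu_ is held.
    mu_.Unlock();
  }
  // int Peek() const { return value_; }  // -Wthread-safety flags this:
  //                                      // value_ read without holding mu_.

 private:
  Mutex mu_;
  int value_ GUARDED_BY(mu_);
};

int main() {
  Counter c;
  c.Add(1);
  return 0;
}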
// Per-thread state for concurrent executions of the same benchmark.
kMajorVersion, kMinorVersion);
#if defined(__linux)
- time_t now = time(NULL);
+ time_t now = time(nullptr);
fprintf(stderr, "Date: %s", ctime(&now)); // ctime() adds newline
FILE* cpuinfo = fopen("/proc/cpuinfo", "r");
- if (cpuinfo != NULL) {
+ if (cpuinfo != nullptr) {
char line[1000];
int num_cpus = 0;
std::string cpu_type;
std::string cache_size;
- while (fgets(line, sizeof(line), cpuinfo) != NULL) {
+ while (fgets(line, sizeof(line), cpuinfo) != nullptr) {
const char* sep = strchr(line, ':');
- if (sep == NULL) {
+ if (sep == nullptr) {
continue;
}
Slice key = TrimSpace(Slice(line, sep - 1 - line));
public:
Benchmark()
- : cache_(FLAGS_cache_size >= 0 ? NewLRUCache(FLAGS_cache_size) : NULL),
+ : cache_(FLAGS_cache_size >= 0 ? NewLRUCache(FLAGS_cache_size) : nullptr),
filter_policy_(FLAGS_bloom_bits >= 0
? NewBloomFilterPolicy(FLAGS_bloom_bits)
- : NULL),
- db_(NULL),
+ : nullptr),
+ db_(nullptr),
num_(FLAGS_num),
value_size_(FLAGS_value_size),
entries_per_batch_(1),
Open();
const char* benchmarks = FLAGS_benchmarks;
- while (benchmarks != NULL) {
+ while (benchmarks != nullptr) {
const char* sep = strchr(benchmarks, ',');
Slice name;
- if (sep == NULL) {
+ if (sep == nullptr) {
name = benchmarks;
- benchmarks = NULL;
+ benchmarks = nullptr;
} else {
name = Slice(benchmarks, sep - benchmarks);
benchmarks = sep + 1;
entries_per_batch_ = 1;
write_options_ = WriteOptions();
- void (Benchmark::*method)(ThreadState*) = NULL;
+ void (Benchmark::*method)(ThreadState*) = nullptr;
bool fresh_db = false;
int num_threads = FLAGS_threads;
method = &Benchmark::Compact;
} else if (name == Slice("crc32c")) {
method = &Benchmark::Crc32c;
- } else if (name == Slice("acquireload")) {
- method = &Benchmark::AcquireLoad;
} else if (name == Slice("snappycomp")) {
method = &Benchmark::SnappyCompress;
} else if (name == Slice("snappyuncomp")) {
if (FLAGS_use_existing_db) {
fprintf(stdout, "%-12s : skipped (--use_existing_db is true)\n",
name.ToString().c_str());
- method = NULL;
+ method = nullptr;
} else {
delete db_;
- db_ = NULL;
+ db_ = nullptr;
DestroyDB(FLAGS_db, Options());
Open();
}
}
- if (method != NULL) {
+ if (method != nullptr) {
RunBenchmark(num_threads, name, method);
}
}
void RunBenchmark(int n, Slice name,
void (Benchmark::*method)(ThreadState*)) {
- SharedState shared;
- shared.total = n;
- shared.num_initialized = 0;
- shared.num_done = 0;
- shared.start = false;
+ SharedState shared(n);
ThreadArg* arg = new ThreadArg[n];
for (int i = 0; i < n; i++) {
thread->stats.AddMessage(label);
}
- void AcquireLoad(ThreadState* thread) {
- int dummy;
- port::AtomicPointer ap(&dummy);
- int count = 0;
- void *ptr = NULL;
- thread->stats.AddMessage("(each op is 1000 loads)");
- while (count < 100000) {
- for (int i = 0; i < 1000; i++) {
- ptr = ap.Acquire_Load();
- }
- count++;
- thread->stats.FinishedSingleOp();
- }
- if (ptr == NULL) exit(1); // Disable unused variable warning.
- }
-
void SnappyCompress(ThreadState* thread) {
RandomGenerator gen;
Slice input = gen.Generate(Options().block_size);
}
void Open() {
- assert(db_ == NULL);
+ assert(db_ == nullptr);
Options options;
options.env = g_env;
options.create_if_missing = !FLAGS_use_existing_db;
}
void Compact(ThreadState* thread) {
- db_->CompactRange(NULL, NULL);
+ db_->CompactRange(nullptr, nullptr);
}
void PrintStats(const char* key) {
leveldb::g_env = leveldb::Env::Default();
// Choose a location for the test database if none given with --db=<path>
- if (FLAGS_db == NULL) {
+ if (FLAGS_db == nullptr) {
leveldb::g_env->GetTestDirectory(&default_db_path);
default_db_path += "/dbbench";
FLAGS_db = default_db_path.c_str();
#include "db/db_impl.h"
+#include <stdint.h>
+#include <stdio.h>
+
#include <algorithm>
+#include <atomic>
#include <set>
#include <string>
-#include <stdint.h>
-#include <stdio.h>
#include <vector>
+
#include "db/builder.h"
#include "db/db_iter.h"
#include "db/dbformat.h"
explicit CompactionState(Compaction* c)
: compaction(c),
- outfile(NULL),
- builder(NULL),
+ outfile(nullptr),
+ builder(nullptr),
total_bytes(0) {
}
};
// Fix user-supplied options to be reasonable
-template <class T,class V>
+template <class T, class V>
static void ClipToRange(T* ptr, V minvalue, V maxvalue) {
if (static_cast<V>(*ptr) > maxvalue) *ptr = maxvalue;
if (static_cast<V>(*ptr) < minvalue) *ptr = minvalue;
const Options& src) {
Options result = src;
result.comparator = icmp;
- result.filter_policy = (src.filter_policy != NULL) ? ipolicy : NULL;
+ result.filter_policy = (src.filter_policy != nullptr) ? ipolicy : nullptr;
ClipToRange(&result.max_open_files, 64 + kNumNonTableCacheFiles, 50000);
ClipToRange(&result.write_buffer_size, 64<<10, 1<<30);
ClipToRange(&result.max_file_size, 1<<20, 1<<30);
ClipToRange(&result.block_size, 1<<10, 4<<20);
- if (result.info_log == NULL) {
+ if (result.info_log == nullptr) {
// Open a log file in the same directory as the db
src.env->CreateDir(dbname); // In case it does not exist
src.env->RenameFile(InfoLogFileName(dbname), OldInfoLogFileName(dbname));
Status s = src.env->NewLogger(InfoLogFileName(dbname), &result.info_log);
if (!s.ok()) {
// No place suitable for logging
- result.info_log = NULL;
+ result.info_log = nullptr;
}
}
- if (result.block_cache == NULL) {
+ if (result.block_cache == nullptr) {
result.block_cache = NewLRUCache(8 << 20);
}
return result;
}
+static int TableCacheSize(const Options& sanitized_options) {
+ // Reserve ten files or so for other uses and give the rest to TableCache.
+ return sanitized_options.max_open_files - kNumNonTableCacheFiles;
+}
+
DBImpl::DBImpl(const Options& raw_options, const std::string& dbname)
: env_(raw_options.env),
internal_comparator_(raw_options.comparator),
owns_info_log_(options_.info_log != raw_options.info_log),
owns_cache_(options_.block_cache != raw_options.block_cache),
dbname_(dbname),
- db_lock_(NULL),
- shutting_down_(NULL),
- bg_cv_(&mutex_),
- mem_(NULL),
- imm_(NULL),
- logfile_(NULL),
+ table_cache_(new TableCache(dbname_, options_, TableCacheSize(options_))),
+ db_lock_(nullptr),
+ shutting_down_(false),
+ background_work_finished_signal_(&mutex_),
+ mem_(nullptr),
+ imm_(nullptr),
+ has_imm_(false),
+ logfile_(nullptr),
logfile_number_(0),
- log_(NULL),
+ log_(nullptr),
seed_(0),
tmp_batch_(new WriteBatch),
- bg_compaction_scheduled_(false),
- manual_compaction_(NULL) {
- has_imm_.Release_Store(NULL);
-
- // Reserve ten files or so for other uses and give the rest to TableCache.
- const int table_cache_size = options_.max_open_files - kNumNonTableCacheFiles;
- table_cache_ = new TableCache(dbname_, &options_, table_cache_size);
-
- versions_ = new VersionSet(dbname_, &options_, table_cache_,
- &internal_comparator_);
-}
+ background_compaction_scheduled_(false),
+ manual_compaction_(nullptr),
+ versions_(new VersionSet(dbname_, &options_, table_cache_,
+ &internal_comparator_)) {}
DBImpl::~DBImpl() {
- // Wait for background work to finish
+ // Wait for background work to finish.
mutex_.Lock();
- shutting_down_.Release_Store(this); // Any non-NULL value is ok
- while (bg_compaction_scheduled_) {
- bg_cv_.Wait();
+ shutting_down_.store(true, std::memory_order_release);
+ while (background_compaction_scheduled_) {
+ background_work_finished_signal_.Wait();
}
mutex_.Unlock();
- if (db_lock_ != NULL) {
+ if (db_lock_ != nullptr) {
env_->UnlockFile(db_lock_);
}
delete versions_;
- if (mem_ != NULL) mem_->Unref();
- if (imm_ != NULL) imm_->Unref();
+ if (mem_ != nullptr) mem_->Unref();
+ if (imm_ != nullptr) imm_->Unref();
delete tmp_batch_;
delete log_;
delete logfile_;
}
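Around this change, shutting_down_ goes from a port::AtomicPointer sentinel to a std::atomic<bool>: the destructor publishes the flag with a release store and the background path observes it with acquire loads. A minimal standalone sketch of that handshake, with hypothetical names rather than LevelDB code:

#include <atomic>
#include <chrono>
#include <cstdio>
#include <thread>

std::atomic<bool> shutting_down(false);

void BackgroundWorker() {
  // The acquire load pairs with the release store in main(): once the worker
  // observes true, it also observes every write made before that store.
  while (!shutting_down.load(std::memory_order_acquire)) {
    std::this_thread::sleep_for(std::chrono::milliseconds(10));
  }
  std::printf("background worker exiting\n");
}

int main() {
  std::thread bg(BackgroundWorker);
  std::this_thread::sleep_for(std::chrono::milliseconds(50));
  shutting_down.store(true, std::memory_order_release);  // publish shutdown
  bg.join();
  return 0;
}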
void DBImpl::DeleteObsoleteFiles() {
+ mutex_.AssertHeld();
+
if (!bg_error_.ok()) {
// After a background error, we don't know whether a new version may
// or may not have been committed, so we cannot safely garbage collect.
versions_->AddLiveFiles(&live);
std::vector<std::string> filenames;
- env_->GetChildren(dbname_, &filenames); // Ignoring errors on purpose
+ env_->GetChildren(dbname_, &filenames); // Ignoring errors on purpose
uint64_t number;
FileType type;
for (size_t i = 0; i < filenames.size(); i++) {
table_cache_->Evict(number);
}
Log(options_.info_log, "Delete type=%d #%lld\n",
- int(type),
+ static_cast<int>(type),
static_cast<unsigned long long>(number));
env_->DeleteFile(dbname_ + "/" + filenames[i]);
}
// committed only when the descriptor is created, and this directory
// may already exist from a previous failed creation attempt.
env_->CreateDir(dbname_);
- assert(db_lock_ == NULL);
+ assert(db_lock_ == nullptr);
Status s = env_->LockFile(LockFileName(dbname_), &db_lock_);
if (!s.ok()) {
return s;
Env* env;
Logger* info_log;
const char* fname;
- Status* status; // NULL if options_.paranoid_checks==false
+ Status* status; // null if options_.paranoid_checks==false
virtual void Corruption(size_t bytes, const Status& s) {
Log(info_log, "%s%s: dropping %d bytes; %s",
- (this->status == NULL ? "(ignoring error) " : ""),
+ (this->status == nullptr ? "(ignoring error) " : ""),
fname, static_cast<int>(bytes), s.ToString().c_str());
- if (this->status != NULL && this->status->ok()) *this->status = s;
+ if (this->status != nullptr && this->status->ok()) *this->status = s;
}
};
reporter.env = env_;
reporter.info_log = options_.info_log;
reporter.fname = fname.c_str();
- reporter.status = (options_.paranoid_checks ? &status : NULL);
+ reporter.status = (options_.paranoid_checks ? &status : nullptr);
// We intentionally make log::Reader do checksumming even if
// paranoid_checks==false so that corruptions cause entire commits
// to be skipped instead of propagating bad information (like overly
Slice record;
WriteBatch batch;
int compactions = 0;
- MemTable* mem = NULL;
+ MemTable* mem = nullptr;
while (reader.ReadRecord(&record, &scratch) &&
status.ok()) {
if (record.size() < 12) {
}
WriteBatchInternal::SetContents(&batch, record);
- if (mem == NULL) {
+ if (mem == nullptr) {
mem = new MemTable(internal_comparator_);
mem->Ref();
}
if (mem->ApproximateMemoryUsage() > options_.write_buffer_size) {
compactions++;
*save_manifest = true;
- status = WriteLevel0Table(mem, edit, NULL);
+ status = WriteLevel0Table(mem, edit, nullptr);
mem->Unref();
- mem = NULL;
+ mem = nullptr;
if (!status.ok()) {
// Reflect errors immediately so that conditions like full
// file-systems cause the DB::Open() to fail.
// See if we should keep reusing the last log file.
if (status.ok() && options_.reuse_logs && last_log && compactions == 0) {
- assert(logfile_ == NULL);
- assert(log_ == NULL);
- assert(mem_ == NULL);
+ assert(logfile_ == nullptr);
+ assert(log_ == nullptr);
+ assert(mem_ == nullptr);
uint64_t lfile_size;
if (env_->GetFileSize(fname, &lfile_size).ok() &&
env_->NewAppendableFile(fname, &logfile_).ok()) {
Log(options_.info_log, "Reusing old log %s \n", fname.c_str());
log_ = new log::Writer(logfile_, lfile_size);
logfile_number_ = log_number;
- if (mem != NULL) {
+ if (mem != nullptr) {
mem_ = mem;
- mem = NULL;
+ mem = nullptr;
} else {
- // mem can be NULL if lognum exists but was empty.
+ // mem can be nullptr if lognum exists but was empty.
mem_ = new MemTable(internal_comparator_);
mem_->Ref();
}
}
}
- if (mem != NULL) {
+ if (mem != nullptr) {
// mem did not get reused; compact it.
if (status.ok()) {
*save_manifest = true;
- status = WriteLevel0Table(mem, edit, NULL);
+ status = WriteLevel0Table(mem, edit, nullptr);
}
mem->Unref();
}
if (s.ok() && meta.file_size > 0) {
const Slice min_user_key = meta.smallest.user_key();
const Slice max_user_key = meta.largest.user_key();
- if (base != NULL) {
+ if (base != nullptr) {
level = base->PickLevelForMemTableOutput(min_user_key, max_user_key);
}
edit->AddFile(level, meta.number, meta.file_size,
void DBImpl::CompactMemTable() {
mutex_.AssertHeld();
- assert(imm_ != NULL);
+ assert(imm_ != nullptr);
// Save the contents of the memtable as a new Table
VersionEdit edit;
Status s = WriteLevel0Table(imm_, &edit, base);
base->Unref();
- if (s.ok() && shutting_down_.Acquire_Load()) {
+ if (s.ok() && shutting_down_.load(std::memory_order_acquire)) {
s = Status::IOError("Deleting DB during memtable compaction");
}
if (s.ok()) {
// Commit to the new state
imm_->Unref();
- imm_ = NULL;
- has_imm_.Release_Store(NULL);
+ imm_ = nullptr;
+ has_imm_.store(false, std::memory_order_release);
DeleteObsoleteFiles();
} else {
RecordBackgroundError(s);
}
}
}
- TEST_CompactMemTable(); // TODO(sanjay): Skip if memtable does not overlap
+ TEST_CompactMemTable(); // TODO(sanjay): Skip if memtable does not overlap
for (int level = 0; level < max_level_with_files; level++) {
TEST_CompactRange(level, begin, end);
}
}
-void DBImpl::TEST_CompactRange(int level, const Slice* begin,const Slice* end) {
+void DBImpl::TEST_CompactRange(int level, const Slice* begin,
+ const Slice* end) {
assert(level >= 0);
assert(level + 1 < config::kNumLevels);
ManualCompaction manual;
manual.level = level;
manual.done = false;
- if (begin == NULL) {
- manual.begin = NULL;
+ if (begin == nullptr) {
+ manual.begin = nullptr;
} else {
begin_storage = InternalKey(*begin, kMaxSequenceNumber, kValueTypeForSeek);
manual.begin = &begin_storage;
}
- if (end == NULL) {
- manual.end = NULL;
+ if (end == nullptr) {
+ manual.end = nullptr;
} else {
end_storage = InternalKey(*end, 0, static_cast<ValueType>(0));
manual.end = &end_storage;
}
MutexLock l(&mutex_);
- while (!manual.done && !shutting_down_.Acquire_Load() && bg_error_.ok()) {
- if (manual_compaction_ == NULL) { // Idle
+ while (!manual.done && !shutting_down_.load(std::memory_order_acquire) &&
+ bg_error_.ok()) {
+ if (manual_compaction_ == nullptr) { // Idle
manual_compaction_ = &manual;
MaybeScheduleCompaction();
} else { // Running either my compaction or another compaction.
- bg_cv_.Wait();
+ background_work_finished_signal_.Wait();
}
}
if (manual_compaction_ == &manual) {
// Cancel my manual compaction since we aborted early for some reason.
- manual_compaction_ = NULL;
+ manual_compaction_ = nullptr;
}
}
Status DBImpl::TEST_CompactMemTable() {
- // NULL batch means just wait for earlier writes to be done
- Status s = Write(WriteOptions(), NULL);
+ // nullptr batch means just wait for earlier writes to be done
+ Status s = Write(WriteOptions(), nullptr);
if (s.ok()) {
// Wait until the compaction completes
MutexLock l(&mutex_);
- while (imm_ != NULL && bg_error_.ok()) {
- bg_cv_.Wait();
+ while (imm_ != nullptr && bg_error_.ok()) {
+ background_work_finished_signal_.Wait();
}
- if (imm_ != NULL) {
+ if (imm_ != nullptr) {
s = bg_error_;
}
}
mutex_.AssertHeld();
if (bg_error_.ok()) {
bg_error_ = s;
- bg_cv_.SignalAll();
+ background_work_finished_signal_.SignalAll();
}
}
void DBImpl::MaybeScheduleCompaction() {
mutex_.AssertHeld();
- if (bg_compaction_scheduled_) {
+ if (background_compaction_scheduled_) {
// Already scheduled
- } else if (shutting_down_.Acquire_Load()) {
+ } else if (shutting_down_.load(std::memory_order_acquire)) {
// DB is being deleted; no more background compactions
} else if (!bg_error_.ok()) {
// Already got an error; no more changes
- } else if (imm_ == NULL &&
- manual_compaction_ == NULL &&
+ } else if (imm_ == nullptr &&
+ manual_compaction_ == nullptr &&
!versions_->NeedsCompaction()) {
// No work to be done
} else {
- bg_compaction_scheduled_ = true;
+ background_compaction_scheduled_ = true;
env_->Schedule(&DBImpl::BGWork, this);
}
}
void DBImpl::BackgroundCall() {
MutexLock l(&mutex_);
- assert(bg_compaction_scheduled_);
- if (shutting_down_.Acquire_Load()) {
+ assert(background_compaction_scheduled_);
+ if (shutting_down_.load(std::memory_order_acquire)) {
// No more background work when shutting down.
} else if (!bg_error_.ok()) {
// No more background work after a background error.
BackgroundCompaction();
}
- bg_compaction_scheduled_ = false;
+ background_compaction_scheduled_ = false;
// Previous compaction may have produced too many files in a level,
// so reschedule another compaction if needed.
MaybeScheduleCompaction();
- bg_cv_.SignalAll();
+ background_work_finished_signal_.SignalAll();
}
void DBImpl::BackgroundCompaction() {
mutex_.AssertHeld();
- if (imm_ != NULL) {
+ if (imm_ != nullptr) {
CompactMemTable();
return;
}
Compaction* c;
- bool is_manual = (manual_compaction_ != NULL);
+ bool is_manual = (manual_compaction_ != nullptr);
InternalKey manual_end;
if (is_manual) {
ManualCompaction* m = manual_compaction_;
c = versions_->CompactRange(m->level, m->begin, m->end);
- m->done = (c == NULL);
- if (c != NULL) {
+ m->done = (c == nullptr);
+ if (c != nullptr) {
manual_end = c->input(0, c->num_input_files(0) - 1)->largest;
}
Log(options_.info_log,
}
Status status;
- if (c == NULL) {
+ if (c == nullptr) {
// Nothing to do
} else if (!is_manual && c->IsTrivialMove()) {
// Move file to next level
if (status.ok()) {
// Done
- } else if (shutting_down_.Acquire_Load()) {
+ } else if (shutting_down_.load(std::memory_order_acquire)) {
// Ignore compaction errors found during shutting down
} else {
Log(options_.info_log,
m->tmp_storage = manual_end;
m->begin = &m->tmp_storage;
}
- manual_compaction_ = NULL;
+ manual_compaction_ = nullptr;
}
}
void DBImpl::CleanupCompaction(CompactionState* compact) {
mutex_.AssertHeld();
- if (compact->builder != NULL) {
+ if (compact->builder != nullptr) {
// May happen if we get a shutdown call in the middle of compaction
compact->builder->Abandon();
delete compact->builder;
} else {
- assert(compact->outfile == NULL);
+ assert(compact->outfile == nullptr);
}
delete compact->outfile;
for (size_t i = 0; i < compact->outputs.size(); i++) {
}
Status DBImpl::OpenCompactionOutputFile(CompactionState* compact) {
- assert(compact != NULL);
- assert(compact->builder == NULL);
+ assert(compact != nullptr);
+ assert(compact->builder == nullptr);
uint64_t file_number;
{
mutex_.Lock();
Status DBImpl::FinishCompactionOutputFile(CompactionState* compact,
Iterator* input) {
- assert(compact != NULL);
- assert(compact->outfile != NULL);
- assert(compact->builder != NULL);
+ assert(compact != nullptr);
+ assert(compact->outfile != nullptr);
+ assert(compact->builder != nullptr);
const uint64_t output_number = compact->current_output()->number;
assert(output_number != 0);
compact->current_output()->file_size = current_bytes;
compact->total_bytes += current_bytes;
delete compact->builder;
- compact->builder = NULL;
+ compact->builder = nullptr;
// Finish and check for file errors
if (s.ok()) {
s = compact->outfile->Close();
}
delete compact->outfile;
- compact->outfile = NULL;
+ compact->outfile = nullptr;
if (s.ok() && current_entries > 0) {
// Verify that the table is usable
compact->compaction->level() + 1);
assert(versions_->NumLevelFiles(compact->compaction->level()) > 0);
- assert(compact->builder == NULL);
- assert(compact->outfile == NULL);
+ assert(compact->builder == nullptr);
+ assert(compact->outfile == nullptr);
if (snapshots_.empty()) {
compact->smallest_snapshot = versions_->LastSequence();
} else {
- compact->smallest_snapshot = snapshots_.oldest()->number_;
+ compact->smallest_snapshot = snapshots_.oldest()->sequence_number();
}
// Release mutex while we're actually doing the compaction work
std::string current_user_key;
bool has_current_user_key = false;
SequenceNumber last_sequence_for_key = kMaxSequenceNumber;
- for (; input->Valid() && !shutting_down_.Acquire_Load(); ) {
+ for (; input->Valid() && !shutting_down_.load(std::memory_order_acquire); ) {
// Prioritize immutable compaction work
- if (has_imm_.NoBarrier_Load() != NULL) {
+ if (has_imm_.load(std::memory_order_relaxed)) {
const uint64_t imm_start = env_->NowMicros();
mutex_.Lock();
- if (imm_ != NULL) {
+ if (imm_ != nullptr) {
CompactMemTable();
- bg_cv_.SignalAll(); // Wakeup MakeRoomForWrite() if necessary
+ // Wake up MakeRoomForWrite() if necessary.
+ background_work_finished_signal_.SignalAll();
}
mutex_.Unlock();
imm_micros += (env_->NowMicros() - imm_start);
Slice key = input->key();
if (compact->compaction->ShouldStopBefore(key) &&
- compact->builder != NULL) {
+ compact->builder != nullptr) {
status = FinishCompactionOutputFile(compact, input);
if (!status.ok()) {
break;
if (!drop) {
// Open output file if necessary
- if (compact->builder == NULL) {
+ if (compact->builder == nullptr) {
status = OpenCompactionOutputFile(compact);
if (!status.ok()) {
break;
input->Next();
}
- if (status.ok() && shutting_down_.Acquire_Load()) {
+ if (status.ok() && shutting_down_.load(std::memory_order_acquire)) {
status = Status::IOError("Deleting DB during compaction");
}
- if (status.ok() && compact->builder != NULL) {
+ if (status.ok() && compact->builder != nullptr) {
status = FinishCompactionOutputFile(compact, input);
}
if (status.ok()) {
status = input->status();
}
delete input;
- input = NULL;
+ input = nullptr;
CompactionStats stats;
stats.micros = env_->NowMicros() - start_micros - imm_micros;
}
namespace {
+
struct IterState {
- port::Mutex* mu;
- Version* version;
- MemTable* mem;
- MemTable* imm;
+ port::Mutex* const mu;
+ Version* const version GUARDED_BY(mu);
+ MemTable* const mem GUARDED_BY(mu);
+ MemTable* const imm GUARDED_BY(mu);
+
+ IterState(port::Mutex* mutex, MemTable* mem, MemTable* imm, Version* version)
+ : mu(mutex), version(version), mem(mem), imm(imm) { }
};
static void CleanupIteratorState(void* arg1, void* arg2) {
IterState* state = reinterpret_cast<IterState*>(arg1);
state->mu->Lock();
state->mem->Unref();
- if (state->imm != NULL) state->imm->Unref();
+ if (state->imm != nullptr) state->imm->Unref();
state->version->Unref();
state->mu->Unlock();
delete state;
}
-} // namespace
+
+} // anonymous namespace
Iterator* DBImpl::NewInternalIterator(const ReadOptions& options,
SequenceNumber* latest_snapshot,
uint32_t* seed) {
- IterState* cleanup = new IterState;
mutex_.Lock();
*latest_snapshot = versions_->LastSequence();
std::vector<Iterator*> list;
list.push_back(mem_->NewIterator());
mem_->Ref();
- if (imm_ != NULL) {
+ if (imm_ != nullptr) {
list.push_back(imm_->NewIterator());
imm_->Ref();
}
NewMergingIterator(&internal_comparator_, &list[0], list.size());
versions_->current()->Ref();
- cleanup->mu = &mutex_;
- cleanup->mem = mem_;
- cleanup->imm = imm_;
- cleanup->version = versions_->current();
- internal_iter->RegisterCleanup(CleanupIteratorState, cleanup, NULL);
+ IterState* cleanup = new IterState(&mutex_, mem_, imm_, versions_->current());
+ internal_iter->RegisterCleanup(CleanupIteratorState, cleanup, nullptr);
*seed = ++seed_;
mutex_.Unlock();
Status s;
MutexLock l(&mutex_);
SequenceNumber snapshot;
- if (options.snapshot != NULL) {
- snapshot = reinterpret_cast<const SnapshotImpl*>(options.snapshot)->number_;
+ if (options.snapshot != nullptr) {
+ snapshot =
+ static_cast<const SnapshotImpl*>(options.snapshot)->sequence_number();
} else {
snapshot = versions_->LastSequence();
}
MemTable* imm = imm_;
Version* current = versions_->current();
mem->Ref();
- if (imm != NULL) imm->Ref();
+ if (imm != nullptr) imm->Ref();
current->Ref();
bool have_stat_update = false;
LookupKey lkey(key, snapshot);
if (mem->Get(lkey, value, &s)) {
// Done
- } else if (imm != NULL && imm->Get(lkey, value, &s)) {
+ } else if (imm != nullptr && imm->Get(lkey, value, &s)) {
// Done
} else {
s = current->Get(options, lkey, value, &stats);
MaybeScheduleCompaction();
}
mem->Unref();
- if (imm != NULL) imm->Unref();
+ if (imm != nullptr) imm->Unref();
current->Unref();
return s;
}
Iterator* iter = NewInternalIterator(options, &latest_snapshot, &seed);
return NewDBIterator(
this, user_comparator(), iter,
- (options.snapshot != NULL
- ? reinterpret_cast<const SnapshotImpl*>(options.snapshot)->number_
+ (options.snapshot != nullptr
+ ? static_cast<const SnapshotImpl*>(options.snapshot)->sequence_number()
: latest_snapshot),
seed);
}
return snapshots_.New(versions_->LastSequence());
}
-void DBImpl::ReleaseSnapshot(const Snapshot* s) {
+void DBImpl::ReleaseSnapshot(const Snapshot* snapshot) {
MutexLock l(&mutex_);
- snapshots_.Delete(reinterpret_cast<const SnapshotImpl*>(s));
+ snapshots_.Delete(static_cast<const SnapshotImpl*>(snapshot));
}
// Convenience methods
}
// May temporarily unlock and wait.
- Status status = MakeRoomForWrite(my_batch == NULL);
+ Status status = MakeRoomForWrite(my_batch == nullptr);
uint64_t last_sequence = versions_->LastSequence();
Writer* last_writer = &w;
- if (status.ok() && my_batch != NULL) { // NULL batch is for compactions
+ if (status.ok() && my_batch != nullptr) { // nullptr batch is for compactions
WriteBatch* updates = BuildBatchGroup(&last_writer);
WriteBatchInternal::SetSequence(updates, last_sequence + 1);
last_sequence += WriteBatchInternal::Count(updates);
}
// REQUIRES: Writer list must be non-empty
-// REQUIRES: First writer must have a non-NULL batch
+// REQUIRES: First writer must have a non-null batch
WriteBatch* DBImpl::BuildBatchGroup(Writer** last_writer) {
+ mutex_.AssertHeld();
assert(!writers_.empty());
Writer* first = writers_.front();
WriteBatch* result = first->batch;
- assert(result != NULL);
+ assert(result != nullptr);
size_t size = WriteBatchInternal::ByteSize(first->batch);
break;
}
- if (w->batch != NULL) {
+ if (w->batch != nullptr) {
size += WriteBatchInternal::ByteSize(w->batch);
if (size > max_size) {
// Do not make batch too big
(mem_->ApproximateMemoryUsage() <= options_.write_buffer_size)) {
// There is room in current memtable
break;
- } else if (imm_ != NULL) {
+ } else if (imm_ != nullptr) {
// We have filled up the current memtable, but the previous
// one is still being compacted, so we wait.
Log(options_.info_log, "Current memtable full; waiting...\n");
- bg_cv_.Wait();
+ background_work_finished_signal_.Wait();
} else if (versions_->NumLevelFiles(0) >= config::kL0_StopWritesTrigger) {
// There are too many level-0 files.
Log(options_.info_log, "Too many L0 files; waiting...\n");
- bg_cv_.Wait();
+ background_work_finished_signal_.Wait();
} else {
// Attempt to switch to a new memtable and trigger compaction of old
assert(versions_->PrevLogNumber() == 0);
uint64_t new_log_number = versions_->NewFileNumber();
- WritableFile* lfile = NULL;
+ WritableFile* lfile = nullptr;
s = env_->NewWritableFile(LogFileName(dbname_, new_log_number), &lfile);
if (!s.ok()) {
// Avoid chewing through file number space in a tight loop.
logfile_number_ = new_log_number;
log_ = new log::Writer(lfile);
imm_ = mem_;
- has_imm_.Release_Store(imm_);
+ has_imm_.store(true, std::memory_order_release);
mem_ = new MemTable(internal_comparator_);
mem_->Ref();
force = false; // Do not force another compaction if have room
Status DB::Open(const Options& options, const std::string& dbname,
DB** dbptr) {
- *dbptr = NULL;
+ *dbptr = nullptr;
DBImpl* impl = new DBImpl(options, dbname);
impl->mutex_.Lock();
// Recover handles create_if_missing, error_if_exists
bool save_manifest = false;
Status s = impl->Recover(&edit, &save_manifest);
- if (s.ok() && impl->mem_ == NULL) {
+ if (s.ok() && impl->mem_ == nullptr) {
// Create new log and a corresponding memtable.
uint64_t new_log_number = impl->versions_->NewFileNumber();
WritableFile* lfile;
}
impl->mutex_.Unlock();
if (s.ok()) {
- assert(impl->mem_ != NULL);
+ assert(impl->mem_ != nullptr);
*dbptr = impl;
} else {
delete impl;
Status DestroyDB(const std::string& dbname, const Options& options) {
Env* env = options.env;
std::vector<std::string> filenames;
- // Ignore error in case directory does not exist
- env->GetChildren(dbname, &filenames);
- if (filenames.empty()) {
+ Status result = env->GetChildren(dbname, &filenames);
+ if (!result.ok()) {
+ // Ignore error in case directory does not exist
return Status::OK();
}
FileLock* lock;
const std::string lockname = LockFileName(dbname);
- Status result = env->LockFile(lockname, &lock);
+ result = env->LockFile(lockname, &lock);
if (result.ok()) {
uint64_t number;
FileType type;
#ifndef STORAGE_LEVELDB_DB_DB_IMPL_H_
#define STORAGE_LEVELDB_DB_DB_IMPL_H_
+#include <atomic>
#include <deque>
#include <set>
+#include <string>
+
#include "db/dbformat.h"
#include "db/log_writer.h"
#include "db/snapshot.h"
void MaybeIgnoreError(Status* s) const;
// Delete any unneeded files and stale in-memory entries.
- void DeleteObsoleteFiles();
+ void DeleteObsoleteFiles() EXCLUSIVE_LOCKS_REQUIRED(mutex_);
// Compact the in-memory write buffer to disk. Switches to a new
// log-file/memtable and writes a new descriptor iff successful.
Status MakeRoomForWrite(bool force /* compact even if there is room? */)
EXCLUSIVE_LOCKS_REQUIRED(mutex_);
- WriteBatch* BuildBatchGroup(Writer** last_writer);
+ WriteBatch* BuildBatchGroup(Writer** last_writer)
+ EXCLUSIVE_LOCKS_REQUIRED(mutex_);
void RecordBackgroundError(const Status& s);
void MaybeScheduleCompaction() EXCLUSIVE_LOCKS_REQUIRED(mutex_);
static void BGWork(void* db);
void BackgroundCall();
- void BackgroundCompaction() EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ void BackgroundCompaction() EXCLUSIVE_LOCKS_REQUIRED(mutex_);
void CleanupCompaction(CompactionState* compact)
EXCLUSIVE_LOCKS_REQUIRED(mutex_);
Status DoCompactionWork(CompactionState* compact)
const InternalKeyComparator internal_comparator_;
const InternalFilterPolicy internal_filter_policy_;
const Options options_; // options_.comparator == &internal_comparator_
- bool owns_info_log_;
- bool owns_cache_;
+ const bool owns_info_log_;
+ const bool owns_cache_;
const std::string dbname_;
// table_cache_ provides its own synchronization
- TableCache* table_cache_;
+ TableCache* const table_cache_;
- // Lock over the persistent DB state. Non-NULL iff successfully acquired.
+ // Lock over the persistent DB state. Non-null iff successfully acquired.
FileLock* db_lock_;
// State below is protected by mutex_
port::Mutex mutex_;
- port::AtomicPointer shutting_down_;
- port::CondVar bg_cv_; // Signalled when background work finishes
+ std::atomic<bool> shutting_down_;
+ port::CondVar background_work_finished_signal_ GUARDED_BY(mutex_);
MemTable* mem_;
- MemTable* imm_; // Memtable being compacted
- port::AtomicPointer has_imm_; // So bg thread can detect non-NULL imm_
+ MemTable* imm_ GUARDED_BY(mutex_); // Memtable being compacted
+ std::atomic<bool> has_imm_; // So bg thread can detect non-null imm_
WritableFile* logfile_;
- uint64_t logfile_number_;
+ uint64_t logfile_number_ GUARDED_BY(mutex_);
log::Writer* log_;
- uint32_t seed_; // For sampling.
+ uint32_t seed_ GUARDED_BY(mutex_); // For sampling.
// Queue of writers.
- std::deque<Writer*> writers_;
- WriteBatch* tmp_batch_;
+ std::deque<Writer*> writers_ GUARDED_BY(mutex_);
+ WriteBatch* tmp_batch_ GUARDED_BY(mutex_);
- SnapshotList snapshots_;
+ SnapshotList snapshots_ GUARDED_BY(mutex_);
// Set of table files to protect from deletion because they are
// part of ongoing compactions.
- std::set<uint64_t> pending_outputs_;
+ std::set<uint64_t> pending_outputs_ GUARDED_BY(mutex_);
// Has a background compaction been scheduled or is running?
- bool bg_compaction_scheduled_;
+ bool background_compaction_scheduled_ GUARDED_BY(mutex_);
// Information for a manual compaction
struct ManualCompaction {
int level;
bool done;
- const InternalKey* begin; // NULL means beginning of key range
- const InternalKey* end; // NULL means end of key range
+ const InternalKey* begin; // null means beginning of key range
+ const InternalKey* end; // null means end of key range
InternalKey tmp_storage; // Used to keep track of compaction progress
};
- ManualCompaction* manual_compaction_;
+ ManualCompaction* manual_compaction_ GUARDED_BY(mutex_);
- VersionSet* versions_;
+ VersionSet* const versions_;
// Have we encountered a background error in paranoid mode?
- Status bg_error_;
+ Status bg_error_ GUARDED_BY(mutex_);
// Per level compaction stats. stats_[level] stores the stats for
// compactions that produced data for the specified "level".
this->bytes_written += c.bytes_written;
}
};
- CompactionStats stats_[config::kNumLevels];
+ CompactionStats stats_[config::kNumLevels] GUARDED_BY(mutex_);
// No copying allowed
DBImpl(const DBImpl&);
// Sanitize db options. The caller should delete result.info_log if
// it is not equal to src.info_log.
-extern Options SanitizeOptions(const std::string& db,
- const InternalKeyComparator* icmp,
- const InternalFilterPolicy* ipolicy,
- const Options& src);
+Options SanitizeOptions(const std::string& db,
+ const InternalKeyComparator* icmp,
+ const InternalFilterPolicy* ipolicy,
+ const Options& src);
} // namespace leveldb
direction_(kForward),
valid_(false),
rnd_(seed),
- bytes_counter_(RandomPeriod()) {
+ bytes_until_read_sampling_(RandomCompactionPeriod()) {
}
virtual ~DBIter() {
delete iter_;
}
}
- // Pick next gap with average value of config::kReadBytesPeriod.
- ssize_t RandomPeriod() {
+ // Picks the number of bytes that can be read until a compaction is scheduled.
+ size_t RandomCompactionPeriod() {
return rnd_.Uniform(2*config::kReadBytesPeriod);
}
bool valid_;
Random rnd_;
- ssize_t bytes_counter_;
+ size_t bytes_until_read_sampling_;
// No copying allowed
DBIter(const DBIter&);
inline bool DBIter::ParseKey(ParsedInternalKey* ikey) {
Slice k = iter_->key();
- ssize_t n = k.size() + iter_->value().size();
- bytes_counter_ -= n;
- while (bytes_counter_ < 0) {
- bytes_counter_ += RandomPeriod();
+
+ size_t bytes_read = k.size() + iter_->value().size();
+ while (bytes_until_read_sampling_ < bytes_read) {
+ bytes_until_read_sampling_ += RandomCompactionPeriod();
db_->RecordReadSample(k);
}
+ assert(bytes_until_read_sampling_ >= bytes_read);
+ bytes_until_read_sampling_ -= bytes_read;
+
if (!ParseInternalKey(k, ikey)) {
status_ = Status::Corruption("corrupted internal key in DBIter");
return false;
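The renamed bytes_until_read_sampling_ counter above is a byte-budget sampler: each refill is drawn uniformly from [0, 2 * config::kReadBytesPeriod), so RecordReadSample() fires on average about once per kReadBytesPeriod bytes iterated. A standalone sketch of the same scheme, with invented constants rather than LevelDB's:

#include <cstddef>
#include <cstdio>
#include <random>

int main() {
  const std::size_t kPeriod = 1024;       // stand-in for config::kReadBytesPeriod
  const std::size_t kReads = 100000;      // simulated iterator steps
  const std::size_t kBytesPerRead = 100;  // simulated key+value size per step

  std::mt19937 rng(42);
  std::uniform_int_distribution<std::size_t> refill(0, 2 * kPeriod - 1);

  std::size_t budget = refill(rng);       // bytes until the next sample
  std::size_t samples = 0;
  for (std::size_t i = 0; i < kReads; ++i) {
    while (budget < kBytesPerRead) {
      budget += refill(rng);              // top up with Uniform[0, 2 * kPeriod)
      ++samples;                          // LevelDB would record a read sample here
    }
    budget -= kBytesPerRead;
  }
  // Samples arrive roughly once per kPeriod bytes on average.
  std::printf("samples: %zu, expected about %zu\n",
              samples, kReads * kBytesPerRead / kPeriod);
  return 0;
}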
// Return a new iterator that converts internal keys (yielded by
// "*internal_iter") that were live at the specified "sequence" number
// into appropriate user keys.
-extern Iterator* NewDBIterator(
- DBImpl* db,
- const Comparator* user_key_comparator,
- Iterator* internal_iter,
- SequenceNumber sequence,
- uint32_t seed);
+Iterator* NewDBIterator(DBImpl* db,
+ const Comparator* user_key_comparator,
+ Iterator* internal_iter,
+ SequenceNumber sequence,
+ uint32_t seed);
} // namespace leveldb
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
+#include <atomic>
+#include <string>
+
#include "leveldb/db.h"
#include "leveldb/filter_policy.h"
#include "db/db_impl.h"
#include "leveldb/cache.h"
#include "leveldb/env.h"
#include "leveldb/table.h"
+#include "port/port.h"
+#include "port/thread_annotations.h"
#include "util/hash.h"
#include "util/logging.h"
#include "util/mutexlock.h"
return r;
}
+static std::string RandomKey(Random* rnd) {
+ int len = (rnd->OneIn(3)
+ ? 1 // Short sometimes to encourage collisions
+ : (rnd->OneIn(100) ? rnd->Skewed(10) : rnd->Uniform(10)));
+ return test::RandomKey(rnd, len);
+}
+
namespace {
class AtomicCounter {
private:
port::Mutex mu_;
- int count_;
+ int count_ GUARDED_BY(mu_);
public:
AtomicCounter() : count_(0) { }
void Increment() {
IncrementBy(1);
}
- void IncrementBy(int count) {
+ void IncrementBy(int count) LOCKS_EXCLUDED(mu_) {
MutexLock l(&mu_);
count_ += count;
}
- int Read() {
+ int Read() LOCKS_EXCLUDED(mu_) {
MutexLock l(&mu_);
return count_;
}
- void Reset() {
+ void Reset() LOCKS_EXCLUDED(mu_) {
MutexLock l(&mu_);
count_ = 0;
}
void DelayMilliseconds(int millis) {
Env::Default()->SleepForMicroseconds(millis * 1000);
}
-}
+} // namespace
+
+// Test Env to override default Env behavior for testing.
+class TestEnv : public EnvWrapper {
+ public:
+ explicit TestEnv(Env* base) : EnvWrapper(base), ignore_dot_files_(false) {}
+
+ void SetIgnoreDotFiles(bool ignored) { ignore_dot_files_ = ignored; }
+
+ Status GetChildren(const std::string& dir,
+ std::vector<std::string>* result) override {
+ Status s = target()->GetChildren(dir, result);
+ if (!s.ok() || !ignore_dot_files_) {
+ return s;
+ }
+
+ std::vector<std::string>::iterator it = result->begin();
+ while (it != result->end()) {
+ if ((*it == ".") || (*it == "..")) {
+ it = result->erase(it);
+ } else {
+ ++it;
+ }
+ }
+
+ return s;
+ }
+
+ private:
+ bool ignore_dot_files_;
+};
+
-// Special Env used to delay background operations
+// Special Env used to delay background operations.
class SpecialEnv : public EnvWrapper {
public:
- // sstable/log Sync() calls are blocked while this pointer is non-NULL.
- port::AtomicPointer delay_data_sync_;
+ // sstable/log Sync() calls are blocked while this bool is true.
+ std::atomic<bool> delay_data_sync_;
// sstable/log Sync() calls return an error.
- port::AtomicPointer data_sync_error_;
+ std::atomic<bool> data_sync_error_;
- // Simulate no-space errors while this pointer is non-NULL.
- port::AtomicPointer no_space_;
+ // Simulate no-space errors while this bool is true.
+ std::atomic<bool> no_space_;
- // Simulate non-writable file system while this pointer is non-NULL
- port::AtomicPointer non_writable_;
+ // Simulate a non-writable file system while this bool is true.
+ std::atomic<bool> non_writable_;
- // Force sync of manifest files to fail while this pointer is non-NULL
- port::AtomicPointer manifest_sync_error_;
+ // Force sync of manifest files to fail while this bool is true.
+ std::atomic<bool> manifest_sync_error_;
- // Force write to manifest files to fail while this pointer is non-NULL
- port::AtomicPointer manifest_write_error_;
+ // Force write to manifest files to fail while this bool is true.
+ std::atomic<bool> manifest_write_error_;
bool count_random_reads_;
AtomicCounter random_read_counter_;
- explicit SpecialEnv(Env* base) : EnvWrapper(base) {
- delay_data_sync_.Release_Store(NULL);
- data_sync_error_.Release_Store(NULL);
- no_space_.Release_Store(NULL);
- non_writable_.Release_Store(NULL);
- count_random_reads_ = false;
- manifest_sync_error_.Release_Store(NULL);
- manifest_write_error_.Release_Store(NULL);
+ explicit SpecialEnv(Env* base) : EnvWrapper(base),
+ delay_data_sync_(false),
+ data_sync_error_(false),
+ no_space_(false),
+ non_writable_(false),
+ manifest_sync_error_(false),
+ manifest_write_error_(false),
+ count_random_reads_(false) {
}
Status NewWritableFile(const std::string& f, WritableFile** r) {
class DataFile : public WritableFile {
private:
- SpecialEnv* env_;
- WritableFile* base_;
+ SpecialEnv* const env_;
+ WritableFile* const base_;
public:
DataFile(SpecialEnv* env, WritableFile* base)
}
~DataFile() { delete base_; }
Status Append(const Slice& data) {
- if (env_->no_space_.Acquire_Load() != NULL) {
+ if (env_->no_space_.load(std::memory_order_acquire)) {
// Drop writes on the floor
return Status::OK();
} else {
Status Close() { return base_->Close(); }
Status Flush() { return base_->Flush(); }
Status Sync() {
- if (env_->data_sync_error_.Acquire_Load() != NULL) {
+ if (env_->data_sync_error_.load(std::memory_order_acquire)) {
return Status::IOError("simulated data sync error");
}
- while (env_->delay_data_sync_.Acquire_Load() != NULL) {
+ while (env_->delay_data_sync_.load(std::memory_order_acquire)) {
DelayMilliseconds(100);
}
return base_->Sync();
ManifestFile(SpecialEnv* env, WritableFile* b) : env_(env), base_(b) { }
~ManifestFile() { delete base_; }
Status Append(const Slice& data) {
- if (env_->manifest_write_error_.Acquire_Load() != NULL) {
+ if (env_->manifest_write_error_.load(std::memory_order_acquire)) {
return Status::IOError("simulated writer error");
} else {
return base_->Append(data);
Status Close() { return base_->Close(); }
Status Flush() { return base_->Flush(); }
Status Sync() {
- if (env_->manifest_sync_error_.Acquire_Load() != NULL) {
+ if (env_->manifest_sync_error_.load(std::memory_order_acquire)) {
return Status::IOError("simulated sync error");
} else {
return base_->Sync();
}
};
- if (non_writable_.Acquire_Load() != NULL) {
+ if (non_writable_.load(std::memory_order_acquire)) {
return Status::IOError("simulated write error");
}
Status s = target()->NewWritableFile(f, r);
if (s.ok()) {
- if (strstr(f.c_str(), ".ldb") != NULL ||
- strstr(f.c_str(), ".log") != NULL) {
+ if (strstr(f.c_str(), ".ldb") != nullptr ||
+ strstr(f.c_str(), ".log") != nullptr) {
*r = new DataFile(this, *r);
- } else if (strstr(f.c_str(), "MANIFEST") != NULL) {
+ } else if (strstr(f.c_str(), "MANIFEST") != nullptr) {
*r = new ManifestFile(this, *r);
}
}
filter_policy_ = NewBloomFilterPolicy(10);
dbname_ = test::TmpDir() + "/db_test";
DestroyDB(dbname_, Options());
- db_ = NULL;
+ db_ = nullptr;
Reopen();
}
return reinterpret_cast<DBImpl*>(db_);
}
- void Reopen(Options* options = NULL) {
+ void Reopen(Options* options = nullptr) {
ASSERT_OK(TryReopen(options));
}
void Close() {
delete db_;
- db_ = NULL;
+ db_ = nullptr;
}
- void DestroyAndReopen(Options* options = NULL) {
+ void DestroyAndReopen(Options* options = nullptr) {
delete db_;
- db_ = NULL;
+ db_ = nullptr;
DestroyDB(dbname_, Options());
ASSERT_OK(TryReopen(options));
}
Status TryReopen(Options* options) {
delete db_;
- db_ = NULL;
+ db_ = nullptr;
Options opts;
- if (options != NULL) {
+ if (options != nullptr) {
opts = *options;
} else {
opts = CurrentOptions();
return db_->Delete(WriteOptions(), k);
}
- std::string Get(const std::string& k, const Snapshot* snapshot = NULL) {
+ std::string Get(const std::string& k, const Snapshot* snapshot = nullptr) {
ReadOptions options;
options.snapshot = snapshot;
std::string result;
ASSERT_TRUE(
db_->GetProperty("leveldb.num-files-at-level" + NumberToString(level),
&property));
- return atoi(property.c_str());
+ return std::stoi(property);
}
int TotalTableFiles() {
}
// Do n memtable compactions, each of which produces an sstable
- // covering the range [small,large].
- void MakeTables(int n, const std::string& small, const std::string& large) {
+ // covering the range [small_key,large_key].
+ void MakeTables(int n, const std::string& small_key,
+ const std::string& large_key) {
for (int i = 0; i < n; i++) {
- Put(small, "begin");
- Put(large, "end");
+ Put(small_key, "begin");
+ Put(large_key, "end");
dbfull()->TEST_CompactMemTable();
}
}
TEST(DBTest, Empty) {
do {
- ASSERT_TRUE(db_ != NULL);
+ ASSERT_TRUE(db_ != nullptr);
ASSERT_EQ("NOT_FOUND", Get("foo"));
} while (ChangeOptions());
}
+TEST(DBTest, EmptyKey) {
+ do {
+ ASSERT_OK(Put("", "v1"));
+ ASSERT_EQ("v1", Get(""));
+ ASSERT_OK(Put("", "v2"));
+ ASSERT_EQ("v2", Get(""));
+ } while (ChangeOptions());
+}
+
+TEST(DBTest, EmptyValue) {
+ do {
+ ASSERT_OK(Put("key", "v1"));
+ ASSERT_EQ("v1", Get("key"));
+ ASSERT_OK(Put("key", ""));
+ ASSERT_EQ("", Get("key"));
+ ASSERT_OK(Put("key", "v2"));
+ ASSERT_EQ("v2", Get("key"));
+ } while (ChangeOptions());
+}
+
TEST(DBTest, ReadWrite) {
do {
ASSERT_OK(Put("foo", "v1"));
ASSERT_OK(Put("foo", "v1"));
ASSERT_EQ("v1", Get("foo"));
- env_->delay_data_sync_.Release_Store(env_); // Block sync calls
- Put("k1", std::string(100000, 'x')); // Fill memtable
- Put("k2", std::string(100000, 'y')); // Trigger compaction
+ // Block sync calls.
+ env_->delay_data_sync_.store(true, std::memory_order_release);
+ Put("k1", std::string(100000, 'x')); // Fill memtable.
+ Put("k2", std::string(100000, 'y')); // Trigger compaction.
ASSERT_EQ("v1", Get("foo"));
- env_->delay_data_sync_.Release_Store(NULL); // Release sync calls
+ // Release sync calls.
+ env_->delay_data_sync_.store(false, std::memory_order_release);
} while (ChangeOptions());
}
ASSERT_OK(Put("foo", "v1"));
std::string val;
ASSERT_TRUE(db_->GetProperty("leveldb.approximate-memory-usage", &val));
- int mem_usage = atoi(val.c_str());
+ int mem_usage = std::stoi(val);
ASSERT_GT(mem_usage, 0);
ASSERT_LT(mem_usage, 5*1024*1024);
} while (ChangeOptions());
} while (ChangeOptions());
}
+TEST(DBTest, GetIdenticalSnapshots) {
+ do {
+ // Try with both a short key and a long key
+ for (int i = 0; i < 2; i++) {
+ std::string key = (i == 0) ? std::string("foo") : std::string(200, 'x');
+ ASSERT_OK(Put(key, "v1"));
+ const Snapshot* s1 = db_->GetSnapshot();
+ const Snapshot* s2 = db_->GetSnapshot();
+ const Snapshot* s3 = db_->GetSnapshot();
+ ASSERT_OK(Put(key, "v2"));
+ ASSERT_EQ("v2", Get(key));
+ ASSERT_EQ("v1", Get(key, s1));
+ ASSERT_EQ("v1", Get(key, s2));
+ ASSERT_EQ("v1", Get(key, s3));
+ db_->ReleaseSnapshot(s1);
+ dbfull()->TEST_CompactMemTable();
+ ASSERT_EQ("v2", Get(key));
+ ASSERT_EQ("v1", Get(key, s2));
+ db_->ReleaseSnapshot(s2);
+ ASSERT_EQ("v1", Get(key, s3));
+ db_->ReleaseSnapshot(s3);
+ }
+ } while (ChangeOptions());
+}
+
+TEST(DBTest, IterateOverEmptySnapshot) {
+ do {
+ const Snapshot* snapshot = db_->GetSnapshot();
+ ReadOptions read_options;
+ read_options.snapshot = snapshot;
+ ASSERT_OK(Put("foo", "v1"));
+ ASSERT_OK(Put("foo", "v2"));
+
+ Iterator* iterator1 = db_->NewIterator(read_options);
+ iterator1->SeekToFirst();
+ ASSERT_TRUE(!iterator1->Valid());
+ delete iterator1;
+
+ dbfull()->TEST_CompactMemTable();
+
+ Iterator* iterator2 = db_->NewIterator(read_options);
+ iterator2->SeekToFirst();
+ ASSERT_TRUE(!iterator2->Valid());
+ delete iterator2;
+
+ db_->ReleaseSnapshot(snapshot);
+ } while (ChangeOptions());
+}
+
TEST(DBTest, GetLevel0Ordering) {
do {
// Check that we process level-0 files in correct order. The code
}
// Step 2: clear level 1 if necessary.
- dbfull()->TEST_CompactRange(1, NULL, NULL);
+ dbfull()->TEST_CompactRange(1, nullptr, nullptr);
ASSERT_EQ(NumTableFilesAtLevel(0), 1);
ASSERT_EQ(NumTableFilesAtLevel(1), 0);
ASSERT_EQ(NumTableFilesAtLevel(2), 1);
// Reopening moves updates to level-0
Reopen(&options);
- dbfull()->TEST_CompactRange(0, NULL, NULL);
+ dbfull()->TEST_CompactRange(0, nullptr, nullptr);
ASSERT_EQ(NumTableFilesAtLevel(0), 0);
ASSERT_GT(NumTableFilesAtLevel(1), 1);
for (int i = 0; i < 5 * kMaxFiles; i++) {
Put("key", value);
ASSERT_LE(TotalTableFiles(), kMaxFiles);
- fprintf(stderr, "after %d: %d files\n", int(i+1), TotalTableFiles());
+ fprintf(stderr, "after %d: %d files\n", i + 1, TotalTableFiles());
}
}
}
Put("C", "vc");
dbfull()->TEST_CompactMemTable();
- dbfull()->TEST_CompactRange(0, NULL, NULL);
+ dbfull()->TEST_CompactRange(0, nullptr, nullptr);
// Make sparse update
Put("A", "va2");
// Compactions should not cause us to create a situation where
// a file overlaps too much data at the next level.
ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20*1048576);
- dbfull()->TEST_CompactRange(0, NULL, NULL);
+ dbfull()->TEST_CompactRange(0, nullptr, nullptr);
ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20*1048576);
- dbfull()->TEST_CompactRange(1, NULL, NULL);
+ dbfull()->TEST_CompactRange(1, nullptr, nullptr);
ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20*1048576);
}
ASSERT_TRUE(Between(Size(Key(3), Key(5)), 110000, 111000));
- dbfull()->TEST_CompactRange(0, NULL, NULL);
+ dbfull()->TEST_CompactRange(0, nullptr, nullptr);
}
} while (ChangeOptions());
}
// Write to force compactions
Put("foo", "newvalue1");
for (int i = 0; i < 100; i++) {
- ASSERT_OK(Put(Key(i), Key(i) + std::string(100000, 'v'))); // 100K values
+ ASSERT_OK(Put(Key(i), Key(i) + std::string(100000, 'v'))); // 100K values
}
Put("foo", "newvalue2");
db_->ReleaseSnapshot(snapshot);
ASSERT_EQ(AllEntriesFor("foo"), "[ tiny, " + big + " ]");
Slice x("x");
- dbfull()->TEST_CompactRange(0, NULL, &x);
+ dbfull()->TEST_CompactRange(0, nullptr, &x);
ASSERT_EQ(AllEntriesFor("foo"), "[ tiny ]");
ASSERT_EQ(NumTableFilesAtLevel(0), 0);
ASSERT_GE(NumTableFilesAtLevel(1), 1);
- dbfull()->TEST_CompactRange(1, NULL, &x);
+ dbfull()->TEST_CompactRange(1, nullptr, &x);
ASSERT_EQ(AllEntriesFor("foo"), "[ tiny ]");
ASSERT_TRUE(Between(Size("", "pastfoo"), 0, 1000));
ASSERT_OK(dbfull()->TEST_CompactMemTable()); // Moves to level last-2
ASSERT_EQ(AllEntriesFor("foo"), "[ v2, DEL, v1 ]");
Slice z("z");
- dbfull()->TEST_CompactRange(last-2, NULL, &z);
+ dbfull()->TEST_CompactRange(last-2, nullptr, &z);
// DEL eliminated, but v1 remains because we aren't compacting that level
// (DEL can be eliminated because v2 hides v1).
ASSERT_EQ(AllEntriesFor("foo"), "[ v2, v1 ]");
- dbfull()->TEST_CompactRange(last-1, NULL, NULL);
+ dbfull()->TEST_CompactRange(last-1, nullptr, nullptr);
// Merging last-1 w/ last, so we are the base level for "foo", so
// DEL is removed. (as is v1).
ASSERT_EQ(AllEntriesFor("foo"), "[ v2 ]");
ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]");
ASSERT_OK(dbfull()->TEST_CompactMemTable()); // Moves to level last-2
ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]");
- dbfull()->TEST_CompactRange(last-2, NULL, NULL);
+ dbfull()->TEST_CompactRange(last-2, nullptr, nullptr);
// DEL kept: "last" file overlaps
ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]");
- dbfull()->TEST_CompactRange(last-1, NULL, NULL);
+ dbfull()->TEST_CompactRange(last-1, nullptr, nullptr);
// Merging last-1 w/ last, so we are the base level for "foo", so
// DEL is removed. (as is v1).
ASSERT_EQ(AllEntriesFor("foo"), "[ ]");
ASSERT_EQ("2,1,1", FilesPerLevel());
// Compact away the placeholder files we created initially
- dbfull()->TEST_CompactRange(1, NULL, NULL);
- dbfull()->TEST_CompactRange(2, NULL, NULL);
+ dbfull()->TEST_CompactRange(1, nullptr, nullptr);
+ dbfull()->TEST_CompactRange(2, nullptr, nullptr);
ASSERT_EQ("2", FilesPerLevel());
// Do a memtable compaction. Before bug-fix, the compaction would
TEST(DBTest, L0_CompactionBug_Issue44_b) {
Reopen();
- Put("","");
+ Put("", "");
Reopen();
Delete("e");
- Put("","");
+ Put("", "");
Reopen();
Put("c", "cv");
Reopen();
- Put("","");
+ Put("", "");
Reopen();
- Put("","");
+ Put("", "");
DelayMilliseconds(1000); // Wait for compaction to finish
Reopen();
- Put("d","dv");
+ Put("d", "dv");
Reopen();
- Put("","");
+ Put("", "");
Reopen();
Delete("d");
Delete("b");
ASSERT_EQ("(->)(c->cv)", Contents());
}
+TEST(DBTest, Fflush_Issue474) {
+ static const int kNum = 100000;
+ Random rnd(test::RandomSeed());
+ for (int i = 0; i < kNum; i++) {
+ fflush(nullptr);
+ ASSERT_OK(Put(RandomKey(&rnd), RandomString(&rnd, 100)));
+ }
+}
+
TEST(DBTest, ComparatorCheck) {
class NewComparator : public Comparator {
public:
Options new_options = CurrentOptions();
new_options.create_if_missing = true;
new_options.comparator = &cmp;
- new_options.filter_policy = NULL; // Cannot use bloom filters
+ new_options.filter_policy = nullptr; // Cannot use bloom filters
new_options.write_buffer_size = 1000; // Compact more often
DestroyAndReopen(&new_options);
ASSERT_OK(Put("[10]", "ten"));
// Compact all
MakeTables(1, "a", "z");
ASSERT_EQ("0,1,2", FilesPerLevel());
- db_->CompactRange(NULL, NULL);
+ db_->CompactRange(nullptr, nullptr);
ASSERT_EQ("0,0,1", FilesPerLevel());
}
DestroyDB(dbname, Options());
// Does not exist, and create_if_missing == false: error
- DB* db = NULL;
+ DB* db = nullptr;
Options opts;
opts.create_if_missing = false;
Status s = DB::Open(opts, dbname, &db);
- ASSERT_TRUE(strstr(s.ToString().c_str(), "does not exist") != NULL);
- ASSERT_TRUE(db == NULL);
+ ASSERT_TRUE(strstr(s.ToString().c_str(), "does not exist") != nullptr);
+ ASSERT_TRUE(db == nullptr);
// Does not exist, and create_if_missing == true: OK
opts.create_if_missing = true;
s = DB::Open(opts, dbname, &db);
ASSERT_OK(s);
- ASSERT_TRUE(db != NULL);
+ ASSERT_TRUE(db != nullptr);
delete db;
- db = NULL;
+ db = nullptr;
// Does exist, and error_if_exists == true: error
opts.create_if_missing = false;
opts.error_if_exists = true;
s = DB::Open(opts, dbname, &db);
- ASSERT_TRUE(strstr(s.ToString().c_str(), "exists") != NULL);
- ASSERT_TRUE(db == NULL);
+ ASSERT_TRUE(strstr(s.ToString().c_str(), "exists") != nullptr);
+ ASSERT_TRUE(db == nullptr);
// Does exist, and error_if_exists == false: OK
opts.create_if_missing = true;
opts.error_if_exists = false;
s = DB::Open(opts, dbname, &db);
ASSERT_OK(s);
- ASSERT_TRUE(db != NULL);
+ ASSERT_TRUE(db != nullptr);
+
+ delete db;
+ db = nullptr;
+}
+
+TEST(DBTest, DestroyEmptyDir) {
+ std::string dbname = test::TmpDir() + "/db_empty_dir";
+ TestEnv env(Env::Default());
+ env.DeleteDir(dbname);
+ ASSERT_TRUE(!env.FileExists(dbname));
+
+ Options opts;
+ opts.env = &env;
+
+ ASSERT_OK(env.CreateDir(dbname));
+ ASSERT_TRUE(env.FileExists(dbname));
+ std::vector<std::string> children;
+ ASSERT_OK(env.GetChildren(dbname, &children));
+ // The stock Env's do not filter out '.' and '..' special files.
+ ASSERT_EQ(2, children.size());
+ ASSERT_OK(DestroyDB(dbname, opts));
+ ASSERT_TRUE(!env.FileExists(dbname));
+
+ // Should also be destroyed if Env is filtering out dot files.
+ env.SetIgnoreDotFiles(true);
+ ASSERT_OK(env.CreateDir(dbname));
+ ASSERT_TRUE(env.FileExists(dbname));
+ ASSERT_OK(env.GetChildren(dbname, &children));
+ ASSERT_EQ(0, children.size());
+ ASSERT_OK(DestroyDB(dbname, opts));
+ ASSERT_TRUE(!env.FileExists(dbname));
+}
+
+TEST(DBTest, DestroyOpenDB) {
+ std::string dbname = test::TmpDir() + "/open_db_dir";
+ env_->DeleteDir(dbname);
+ ASSERT_TRUE(!env_->FileExists(dbname));
+
+ Options opts;
+ opts.create_if_missing = true;
+ DB* db = nullptr;
+ ASSERT_OK(DB::Open(opts, dbname, &db));
+ ASSERT_TRUE(db != nullptr);
+
+ // Must fail to destroy an open db.
+ ASSERT_TRUE(env_->FileExists(dbname));
+ ASSERT_TRUE(!DestroyDB(dbname, Options()).ok());
+ ASSERT_TRUE(env_->FileExists(dbname));
delete db;
- db = NULL;
+ db = nullptr;
+
+ // Should succeed destroying a closed db.
+ ASSERT_OK(DestroyDB(dbname, Options()));
+ ASSERT_TRUE(!env_->FileExists(dbname));
}
TEST(DBTest, Locking) {
- DB* db2 = NULL;
+ DB* db2 = nullptr;
Status s = DB::Open(CurrentOptions(), dbname_, &db2);
ASSERT_TRUE(!s.ok()) << "Locking did not prevent re-opening db";
}
ASSERT_EQ("v1", Get("foo"));
Compact("a", "z");
const int num_files = CountFiles();
- env_->no_space_.Release_Store(env_); // Force out-of-space errors
+ // Force out-of-space errors.
+ env_->no_space_.store(true, std::memory_order_release);
for (int i = 0; i < 10; i++) {
for (int level = 0; level < config::kNumLevels-1; level++) {
- dbfull()->TEST_CompactRange(level, NULL, NULL);
+ dbfull()->TEST_CompactRange(level, nullptr, nullptr);
}
}
- env_->no_space_.Release_Store(NULL);
+ env_->no_space_.store(false, std::memory_order_release);
ASSERT_LT(CountFiles(), num_files + 3);
}
options.env = env_;
Reopen(&options);
ASSERT_OK(Put("foo", "v1"));
- env_->non_writable_.Release_Store(env_); // Force errors for new files
+ // Force errors for new files.
+ env_->non_writable_.store(true, std::memory_order_release);
std::string big(100000, 'x');
int errors = 0;
for (int i = 0; i < 20; i++) {
}
}
ASSERT_GT(errors, 0);
- env_->non_writable_.Release_Store(NULL);
+ env_->non_writable_.store(false, std::memory_order_release);
}
TEST(DBTest, WriteSyncError) {
Options options = CurrentOptions();
options.env = env_;
Reopen(&options);
- env_->data_sync_error_.Release_Store(env_);
+ env_->data_sync_error_.store(true, std::memory_order_release);
// (b) Normal write should succeed
WriteOptions w;
ASSERT_EQ("NOT_FOUND", Get("k2"));
// (d) make sync behave normally
- env_->data_sync_error_.Release_Store(NULL);
+ env_->data_sync_error_.store(false, std::memory_order_release);
// (e) Do a non-sync write; should fail
w.sync = false;
// We iterate twice. In the second iteration, everything is the
// same except the log record never makes it to the MANIFEST file.
for (int iter = 0; iter < 2; iter++) {
- port::AtomicPointer* error_type = (iter == 0)
+ std::atomic<bool>* error_type = (iter == 0)
? &env_->manifest_sync_error_
: &env_->manifest_write_error_;
ASSERT_EQ(NumTableFilesAtLevel(last), 1); // foo=>bar is now in last level
// Merging compaction (will fail)
- error_type->Release_Store(env_);
- dbfull()->TEST_CompactRange(last, NULL, NULL); // Should fail
+ error_type->store(true, std::memory_order_release);
+ dbfull()->TEST_CompactRange(last, nullptr, nullptr); // Should fail
ASSERT_EQ("bar", Get("foo"));
// Recovery: should not lose data
- error_type->Release_Store(NULL);
+ error_type->store(false, std::memory_order_release);
Reopen(&options);
ASSERT_EQ("bar", Get("foo"));
}
dbfull()->TEST_CompactMemTable();
// Prevent auto compactions triggered by seeks
- env_->delay_data_sync_.Release_Store(env_);
+ env_->delay_data_sync_.store(true, std::memory_order_release);
// Lookup present keys. Should rarely read from small sstable.
env_->random_read_counter_.Reset();
fprintf(stderr, "%d missing => %d reads\n", N, reads);
ASSERT_LE(reads, 3*N/100);
- env_->delay_data_sync_.Release_Store(NULL);
+ env_->delay_data_sync_.store(false, std::memory_order_release);
Close();
delete options.block_cache;
delete options.filter_policy;
struct MTState {
DBTest* test;
- port::AtomicPointer stop;
- port::AtomicPointer counter[kNumThreads];
- port::AtomicPointer thread_done[kNumThreads];
+ std::atomic<bool> stop;
+ std::atomic<int> counter[kNumThreads];
+ std::atomic<bool> thread_done[kNumThreads];
};
struct MTThread {
MTThread* t = reinterpret_cast<MTThread*>(arg);
int id = t->id;
DB* db = t->state->test->db_;
- uintptr_t counter = 0;
+ int counter = 0;
fprintf(stderr, "... starting thread %d\n", id);
Random rnd(1000 + id);
std::string value;
char valbuf[1500];
- while (t->state->stop.Acquire_Load() == NULL) {
- t->state->counter[id].Release_Store(reinterpret_cast<void*>(counter));
+ while (!t->state->stop.load(std::memory_order_acquire)) {
+ t->state->counter[id].store(counter, std::memory_order_release);
int key = rnd.Uniform(kNumKeys);
char keybuf[20];
ASSERT_EQ(k, key);
ASSERT_GE(w, 0);
ASSERT_LT(w, kNumThreads);
- ASSERT_LE(static_cast<uintptr_t>(c), reinterpret_cast<uintptr_t>(
- t->state->counter[w].Acquire_Load()));
+ ASSERT_LE(c, t->state->counter[w].load(std::memory_order_acquire));
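+          // The writer published its counter with a release store before the
+          // Put, and per-thread counters only grow, so the count embedded in
+          // the value can never exceed the writer's current counter.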
}
}
counter++;
}
- t->state->thread_done[id].Release_Store(t);
- fprintf(stderr, "... stopping thread %d after %d ops\n", id, int(counter));
+ t->state->thread_done[id].store(true, std::memory_order_release);
+ fprintf(stderr, "... stopping thread %d after %d ops\n", id, counter);
}
} // namespace
// Initialize state
MTState mt;
mt.test = this;
- mt.stop.Release_Store(0);
+ mt.stop.store(false, std::memory_order_release);
for (int id = 0; id < kNumThreads; id++) {
- mt.counter[id].Release_Store(0);
- mt.thread_done[id].Release_Store(0);
+    mt.counter[id].store(0, std::memory_order_release);
+ mt.thread_done[id].store(false, std::memory_order_release);
}
// Start threads
DelayMilliseconds(kTestSeconds * 1000);
// Stop the threads and wait for them to finish
- mt.stop.Release_Store(&mt);
+ mt.stop.store(true, std::memory_order_release);
for (int id = 0; id < kNumThreads; id++) {
- while (mt.thread_done[id].Acquire_Load() == NULL) {
+ while (!mt.thread_done[id].load(std::memory_order_acquire)) {
DelayMilliseconds(100);
}
}
return Status::NotFound(key);
}
virtual Iterator* NewIterator(const ReadOptions& options) {
- if (options.snapshot == NULL) {
+ if (options.snapshot == nullptr) {
KVMap* saved = new KVMap;
*saved = map_;
return new ModelIter(saved, true);
virtual Slice key() const { return iter_->first; }
virtual Slice value() const { return iter_->second; }
virtual Status status() const { return Status::OK(); }
+
private:
const KVMap* const map_;
const bool owned_; // Do we own map_
KVMap map_;
};
-static std::string RandomKey(Random* rnd) {
- int len = (rnd->OneIn(3)
- ? 1 // Short sometimes to encourage collisions
- : (rnd->OneIn(100) ? rnd->Skewed(10) : rnd->Uniform(10)));
- return test::RandomKey(rnd, len);
-}
-
static bool CompareIterators(int step,
DB* model,
DB* db,
do {
ModelDB model(CurrentOptions());
const int N = 10000;
- const Snapshot* model_snap = NULL;
- const Snapshot* db_snap = NULL;
+ const Snapshot* model_snap = nullptr;
+ const Snapshot* db_snap = nullptr;
std::string k, v;
for (int step = 0; step < N; step++) {
if (step % 100 == 0) {
}
if ((step % 100) == 0) {
- ASSERT_TRUE(CompareIterators(step, &model, db_, NULL, NULL));
+ ASSERT_TRUE(CompareIterators(step, &model, db_, nullptr, nullptr));
ASSERT_TRUE(CompareIterators(step, &model, db_, model_snap, db_snap));
// Save a snapshot from each DB this time that we'll use next
// time we compare things, to make sure the current state is
// preserved with the snapshot
- if (model_snap != NULL) model.ReleaseSnapshot(model_snap);
- if (db_snap != NULL) db_->ReleaseSnapshot(db_snap);
+ if (model_snap != nullptr) model.ReleaseSnapshot(model_snap);
+ if (db_snap != nullptr) db_->ReleaseSnapshot(db_snap);
Reopen();
- ASSERT_TRUE(CompareIterators(step, &model, db_, NULL, NULL));
+ ASSERT_TRUE(CompareIterators(step, &model, db_, nullptr, nullptr));
model_snap = model.GetSnapshot();
db_snap = db_->GetSnapshot();
}
}
- if (model_snap != NULL) model.ReleaseSnapshot(model_snap);
- if (db_snap != NULL) db_->ReleaseSnapshot(db_snap);
+ if (model_snap != nullptr) model.ReleaseSnapshot(model_snap);
+ if (db_snap != nullptr) db_->ReleaseSnapshot(db_snap);
} while (ChangeOptions());
}
std::string dbname = test::TmpDir() + "/leveldb_test_benchmark";
DestroyDB(dbname, Options());
- DB* db = NULL;
+ DB* db = nullptr;
Options opts;
opts.create_if_missing = true;
Status s = DB::Open(opts, dbname, &db);
ASSERT_OK(s);
- ASSERT_TRUE(db != NULL);
+ ASSERT_TRUE(db != nullptr);
delete db;
- db = NULL;
+ db = nullptr;
Env* env = Env::Default();
InternalKeyComparator cmp(BytewiseComparator());
Options options;
- VersionSet vset(dbname, &options, NULL, &cmp);
+ VersionSet vset(dbname, &options, nullptr, &cmp);
bool save_manifest;
ASSERT_OK(vset.Recover(&save_manifest));
VersionEdit vbase;
}
// Append the serialization of "key" to *result.
-extern void AppendInternalKey(std::string* result,
- const ParsedInternalKey& key);
+void AppendInternalKey(std::string* result, const ParsedInternalKey& key);
// Attempt to parse an internal key from "internal_key". On success,
// stores the parsed data in "*result", and returns true.
//
// On error, returns false, leaves "*result" in an undefined state.
-extern bool ParseInternalKey(const Slice& internal_key,
- ParsedInternalKey* result);
+bool ParseInternalKey(const Slice& internal_key, ParsedInternalKey* result);
// Returns the user key portion of an internal key.
inline Slice ExtractUserKey(const Slice& internal_key) {
return Slice(internal_key.data(), internal_key.size() - 8);
}
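+// (An internal key is the user key followed by an 8-byte tag packing the
+// sequence number and ValueType, which is why 8 bytes are trimmed above.)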
-inline ValueType ExtractValueType(const Slice& internal_key) {
- assert(internal_key.size() >= 8);
- const size_t n = internal_key.size();
- uint64_t num = DecodeFixed64(internal_key.data() + n - 8);
- unsigned char c = num & 0xff;
- return static_cast<ValueType>(c);
-}
-
// A comparator for internal keys that uses a specified comparator for
// the user key portion and breaks ties by decreasing sequence number.
class InternalKeyComparator : public Comparator {
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
+#include "leveldb/dumpfile.h"
+
#include <stdio.h>
+
#include "db/dbformat.h"
#include "db/filename.h"
#include "db/log_reader.h"
Status DumpTable(Env* env, const std::string& fname, WritableFile* dst) {
uint64_t file_size;
- RandomAccessFile* file = NULL;
- Table* table = NULL;
+ RandomAccessFile* file = nullptr;
+ Table* table = nullptr;
Status s = env->GetFileSize(fname, &file_size);
if (s.ok()) {
s = env->NewRandomAccessFile(fname, &file);
// the last "sync". It then checks for data loss errors by purposely dropping
// file data (or entire files) not protected by a "sync".
-#include "leveldb/db.h"
-
#include <map>
#include <set>
+
+#include "leveldb/db.h"
#include "db/db_impl.h"
#include "db/filename.h"
#include "db/log_format.h"
#include "leveldb/env.h"
#include "leveldb/table.h"
#include "leveldb/write_batch.h"
+#include "port/port.h"
+#include "port/thread_annotations.h"
#include "util/logging.h"
#include "util/mutexlock.h"
#include "util/testharness.h"
namespace {
// Assume a filename, and not a directory name like "/foo/bar/"
-static std::string GetDirName(const std::string filename) {
+static std::string GetDirName(const std::string& filename) {
size_t found = filename.find_last_of("/\\");
if (found == std::string::npos) {
return "";
struct FileState {
std::string filename_;
- ssize_t pos_;
- ssize_t pos_at_last_sync_;
- ssize_t pos_at_last_flush_;
+ int64_t pos_;
+ int64_t pos_at_last_sync_;
+ int64_t pos_at_last_flush_;
FileState(const std::string& filename)
: filename_(filename),
class FaultInjectionTestEnv : public EnvWrapper {
public:
- FaultInjectionTestEnv() : EnvWrapper(Env::Default()), filesystem_active_(true) {}
+ FaultInjectionTestEnv()
+ : EnvWrapper(Env::Default()), filesystem_active_(true) {}
virtual ~FaultInjectionTestEnv() { }
virtual Status NewWritableFile(const std::string& fname,
WritableFile** result);
// system reset. Setting to inactive will freeze our saved filesystem state so
// that it will stop being recorded. It can then be reset back to the state at
// the time of the reset.
- bool IsFilesystemActive() const { return filesystem_active_; }
- void SetFilesystemActive(bool active) { filesystem_active_ = active; }
+ bool IsFilesystemActive() LOCKS_EXCLUDED(mutex_) {
+ MutexLock l(&mutex_);
+ return filesystem_active_;
+ }
+ void SetFilesystemActive(bool active) LOCKS_EXCLUDED(mutex_) {
+ MutexLock l(&mutex_);
+ filesystem_active_ = active;
+ }
private:
port::Mutex mutex_;
- std::map<std::string, FileState> db_file_state_;
- std::set<std::string> new_files_since_last_dir_sync_;
- bool filesystem_active_; // Record flushes, syncs, writes
+ std::map<std::string, FileState> db_file_state_ GUARDED_BY(mutex_);
+ std::set<std::string> new_files_since_last_dir_sync_ GUARDED_BY(mutex_);
+ bool filesystem_active_ GUARDED_BY(mutex_); // Record flushes, syncs, writes
};
TestWritableFile::TestWritableFile(const FileState& state,
target_(f),
writable_file_opened_(true),
env_(env) {
- assert(f != NULL);
+ assert(f != nullptr);
}
TestWritableFile::~TestWritableFile() {
// Since we are not destroying the database, the existing files
// should keep their recorded synced/flushed state. Therefore
// we do not reset db_file_state_ and new_files_since_last_dir_sync_.
- MutexLock l(&mutex_);
SetFilesystemActive(true);
}
}
Status FileState::DropUnsyncedData() const {
- ssize_t sync_pos = pos_at_last_sync_ == -1 ? 0 : pos_at_last_sync_;
+ int64_t sync_pos = pos_at_last_sync_ == -1 ? 0 : pos_at_last_sync_;
return Truncate(filename_, sync_pos);
}
FaultInjectionTest()
: env_(new FaultInjectionTestEnv),
tiny_cache_(NewLRUCache(100)),
- db_(NULL) {
+ db_(nullptr) {
dbname_ = test::TmpDir() + "/fault_test";
DestroyDB(dbname_, Options()); // Destroy any db from earlier run
options_.reuse_logs = true;
Status OpenDB() {
delete db_;
- db_ = NULL;
+ db_ = nullptr;
env_->ResetState();
return DB::Open(options_, dbname_, &db_);
}
void CloseDB() {
delete db_;
- db_ = NULL;
+ db_ = nullptr;
}
void DeleteAllData() {
Iterator* iter = db_->NewIterator(ReadOptions());
- WriteOptions options;
for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
ASSERT_OK(db_->Delete(WriteOptions(), iter->key()));
}
void PartialCompactTestPreFault(int num_pre_sync, int num_post_sync) {
DeleteAllData();
Build(0, num_pre_sync);
- db_->CompactRange(NULL, NULL);
+ db_->CompactRange(nullptr, nullptr);
Build(num_pre_sync, num_post_sync);
}
namespace leveldb {
// A utility routine: write "data" to the named file and Sync() it.
-extern Status WriteStringToFileSync(Env* env, const Slice& data,
- const std::string& fname);
+Status WriteStringToFileSync(Env* env, const Slice& data,
+ const std::string& fname);
-static std::string MakeFileName(const std::string& name, uint64_t number,
+static std::string MakeFileName(const std::string& dbname, uint64_t number,
const char* suffix) {
char buf[100];
snprintf(buf, sizeof(buf), "/%06llu.%s",
static_cast<unsigned long long>(number),
suffix);
- return name + buf;
+ return dbname + buf;
}
-std::string LogFileName(const std::string& name, uint64_t number) {
+std::string LogFileName(const std::string& dbname, uint64_t number) {
assert(number > 0);
- return MakeFileName(name, number, "log");
+ return MakeFileName(dbname, number, "log");
}
-std::string TableFileName(const std::string& name, uint64_t number) {
+std::string TableFileName(const std::string& dbname, uint64_t number) {
assert(number > 0);
- return MakeFileName(name, number, "ldb");
+ return MakeFileName(dbname, number, "ldb");
}
-std::string SSTTableFileName(const std::string& name, uint64_t number) {
+std::string SSTTableFileName(const std::string& dbname, uint64_t number) {
assert(number > 0);
- return MakeFileName(name, number, "sst");
+ return MakeFileName(dbname, number, "sst");
}
std::string DescriptorFileName(const std::string& dbname, uint64_t number) {
// dbname/LOG.old
// dbname/MANIFEST-[0-9]+
// dbname/[0-9]+.(log|sst|ldb)
-bool ParseFileName(const std::string& fname,
+bool ParseFileName(const std::string& filename,
uint64_t* number,
FileType* type) {
- Slice rest(fname);
+ Slice rest(filename);
if (rest == "CURRENT") {
*number = 0;
*type = kCurrentFile;
// Return the name of the log file with the specified number
// in the db named by "dbname". The result will be prefixed with
// "dbname".
-extern std::string LogFileName(const std::string& dbname, uint64_t number);
+std::string LogFileName(const std::string& dbname, uint64_t number);
// Return the name of the sstable with the specified number
// in the db named by "dbname". The result will be prefixed with
// "dbname".
-extern std::string TableFileName(const std::string& dbname, uint64_t number);
+std::string TableFileName(const std::string& dbname, uint64_t number);
// Return the legacy file name for an sstable with the specified number
// in the db named by "dbname". The result will be prefixed with
// "dbname".
-extern std::string SSTTableFileName(const std::string& dbname, uint64_t number);
+std::string SSTTableFileName(const std::string& dbname, uint64_t number);
// Return the name of the descriptor file for the db named by
// "dbname" and the specified incarnation number. The result will be
// prefixed with "dbname".
-extern std::string DescriptorFileName(const std::string& dbname,
- uint64_t number);
+std::string DescriptorFileName(const std::string& dbname, uint64_t number);
// Return the name of the current file. This file contains the name
// of the current manifest file. The result will be prefixed with
// "dbname".
-extern std::string CurrentFileName(const std::string& dbname);
+std::string CurrentFileName(const std::string& dbname);
// Return the name of the lock file for the db named by
// "dbname". The result will be prefixed with "dbname".
-extern std::string LockFileName(const std::string& dbname);
+std::string LockFileName(const std::string& dbname);
// Return the name of a temporary file owned by the db named "dbname".
// The result will be prefixed with "dbname".
-extern std::string TempFileName(const std::string& dbname, uint64_t number);
+std::string TempFileName(const std::string& dbname, uint64_t number);
// Return the name of the info log file for "dbname".
-extern std::string InfoLogFileName(const std::string& dbname);
+std::string InfoLogFileName(const std::string& dbname);
// Return the name of the old info log file for "dbname".
-extern std::string OldInfoLogFileName(const std::string& dbname);
+std::string OldInfoLogFileName(const std::string& dbname);
// If filename is a leveldb file, store the type of the file in *type.
// The number encoded in the filename is stored in *number. If the
// filename was successfully parsed, returns true. Else return false.
-extern bool ParseFileName(const std::string& filename,
- uint64_t* number,
- FileType* type);
+bool ParseFileName(const std::string& filename,
+ uint64_t* number,
+ FileType* type);
// Make the CURRENT file point to the descriptor file with the
// specified number.
-extern Status SetCurrentFile(Env* env, const std::string& dbname,
- uint64_t descriptor_number);
-
+Status SetCurrentFile(Env* env, const std::string& dbname,
+ uint64_t descriptor_number);
} // namespace leveldb
ASSERT_TRUE(ParseFileName(fname.c_str() + 4, &number, &type));
ASSERT_EQ(999, number);
ASSERT_EQ(kTempFile, type);
+
+ fname = InfoLogFileName("foo");
+ ASSERT_EQ("foo/", std::string(fname.data(), 4));
+ ASSERT_TRUE(ParseFileName(fname.c_str() + 4, &number, &type));
+ ASSERT_EQ(0, number);
+ ASSERT_EQ(kInfoLogFile, type);
+
+ fname = OldInfoLogFileName("foo");
+ ASSERT_EQ("foo/", std::string(fname.data(), 4));
+ ASSERT_TRUE(ParseFileName(fname.c_str() + 4, &number, &type));
+ ASSERT_EQ(0, number);
+ ASSERT_EQ(kInfoLogFile, type);
}
} // namespace leveldb
}
bool Reader::SkipToInitialBlock() {
- size_t offset_in_block = initial_offset_ % kBlockSize;
+ const size_t offset_in_block = initial_offset_ % kBlockSize;
uint64_t block_start_location = initial_offset_ - offset_in_block;
// Don't search a block if we'd be in the trailer
if (offset_in_block > kBlockSize - 6) {
- offset_in_block = 0;
block_start_location += kBlockSize;
}
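+  // offset_in_block is not consulted after this point, so the old reset to
+  // zero was a dead store; dropping it lets the variable be const.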
// it could emit an empty kFirstType record at the tail end
// of a block followed by a kFullType or kFirstType record
// at the beginning of the next block.
- if (scratch->empty()) {
- in_fragmented_record = false;
- } else {
+ if (!scratch->empty()) {
ReportCorruption(scratch->size(), "partial record without end(1)");
}
}
// it could emit an empty kFirstType record at the tail end
// of a block followed by a kFullType or kFirstType record
// at the beginning of the next block.
- if (scratch->empty()) {
- in_fragmented_record = false;
- } else {
+ if (!scratch->empty()) {
ReportCorruption(scratch->size(), "partial record without end(2)");
}
}
}
void Reader::ReportDrop(uint64_t bytes, const Status& reason) {
- if (reporter_ != NULL &&
+ if (reporter_ != nullptr &&
end_of_buffer_offset_ - buffer_.size() - bytes >= initial_offset_) {
reporter_->Corruption(static_cast<size_t>(bytes), reason);
}
// Create a reader that will return log records from "*file".
// "*file" must remain live while this Reader is in use.
//
- // If "reporter" is non-NULL, it is notified whenever some data is
+ // If "reporter" is non-null, it is notified whenever some data is
// dropped due to a detected corruption. "*reporter" must remain
// live while this Reader is in use.
//
p += 8;
p = EncodeVarint32(p, val_size);
memcpy(p, value.data(), val_size);
- assert((p + val_size) - buf == encoded_len);
+ assert(p + val_size == buf + encoded_len);
table_.Insert(buf);
}
namespace leveldb {
class InternalKeyComparator;
-class Mutex;
class MemTableIterator;
class MemTable {
class RecoveryTest {
public:
- RecoveryTest() : env_(Env::Default()), db_(NULL) {
+ RecoveryTest() : env_(Env::Default()), db_(nullptr) {
dbname_ = test::TmpDir() + "/recovery_test";
DestroyDB(dbname_, Options());
Open();
void Close() {
delete db_;
- db_ = NULL;
+ db_ = nullptr;
}
- void Open(Options* options = NULL) {
+ Status OpenWithStatus(Options* options = nullptr) {
Close();
Options opts;
- if (options != NULL) {
+ if (options != nullptr) {
opts = *options;
} else {
opts.reuse_logs = true; // TODO(sanjay): test both ways
opts.create_if_missing = true;
}
- if (opts.env == NULL) {
+ if (opts.env == nullptr) {
opts.env = env_;
}
- ASSERT_OK(DB::Open(opts, dbname_, &db_));
+ return DB::Open(opts, dbname_, &db_);
+ }
+
+ void Open(Options* options = nullptr) {
+ ASSERT_OK(OpenWithStatus(options));
ASSERT_EQ(1, NumLogs());
}
return db_->Put(WriteOptions(), k, v);
}
- std::string Get(const std::string& k, const Snapshot* snapshot = NULL) {
+ std::string Get(const std::string& k, const Snapshot* snapshot = nullptr) {
std::string result;
Status s = db_->Get(ReadOptions(), k, &result);
if (s.IsNotFound()) {
}
size_t DeleteLogFiles() {
+ // Linux allows unlinking open files, but Windows does not.
+ // Closing the db allows for file deletion.
+ Close();
std::vector<uint64_t> logs = GetFiles(kLogFile);
for (size_t i = 0; i < logs.size(); i++) {
ASSERT_OK(env_->DeleteFile(LogName(logs[i]))) << LogName(logs[i]);
return logs.size();
}
+ void DeleteManifestFile() {
+ ASSERT_OK(env_->DeleteFile(ManifestFileName()));
+ }
+
uint64_t FirstLogFile() {
return GetFiles(kLogFile)[0];
}
ASSERT_EQ("there", Get("hi"));
}
+TEST(RecoveryTest, ManifestMissing) {
+ ASSERT_OK(Put("foo", "bar"));
+ Close();
+ DeleteManifestFile();
+
+ Status status = OpenWithStatus();
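+  // VersionSet::Recover maps the missing MANIFEST named by CURRENT to
+  // Status::Corruption rather than NotFound, which is what we check here.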
+ ASSERT_TRUE(status.IsCorruption());
+}
+
} // namespace leveldb
int main(int argc, char** argv) {
owns_cache_(options_.block_cache != options.block_cache),
next_file_number_(1) {
// TableCache can be small since we expect each table to be opened once.
- table_cache_ = new TableCache(dbname_, &options_, 10);
+ table_cache_ = new TableCache(dbname_, options_, 10);
}
~Repairer() {
status = BuildTable(dbname_, env_, options_, table_cache_, iter, &meta);
delete iter;
mem->Unref();
- mem = NULL;
+ mem = nullptr;
if (status.ok()) {
if (meta.file_size > 0) {
table_numbers_.push_back(meta.number);
}
}
delete builder;
- builder = NULL;
+ builder = nullptr;
if (s.ok()) {
s = file->Close();
}
delete file;
- file = NULL;
+ file = nullptr;
if (counter > 0 && s.ok()) {
std::string orig = TableFileName(dbname_, t.meta.number);
status = file->Close();
}
delete file;
- file = NULL;
+ file = nullptr;
if (!status.ok()) {
env_->DeleteFile(tmp);
// dir/lost/foo
const char* slash = strrchr(fname.c_str(), '/');
std::string new_dir;
- if (slash != NULL) {
+ if (slash != nullptr) {
new_dir.assign(fname.data(), slash - fname.data());
}
new_dir.append("/lost");
env_->CreateDir(new_dir); // Ignore error
std::string new_file = new_dir;
new_file.append("/");
- new_file.append((slash == NULL) ? fname.c_str() : slash + 1);
+ new_file.append((slash == nullptr) ? fname.c_str() : slash + 1);
Status s = env_->RenameFile(fname, new_file);
Log(options_.info_log, "Archiving %s: %s\n",
fname.c_str(), s.ToString().c_str());
//
// ... prev vs. next pointer ordering ...
-#include <assert.h>
-#include <stdlib.h>
-#include "port/port.h"
+#include <atomic>
+#include <cassert>
+#include <cstdlib>
+
#include "util/arena.h"
#include "util/random.h"
// Modified only by Insert(). Read racily by readers, but stale
// values are ok.
- port::AtomicPointer max_height_; // Height of the entire list
+ std::atomic<int> max_height_; // Height of the entire list
inline int GetMaxHeight() const {
- return static_cast<int>(
- reinterpret_cast<intptr_t>(max_height_.NoBarrier_Load()));
+ return max_height_.load(std::memory_order_relaxed);
}
// Read/written only by Insert().
bool KeyIsAfterNode(const Key& key, Node* n) const;
// Return the earliest node that comes at or after key.
- // Return NULL if there is no such node.
+ // Return nullptr if there is no such node.
//
- // If prev is non-NULL, fills prev[level] with pointer to previous
+ // If prev is non-null, fills prev[level] with pointer to previous
// node at "level" for every level in [0..max_height_-1].
Node* FindGreaterOrEqual(const Key& key, Node** prev) const;
// Implementation details follow
template<typename Key, class Comparator>
-struct SkipList<Key,Comparator>::Node {
+struct SkipList<Key, Comparator>::Node {
explicit Node(const Key& k) : key(k) { }
Key const key;
assert(n >= 0);
// Use an 'acquire load' so that we observe a fully initialized
// version of the returned Node.
- return reinterpret_cast<Node*>(next_[n].Acquire_Load());
+ return next_[n].load(std::memory_order_acquire);
}
void SetNext(int n, Node* x) {
assert(n >= 0);
// Use a 'release store' so that anybody who reads through this
// pointer observes a fully initialized version of the inserted node.
- next_[n].Release_Store(x);
+ next_[n].store(x, std::memory_order_release);
}
// No-barrier variants that can be safely used in a few locations.
Node* NoBarrier_Next(int n) {
assert(n >= 0);
- return reinterpret_cast<Node*>(next_[n].NoBarrier_Load());
+ return next_[n].load(std::memory_order_relaxed);
}
void NoBarrier_SetNext(int n, Node* x) {
assert(n >= 0);
- next_[n].NoBarrier_Store(x);
+ next_[n].store(x, std::memory_order_relaxed);
}
private:
// Array of length equal to the node height. next_[0] is lowest level link.
- port::AtomicPointer next_[1];
+ std::atomic<Node*> next_[1];
};
template<typename Key, class Comparator>
-typename SkipList<Key,Comparator>::Node*
-SkipList<Key,Comparator>::NewNode(const Key& key, int height) {
- char* mem = arena_->AllocateAligned(
- sizeof(Node) + sizeof(port::AtomicPointer) * (height - 1));
- return new (mem) Node(key);
+typename SkipList<Key, Comparator>::Node*
+SkipList<Key, Comparator>::NewNode(const Key& key, int height) {
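+  // Node already declares a one-element next_[] array, so only height - 1
+  // additional atomic links need to be allocated past the struct.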
+ char* const node_memory = arena_->AllocateAligned(
+ sizeof(Node) + sizeof(std::atomic<Node*>) * (height - 1));
+ return new (node_memory) Node(key);
}
template<typename Key, class Comparator>
-inline SkipList<Key,Comparator>::Iterator::Iterator(const SkipList* list) {
+inline SkipList<Key, Comparator>::Iterator::Iterator(const SkipList* list) {
list_ = list;
- node_ = NULL;
+ node_ = nullptr;
}
template<typename Key, class Comparator>
-inline bool SkipList<Key,Comparator>::Iterator::Valid() const {
- return node_ != NULL;
+inline bool SkipList<Key, Comparator>::Iterator::Valid() const {
+ return node_ != nullptr;
}
template<typename Key, class Comparator>
-inline const Key& SkipList<Key,Comparator>::Iterator::key() const {
+inline const Key& SkipList<Key, Comparator>::Iterator::key() const {
assert(Valid());
return node_->key;
}
template<typename Key, class Comparator>
-inline void SkipList<Key,Comparator>::Iterator::Next() {
+inline void SkipList<Key, Comparator>::Iterator::Next() {
assert(Valid());
node_ = node_->Next(0);
}
template<typename Key, class Comparator>
-inline void SkipList<Key,Comparator>::Iterator::Prev() {
+inline void SkipList<Key, Comparator>::Iterator::Prev() {
// Instead of using explicit "prev" links, we just search for the
// last node that falls before key.
assert(Valid());
node_ = list_->FindLessThan(node_->key);
if (node_ == list_->head_) {
- node_ = NULL;
+ node_ = nullptr;
}
}
template<typename Key, class Comparator>
-inline void SkipList<Key,Comparator>::Iterator::Seek(const Key& target) {
- node_ = list_->FindGreaterOrEqual(target, NULL);
+inline void SkipList<Key, Comparator>::Iterator::Seek(const Key& target) {
+ node_ = list_->FindGreaterOrEqual(target, nullptr);
}
template<typename Key, class Comparator>
-inline void SkipList<Key,Comparator>::Iterator::SeekToFirst() {
+inline void SkipList<Key, Comparator>::Iterator::SeekToFirst() {
node_ = list_->head_->Next(0);
}
template<typename Key, class Comparator>
-inline void SkipList<Key,Comparator>::Iterator::SeekToLast() {
+inline void SkipList<Key, Comparator>::Iterator::SeekToLast() {
node_ = list_->FindLast();
if (node_ == list_->head_) {
- node_ = NULL;
+ node_ = nullptr;
}
}
template<typename Key, class Comparator>
-int SkipList<Key,Comparator>::RandomHeight() {
+int SkipList<Key, Comparator>::RandomHeight() {
// Increase height with probability 1 in kBranching
static const unsigned int kBranching = 4;
int height = 1;
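+  // Each extra level is taken with probability 1/kBranching, so heights follow
+  // a geometric distribution with an expected value of 4/3 (capped at
+  // kMaxHeight).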
}
template<typename Key, class Comparator>
-bool SkipList<Key,Comparator>::KeyIsAfterNode(const Key& key, Node* n) const {
- // NULL n is considered infinite
- return (n != NULL) && (compare_(n->key, key) < 0);
+bool SkipList<Key, Comparator>::KeyIsAfterNode(const Key& key, Node* n) const {
+ // null n is considered infinite
+ return (n != nullptr) && (compare_(n->key, key) < 0);
}
template<typename Key, class Comparator>
-typename SkipList<Key,Comparator>::Node* SkipList<Key,Comparator>::FindGreaterOrEqual(const Key& key, Node** prev)
- const {
+typename SkipList<Key, Comparator>::Node*
+SkipList<Key, Comparator>::FindGreaterOrEqual(const Key& key,
+ Node** prev) const {
Node* x = head_;
int level = GetMaxHeight() - 1;
while (true) {
// Keep searching in this list
x = next;
} else {
- if (prev != NULL) prev[level] = x;
+ if (prev != nullptr) prev[level] = x;
if (level == 0) {
return next;
} else {
}
template<typename Key, class Comparator>
-typename SkipList<Key,Comparator>::Node*
-SkipList<Key,Comparator>::FindLessThan(const Key& key) const {
+typename SkipList<Key, Comparator>::Node*
+SkipList<Key, Comparator>::FindLessThan(const Key& key) const {
Node* x = head_;
int level = GetMaxHeight() - 1;
while (true) {
assert(x == head_ || compare_(x->key, key) < 0);
Node* next = x->Next(level);
- if (next == NULL || compare_(next->key, key) >= 0) {
+ if (next == nullptr || compare_(next->key, key) >= 0) {
if (level == 0) {
return x;
} else {
}
template<typename Key, class Comparator>
-typename SkipList<Key,Comparator>::Node* SkipList<Key,Comparator>::FindLast()
+typename SkipList<Key, Comparator>::Node* SkipList<Key, Comparator>::FindLast()
const {
Node* x = head_;
int level = GetMaxHeight() - 1;
while (true) {
Node* next = x->Next(level);
- if (next == NULL) {
+ if (next == nullptr) {
if (level == 0) {
return x;
} else {
}
template<typename Key, class Comparator>
-SkipList<Key,Comparator>::SkipList(Comparator cmp, Arena* arena)
+SkipList<Key, Comparator>::SkipList(Comparator cmp, Arena* arena)
: compare_(cmp),
arena_(arena),
head_(NewNode(0 /* any key will do */, kMaxHeight)),
- max_height_(reinterpret_cast<void*>(1)),
+ max_height_(1),
rnd_(0xdeadbeef) {
for (int i = 0; i < kMaxHeight; i++) {
- head_->SetNext(i, NULL);
+ head_->SetNext(i, nullptr);
}
}
template<typename Key, class Comparator>
-void SkipList<Key,Comparator>::Insert(const Key& key) {
+void SkipList<Key, Comparator>::Insert(const Key& key) {
// TODO(opt): We can use a barrier-free variant of FindGreaterOrEqual()
// here since Insert() is externally synchronized.
Node* prev[kMaxHeight];
Node* x = FindGreaterOrEqual(key, prev);
// Our data structure does not allow duplicate insertion
- assert(x == NULL || !Equal(key, x->key));
+ assert(x == nullptr || !Equal(key, x->key));
int height = RandomHeight();
if (height > GetMaxHeight()) {
for (int i = GetMaxHeight(); i < height; i++) {
prev[i] = head_;
}
- //fprintf(stderr, "Change height from %d to %d\n", max_height_, height);
-
// It is ok to mutate max_height_ without any synchronization
// with concurrent readers. A concurrent reader that observes
// the new value of max_height_ will see either the old value of
- // new level pointers from head_ (NULL), or a new value set in
+ // new level pointers from head_ (nullptr), or a new value set in
// the loop below. In the former case the reader will
- // immediately drop to the next level since NULL sorts after all
+ // immediately drop to the next level since nullptr sorts after all
// keys. In the latter case the reader will use the new node.
- max_height_.NoBarrier_Store(reinterpret_cast<void*>(height));
+ max_height_.store(height, std::memory_order_relaxed);
}
x = NewNode(key, height);
}
template<typename Key, class Comparator>
-bool SkipList<Key,Comparator>::Contains(const Key& key) const {
- Node* x = FindGreaterOrEqual(key, NULL);
- if (x != NULL && Equal(key, x->key)) {
+bool SkipList<Key, Comparator>::Contains(const Key& key) const {
+ Node* x = FindGreaterOrEqual(key, nullptr);
+ if (x != nullptr && Equal(key, x->key)) {
return true;
} else {
return false;
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "db/skiplist.h"
+
+#include <atomic>
#include <set>
+
#include "leveldb/env.h"
+#include "port/port.h"
+#include "port/thread_annotations.h"
#include "util/arena.h"
#include "util/hash.h"
#include "util/random.h"
// concurrent readers (with no synchronization other than when a
// reader's iterator is created), the reader always observes all the
// data that was present in the skip list when the iterator was
-// constructor. Because insertions are happening concurrently, we may
+// constructed. Because insertions are happening concurrently, we may
// also observe new values that were inserted since the iterator was
// constructed, but we should never miss any values that were present
// at iterator construction time.
// Per-key generation
struct State {
- port::AtomicPointer generation[K];
- void Set(int k, intptr_t v) {
- generation[k].Release_Store(reinterpret_cast<void*>(v));
+ std::atomic<int> generation[K];
+ void Set(int k, int v) {
+ generation[k].store(v, std::memory_order_release);
}
- intptr_t Get(int k) {
- return reinterpret_cast<intptr_t>(generation[k].Acquire_Load());
+ int Get(int k) {
+ return generation[k].load(std::memory_order_acquire);
}
State() {
public:
ConcurrentTest t_;
int seed_;
- port::AtomicPointer quit_flag_;
+ std::atomic<bool> quit_flag_;
enum ReaderState {
STARTING,
explicit TestState(int s)
: seed_(s),
- quit_flag_(NULL),
+ quit_flag_(false),
state_(STARTING),
state_cv_(&mu_) {}
- void Wait(ReaderState s) {
+ void Wait(ReaderState s) LOCKS_EXCLUDED(mu_) {
mu_.Lock();
while (state_ != s) {
state_cv_.Wait();
mu_.Unlock();
}
- void Change(ReaderState s) {
+ void Change(ReaderState s) LOCKS_EXCLUDED(mu_) {
mu_.Lock();
state_ = s;
state_cv_.Signal();
private:
port::Mutex mu_;
- ReaderState state_;
- port::CondVar state_cv_;
+ ReaderState state_ GUARDED_BY(mu_);
+ port::CondVar state_cv_ GUARDED_BY(mu_);
};
static void ConcurrentReader(void* arg) {
Random rnd(state->seed_);
int64_t reads = 0;
state->Change(TestState::RUNNING);
- while (!state->quit_flag_.Acquire_Load()) {
+ while (!state->quit_flag_.load(std::memory_order_acquire)) {
state->t_.ReadStep(&rnd);
++reads;
}
for (int i = 0; i < kSize; i++) {
state.t_.WriteStep(&rnd);
}
- state.quit_flag_.Release_Store(&state); // Any non-NULL arg will do
+ state.quit_flag_.store(true, std::memory_order_release);
state.Wait(TestState::DONE);
}
}
// Each SnapshotImpl corresponds to a particular sequence number.
class SnapshotImpl : public Snapshot {
public:
- SequenceNumber number_; // const after creation
+ SnapshotImpl(SequenceNumber sequence_number)
+ : sequence_number_(sequence_number) {}
+
+ SequenceNumber sequence_number() const { return sequence_number_; }
private:
friend class SnapshotList;
- // SnapshotImpl is kept in a doubly-linked circular list
+ // SnapshotImpl is kept in a doubly-linked circular list. The SnapshotList
+  // implementation operates on the next/previous fields directly.
SnapshotImpl* prev_;
SnapshotImpl* next_;
- SnapshotList* list_; // just for sanity checks
+ const SequenceNumber sequence_number_;
+
+#if !defined(NDEBUG)
+ SnapshotList* list_ = nullptr;
+#endif // !defined(NDEBUG)
};
class SnapshotList {
public:
- SnapshotList() {
- list_.prev_ = &list_;
- list_.next_ = &list_;
+ SnapshotList() : head_(0) {
+ head_.prev_ = &head_;
+ head_.next_ = &head_;
}
- bool empty() const { return list_.next_ == &list_; }
- SnapshotImpl* oldest() const { assert(!empty()); return list_.next_; }
- SnapshotImpl* newest() const { assert(!empty()); return list_.prev_; }
-
- const SnapshotImpl* New(SequenceNumber seq) {
- SnapshotImpl* s = new SnapshotImpl;
- s->number_ = seq;
- s->list_ = this;
- s->next_ = &list_;
- s->prev_ = list_.prev_;
- s->prev_->next_ = s;
- s->next_->prev_ = s;
- return s;
+ bool empty() const { return head_.next_ == &head_; }
+ SnapshotImpl* oldest() const { assert(!empty()); return head_.next_; }
+ SnapshotImpl* newest() const { assert(!empty()); return head_.prev_; }
+
+ // Creates a SnapshotImpl and appends it to the end of the list.
+ SnapshotImpl* New(SequenceNumber sequence_number) {
+ assert(empty() || newest()->sequence_number_ <= sequence_number);
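+    // Sequence numbers are handed out in non-decreasing order, so appending
+    // keeps the list sorted from oldest() at head_.next_ to newest() at
+    // head_.prev_.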
+
+ SnapshotImpl* snapshot = new SnapshotImpl(sequence_number);
+
+#if !defined(NDEBUG)
+ snapshot->list_ = this;
+#endif // !defined(NDEBUG)
+ snapshot->next_ = &head_;
+ snapshot->prev_ = head_.prev_;
+ snapshot->prev_->next_ = snapshot;
+ snapshot->next_->prev_ = snapshot;
+ return snapshot;
}
- void Delete(const SnapshotImpl* s) {
- assert(s->list_ == this);
- s->prev_->next_ = s->next_;
- s->next_->prev_ = s->prev_;
- delete s;
+ // Removes a SnapshotImpl from this list.
+ //
+ // The snapshot must have been created by calling New() on this list.
+ //
+ // The snapshot pointer should not be const, because its memory is
+ // deallocated. However, that would force us to change DB::ReleaseSnapshot(),
+  // which is in the API and currently takes a const Snapshot pointer.
+ void Delete(const SnapshotImpl* snapshot) {
+#if !defined(NDEBUG)
+ assert(snapshot->list_ == this);
+#endif // !defined(NDEBUG)
+ snapshot->prev_->next_ = snapshot->next_;
+ snapshot->next_->prev_ = snapshot->prev_;
+ delete snapshot;
}
private:
// Dummy head of doubly-linked list of snapshots
- SnapshotImpl list_;
+ SnapshotImpl head_;
};
} // namespace leveldb
}
TableCache::TableCache(const std::string& dbname,
- const Options* options,
+ const Options& options,
int entries)
- : env_(options->env),
+ : env_(options.env),
dbname_(dbname),
options_(options),
cache_(NewLRUCache(entries)) {
EncodeFixed64(buf, file_number);
Slice key(buf, sizeof(buf));
*handle = cache_->Lookup(key);
- if (*handle == NULL) {
+ if (*handle == nullptr) {
std::string fname = TableFileName(dbname_, file_number);
- RandomAccessFile* file = NULL;
- Table* table = NULL;
+ RandomAccessFile* file = nullptr;
+ Table* table = nullptr;
s = env_->NewRandomAccessFile(fname, &file);
if (!s.ok()) {
std::string old_fname = SSTTableFileName(dbname_, file_number);
}
}
if (s.ok()) {
- s = Table::Open(*options_, file, file_size, &table);
+ s = Table::Open(options_, file, file_size, &table);
}
if (!s.ok()) {
- assert(table == NULL);
+ assert(table == nullptr);
delete file;
// We do not cache error results so that if the error is transient,
// or somebody repairs the file, we recover automatically.
uint64_t file_number,
uint64_t file_size,
Table** tableptr) {
- if (tableptr != NULL) {
- *tableptr = NULL;
+ if (tableptr != nullptr) {
+ *tableptr = nullptr;
}
- Cache::Handle* handle = NULL;
+ Cache::Handle* handle = nullptr;
Status s = FindTable(file_number, file_size, &handle);
if (!s.ok()) {
return NewErrorIterator(s);
Table* table = reinterpret_cast<TableAndFile*>(cache_->Value(handle))->table;
Iterator* result = table->NewIterator(options);
result->RegisterCleanup(&UnrefEntry, cache_, handle);
- if (tableptr != NULL) {
+ if (tableptr != nullptr) {
*tableptr = table;
}
return result;
const Slice& k,
void* arg,
void (*saver)(void*, const Slice&, const Slice&)) {
- Cache::Handle* handle = NULL;
+ Cache::Handle* handle = nullptr;
Status s = FindTable(file_number, file_size, &handle);
if (s.ok()) {
Table* t = reinterpret_cast<TableAndFile*>(cache_->Value(handle))->table;
class TableCache {
public:
- TableCache(const std::string& dbname, const Options* options, int entries);
+ TableCache(const std::string& dbname, const Options& options, int entries);
~TableCache();
// Return an iterator for the specified file number (the corresponding
// file length must be exactly "file_size" bytes). If "tableptr" is
- // non-NULL, also sets "*tableptr" to point to the Table object
- // underlying the returned iterator, or NULL if no Table object underlies
- // the returned iterator. The returned "*tableptr" object is owned by
- // the cache and should not be deleted, and is valid for as long as the
+ // non-null, also sets "*tableptr" to point to the Table object
+ // underlying the returned iterator, or to nullptr if no Table object
+ // underlies the returned iterator. The returned "*tableptr" object is owned
+ // by the cache and should not be deleted, and is valid for as long as the
// returned iterator is live.
Iterator* NewIterator(const ReadOptions& options,
uint64_t file_number,
uint64_t file_size,
- Table** tableptr = NULL);
+ Table** tableptr = nullptr);
// If a seek to internal key "k" in specified file finds an entry,
// call (*handle_result)(arg, found_key, found_value).
private:
Env* const env_;
const std::string dbname_;
- const Options* options_;
+ const Options& options_;
Cache* cache_;
Status FindTable(uint64_t file_number, uint64_t file_size, Cache::Handle**);
Status VersionEdit::DecodeFrom(const Slice& src) {
Clear();
Slice input = src;
- const char* msg = NULL;
+ const char* msg = nullptr;
uint32_t tag;
// Temporary storage for parsing
Slice str;
InternalKey key;
- while (msg == NULL && GetVarint32(&input, &tag)) {
+ while (msg == nullptr && GetVarint32(&input, &tag)) {
switch (tag) {
case kComparator:
if (GetLengthPrefixedSlice(&input, &str)) {
}
}
- if (msg == NULL && !input.empty()) {
+ if (msg == nullptr && !input.empty()) {
msg = "invalid tag";
}
Status result;
- if (msg != NULL) {
+ if (msg != nullptr) {
result = Status::Corruption("VersionEdit", msg);
}
return result;
namespace leveldb {
-static int TargetFileSize(const Options* options) {
+static size_t TargetFileSize(const Options* options) {
return options->max_file_size;
}
static bool AfterFile(const Comparator* ucmp,
const Slice* user_key, const FileMetaData* f) {
- // NULL user_key occurs before all keys and is therefore never after *f
- return (user_key != NULL &&
+ // null user_key occurs before all keys and is therefore never after *f
+ return (user_key != nullptr &&
ucmp->Compare(*user_key, f->largest.user_key()) > 0);
}
static bool BeforeFile(const Comparator* ucmp,
const Slice* user_key, const FileMetaData* f) {
- // NULL user_key occurs after all keys and is therefore never before *f
- return (user_key != NULL &&
+ // null user_key occurs after all keys and is therefore never before *f
+ return (user_key != nullptr &&
ucmp->Compare(*user_key, f->smallest.user_key()) < 0);
}
// Binary search over file list
uint32_t index = 0;
- if (smallest_user_key != NULL) {
+ if (smallest_user_key != nullptr) {
// Find the earliest possible internal key for smallest_user_key
- InternalKey small(*smallest_user_key, kMaxSequenceNumber,kValueTypeForSeek);
- index = FindFile(icmp, files, small.Encode());
+    InternalKey small_key(*smallest_user_key, kMaxSequenceNumber, kValueTypeForSeek);
+ index = FindFile(icmp, files, small_key.Encode());
}
if (index >= files.size()) {
const Comparator* ucmp = vset_->icmp_.user_comparator();
Status s;
- stats->seek_file = NULL;
+ stats->seek_file = nullptr;
stats->seek_file_level = -1;
- FileMetaData* last_file_read = NULL;
+ FileMetaData* last_file_read = nullptr;
int last_file_read_level = -1;
// We can search level-by-level since entries never hop across
// levels. Therefore we are guaranteed that if we find data
- // in an smaller level, later levels are irrelevant.
+ // in a smaller level, later levels are irrelevant.
std::vector<FileMetaData*> tmp;
FileMetaData* tmp2;
for (int level = 0; level < config::kNumLevels; level++) {
// Binary search to find earliest index whose largest key >= ikey.
uint32_t index = FindFile(vset_->icmp_, files_[level], ikey);
if (index >= num_files) {
- files = NULL;
+ files = nullptr;
num_files = 0;
} else {
tmp2 = files[index];
if (ucmp->Compare(user_key, tmp2->smallest.user_key()) < 0) {
// All of "tmp2" is past any data for user_key
- files = NULL;
+ files = nullptr;
num_files = 0;
} else {
files = &tmp2;
}
for (uint32_t i = 0; i < num_files; ++i) {
- if (last_file_read != NULL && stats->seek_file == NULL) {
+ if (last_file_read != nullptr && stats->seek_file == nullptr) {
// We have had more than one seek for this read. Charge the 1st file.
stats->seek_file = last_file_read;
stats->seek_file_level = last_file_read_level;
bool Version::UpdateStats(const GetStats& stats) {
FileMetaData* f = stats.seek_file;
- if (f != NULL) {
+ if (f != nullptr) {
f->allowed_seeks--;
- if (f->allowed_seeks <= 0 && file_to_compact_ == NULL) {
+ if (f->allowed_seeks <= 0 && file_to_compact_ == nullptr) {
file_to_compact_ = f;
file_to_compact_level_ = stats.seek_file_level;
return true;
assert(level < config::kNumLevels);
inputs->clear();
Slice user_begin, user_end;
- if (begin != NULL) {
+ if (begin != nullptr) {
user_begin = begin->user_key();
}
- if (end != NULL) {
+ if (end != nullptr) {
user_end = end->user_key();
}
const Comparator* user_cmp = vset_->icmp_.user_comparator();
FileMetaData* f = files_[level][i++];
const Slice file_start = f->smallest.user_key();
const Slice file_limit = f->largest.user_key();
- if (begin != NULL && user_cmp->Compare(file_limit, user_begin) < 0) {
+ if (begin != nullptr && user_cmp->Compare(file_limit, user_begin) < 0) {
// "f" is completely before specified range; skip it
- } else if (end != NULL && user_cmp->Compare(file_start, user_end) > 0) {
+ } else if (end != nullptr && user_cmp->Compare(file_start, user_end) > 0) {
// "f" is completely after specified range; skip it
} else {
inputs->push_back(f);
if (level == 0) {
// Level-0 files may overlap each other. So check if the newly
// added file has expanded the range. If so, restart search.
- if (begin != NULL && user_cmp->Compare(file_start, user_begin) < 0) {
+ if (begin != nullptr && user_cmp->Compare(file_start, user_begin) < 0) {
user_begin = file_start;
inputs->clear();
i = 0;
- } else if (end != NULL && user_cmp->Compare(file_limit, user_end) > 0) {
+ } else if (end != nullptr && user_cmp->Compare(file_limit,
+ user_end) > 0) {
user_end = file_limit;
inputs->clear();
i = 0;
// same as the compaction of 40KB of data. We are a little
// conservative and allow approximately one seek for every 16KB
// of data before triggering a compaction.
- f->allowed_seeks = (f->file_size / 16384);
+ f->allowed_seeks = static_cast<int>((f->file_size / 16384U));
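+      // For example, a 2MB table gets 2 * 1048576 / 16384 = 128 allowed seeks,
+      // while anything smaller than ~1.6MB is raised to the floor of 100 below.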
if (f->allowed_seeks < 100) f->allowed_seeks = 100;
levels_[level].deleted_files.erase(f->number);
last_sequence_(0),
log_number_(0),
prev_log_number_(0),
- descriptor_file_(NULL),
- descriptor_log_(NULL),
+ descriptor_file_(nullptr),
+ descriptor_log_(nullptr),
dummy_versions_(this),
- current_(NULL) {
+ current_(nullptr) {
AppendVersion(new Version(this));
}
// Make "v" current
assert(v->refs_ == 0);
assert(v != current_);
- if (current_ != NULL) {
+ if (current_ != nullptr) {
current_->Unref();
}
current_ = v;
// a temporary file that contains a snapshot of the current version.
std::string new_manifest_file;
Status s;
- if (descriptor_log_ == NULL) {
+ if (descriptor_log_ == nullptr) {
// No reason to unlock *mu here since we only hit this path in the
// first call to LogAndApply (when opening the database).
- assert(descriptor_file_ == NULL);
+ assert(descriptor_file_ == nullptr);
new_manifest_file = DescriptorFileName(dbname_, manifest_file_number_);
edit->SetNextFile(next_file_number_);
s = env_->NewWritableFile(new_manifest_file, &descriptor_file_);
if (!new_manifest_file.empty()) {
delete descriptor_log_;
delete descriptor_file_;
- descriptor_log_ = NULL;
- descriptor_file_ = NULL;
+ descriptor_log_ = nullptr;
+ descriptor_file_ = nullptr;
env_->DeleteFile(new_manifest_file);
}
}
SequentialFile* file;
s = env_->NewSequentialFile(dscname, &file);
if (!s.ok()) {
+ if (s.IsNotFound()) {
+ return Status::Corruption(
+ "CURRENT points to a non-existent file", s.ToString());
+ }
return s;
}
}
}
delete file;
- file = NULL;
+ file = nullptr;
if (s.ok()) {
if (!have_next_file) {
return false;
}
- assert(descriptor_file_ == NULL);
- assert(descriptor_log_ == NULL);
+ assert(descriptor_file_ == nullptr);
+ assert(descriptor_log_ == nullptr);
Status r = env_->NewAppendableFile(dscname, &descriptor_file_);
if (!r.ok()) {
Log(options_->info_log, "Reuse MANIFEST: %s\n", r.ToString().c_str());
- assert(descriptor_file_ == NULL);
+ assert(descriptor_file_ == nullptr);
return false;
}
Table* tableptr;
Iterator* iter = table_cache_->NewIterator(
ReadOptions(), files[i]->number, files[i]->file_size, &tableptr);
- if (tableptr != NULL) {
+ if (tableptr != nullptr) {
result += tableptr->ApproximateOffsetOf(ikey.Encode());
}
delete iter;
// We prefer compactions triggered by too much data in a level over
// the compactions triggered by seeks.
const bool size_compaction = (current_->compaction_score_ >= 1);
- const bool seek_compaction = (current_->file_to_compact_ != NULL);
+ const bool seek_compaction = (current_->file_to_compact_ != nullptr);
if (size_compaction) {
level = current_->compaction_level_;
assert(level >= 0);
c = new Compaction(options_, level);
c->inputs_[0].push_back(current_->file_to_compact_);
} else {
- return NULL;
+ return nullptr;
}
c->input_version_ = current_;
&c->grandparents_);
}
- if (false) {
- Log(options_->info_log, "Compacting %d '%s' .. '%s'",
- level,
- smallest.DebugString().c_str(),
- largest.DebugString().c_str());
- }
-
// Update the place where we will do the next compaction for this level.
// We update this immediately instead of waiting for the VersionEdit
// to be applied so that if the compaction fails, we will try a different
std::vector<FileMetaData*> inputs;
current_->GetOverlappingInputs(level, begin, end, &inputs);
if (inputs.empty()) {
- return NULL;
+ return nullptr;
}
// Avoid compacting too much in one shot in case the range is large.
Compaction::Compaction(const Options* options, int level)
: level_(level),
max_output_file_size_(MaxFileSizeForLevel(options, level)),
- input_version_(NULL),
+ input_version_(nullptr),
grandparent_index_(0),
seen_key_(false),
overlapped_bytes_(0) {
}
Compaction::~Compaction() {
- if (input_version_ != NULL) {
+ if (input_version_ != nullptr) {
input_version_->Unref();
}
}
}
void Compaction::ReleaseInputs() {
- if (input_version_ != NULL) {
+ if (input_version_ != nullptr) {
input_version_->Unref();
- input_version_ = NULL;
+ input_version_ = nullptr;
}
}
// Return the smallest index i such that files[i]->largest >= key.
// Return files.size() if there is no such file.
// REQUIRES: "files" contains a sorted list of non-overlapping files.
-extern int FindFile(const InternalKeyComparator& icmp,
- const std::vector<FileMetaData*>& files,
- const Slice& key);
+int FindFile(const InternalKeyComparator& icmp,
+ const std::vector<FileMetaData*>& files,
+ const Slice& key);
// Returns true iff some file in "files" overlaps the user key range
// [*smallest,*largest].
-// smallest==NULL represents a key smaller than all keys in the DB.
-// largest==NULL represents a key largest than all keys in the DB.
+// smallest==nullptr represents a key smaller than all keys in the DB.
+// largest==nullptr represents a key larger than all keys in the DB.
// REQUIRES: If disjoint_sorted_files, files[] contains disjoint ranges
// in sorted order.
-extern bool SomeFileOverlapsRange(
- const InternalKeyComparator& icmp,
- bool disjoint_sorted_files,
- const std::vector<FileMetaData*>& files,
- const Slice* smallest_user_key,
- const Slice* largest_user_key);
+bool SomeFileOverlapsRange(const InternalKeyComparator& icmp,
+ bool disjoint_sorted_files,
+ const std::vector<FileMetaData*>& files,
+ const Slice* smallest_user_key,
+ const Slice* largest_user_key);
class Version {
public:
void GetOverlappingInputs(
int level,
- const InternalKey* begin, // NULL means before all keys
- const InternalKey* end, // NULL means after all keys
+ const InternalKey* begin, // nullptr means before all keys
+ const InternalKey* end, // nullptr means after all keys
std::vector<FileMetaData*>* inputs);
// Returns true iff some file in the specified level overlaps
// some part of [*smallest_user_key,*largest_user_key].
- // smallest_user_key==NULL represents a key smaller than all keys in the DB.
- // largest_user_key==NULL represents a key largest than all keys in the DB.
+ // smallest_user_key==nullptr represents a key smaller than all the DB's keys.
+  // largest_user_key==nullptr represents a key larger than all the DB's keys.
bool OverlapInLevel(int level,
const Slice* smallest_user_key,
const Slice* largest_user_key);
explicit Version(VersionSet* vset)
: vset_(vset), next_(this), prev_(this), refs_(0),
- file_to_compact_(NULL),
+ file_to_compact_(nullptr),
file_to_compact_level_(-1),
compaction_score_(-1),
compaction_level_(-1) {
uint64_t PrevLogNumber() const { return prev_log_number_; }
// Pick level and inputs for a new compaction.
- // Returns NULL if there is no compaction to be done.
+ // Returns nullptr if there is no compaction to be done.
// Otherwise returns a pointer to a heap-allocated object that
// describes the compaction. Caller should delete the result.
Compaction* PickCompaction();
// Return a compaction object for compacting the range [begin,end] in
- // the specified level. Returns NULL if there is nothing in that
+ // the specified level. Returns nullptr if there is nothing in that
// level that overlaps the specified range. Caller should delete
// the result.
Compaction* CompactRange(
// Returns true iff some level needs a compaction.
bool NeedsCompaction() const {
Version* v = current_;
- return (v->compaction_score_ >= 1) || (v->file_to_compact_ != NULL);
+ return (v->compaction_score_ >= 1) || (v->file_to_compact_ != nullptr);
}
// Add all files listed in any live version to *live.
bool Overlaps(const char* smallest, const char* largest) {
InternalKeyComparator cmp(BytewiseComparator());
- Slice s(smallest != NULL ? smallest : "");
- Slice l(largest != NULL ? largest : "");
+ Slice s(smallest != nullptr ? smallest : "");
+ Slice l(largest != nullptr ? largest : "");
return SomeFileOverlapsRange(cmp, disjoint_sorted_files_, files_,
- (smallest != NULL ? &s : NULL),
- (largest != NULL ? &l : NULL));
+ (smallest != nullptr ? &s : nullptr),
+ (largest != nullptr ? &l : nullptr));
}
};
TEST(FindFileTest, Empty) {
ASSERT_EQ(0, Find("foo"));
ASSERT_TRUE(! Overlaps("a", "z"));
- ASSERT_TRUE(! Overlaps(NULL, "z"));
- ASSERT_TRUE(! Overlaps("a", NULL));
- ASSERT_TRUE(! Overlaps(NULL, NULL));
+ ASSERT_TRUE(! Overlaps(nullptr, "z"));
+ ASSERT_TRUE(! Overlaps("a", nullptr));
+ ASSERT_TRUE(! Overlaps(nullptr, nullptr));
}
TEST(FindFileTest, Single) {
ASSERT_TRUE(Overlaps("q", "q"));
ASSERT_TRUE(Overlaps("q", "q1"));
- ASSERT_TRUE(! Overlaps(NULL, "j"));
- ASSERT_TRUE(! Overlaps("r", NULL));
- ASSERT_TRUE(Overlaps(NULL, "p"));
- ASSERT_TRUE(Overlaps(NULL, "p1"));
- ASSERT_TRUE(Overlaps("q", NULL));
- ASSERT_TRUE(Overlaps(NULL, NULL));
+ ASSERT_TRUE(! Overlaps(nullptr, "j"));
+ ASSERT_TRUE(! Overlaps("r", nullptr));
+ ASSERT_TRUE(Overlaps(nullptr, "p"));
+ ASSERT_TRUE(Overlaps(nullptr, "p1"));
+ ASSERT_TRUE(Overlaps("q", nullptr));
+ ASSERT_TRUE(Overlaps(nullptr, nullptr));
}
Add("200", "250");
Add("300", "350");
Add("400", "450");
- ASSERT_TRUE(! Overlaps(NULL, "149"));
- ASSERT_TRUE(! Overlaps("451", NULL));
- ASSERT_TRUE(Overlaps(NULL, NULL));
- ASSERT_TRUE(Overlaps(NULL, "150"));
- ASSERT_TRUE(Overlaps(NULL, "199"));
- ASSERT_TRUE(Overlaps(NULL, "200"));
- ASSERT_TRUE(Overlaps(NULL, "201"));
- ASSERT_TRUE(Overlaps(NULL, "400"));
- ASSERT_TRUE(Overlaps(NULL, "800"));
- ASSERT_TRUE(Overlaps("100", NULL));
- ASSERT_TRUE(Overlaps("200", NULL));
- ASSERT_TRUE(Overlaps("449", NULL));
- ASSERT_TRUE(Overlaps("450", NULL));
+ ASSERT_TRUE(! Overlaps(nullptr, "149"));
+ ASSERT_TRUE(! Overlaps("451", nullptr));
+ ASSERT_TRUE(Overlaps(nullptr, nullptr));
+ ASSERT_TRUE(Overlaps(nullptr, "150"));
+ ASSERT_TRUE(Overlaps(nullptr, "199"));
+ ASSERT_TRUE(Overlaps(nullptr, "200"));
+ ASSERT_TRUE(Overlaps(nullptr, "201"));
+ ASSERT_TRUE(Overlaps(nullptr, "400"));
+ ASSERT_TRUE(Overlaps(nullptr, "800"));
+ ASSERT_TRUE(Overlaps("100", nullptr));
+ ASSERT_TRUE(Overlaps("200", nullptr));
+ ASSERT_TRUE(Overlaps("449", nullptr));
+ ASSERT_TRUE(Overlaps("450", nullptr));
}
TEST(FindFileTest, OverlapSequenceChecks) {
rep_.resize(kHeader);
}
+size_t WriteBatch::ApproximateSize() const {
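+  // rep_ is the serialized batch: a fixed-size sequence/count header followed
+  // by one record per operation, so its length tracks how many bytes the
+  // batch will add to the log.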
+ return rep_.size();
+}
+
Status WriteBatch::Iterate(Handler* handler) const {
Slice input(rep_);
if (input.size() < kHeader) {
PutLengthPrefixedSlice(&rep_, key);
}
+void WriteBatch::Append(const WriteBatch& source) {
+ WriteBatchInternal::Append(this, &source);
+}
+
namespace {
class MemTableInserter : public WriteBatch::Handler {
public:
WriteBatch b1, b2;
WriteBatchInternal::SetSequence(&b1, 200);
WriteBatchInternal::SetSequence(&b2, 300);
- WriteBatchInternal::Append(&b1, &b2);
+ b1.Append(b2);
ASSERT_EQ("",
PrintContents(&b1));
b2.Put("a", "va");
- WriteBatchInternal::Append(&b1, &b2);
+ b1.Append(b2);
ASSERT_EQ("Put(a, va)@200",
PrintContents(&b1));
b2.Clear();
b2.Put("b", "vb");
- WriteBatchInternal::Append(&b1, &b2);
+ b1.Append(b2);
ASSERT_EQ("Put(a, va)@200"
"Put(b, vb)@201",
PrintContents(&b1));
b2.Delete("foo");
- WriteBatchInternal::Append(&b1, &b2);
+ b1.Append(b2);
ASSERT_EQ("Put(a, va)@200"
"Put(b, vb)@202"
"Put(b, vb)@201"
PrintContents(&b1));
}
+TEST(WriteBatchTest, ApproximateSize) {
+ WriteBatch batch;
+ size_t empty_size = batch.ApproximateSize();
+
+ batch.Put(Slice("foo"), Slice("bar"));
+ size_t one_key_size = batch.ApproximateSize();
+ ASSERT_LT(empty_size, one_key_size);
+
+ batch.Put(Slice("baz"), Slice("boo"));
+ size_t two_keys_size = batch.ApproximateSize();
+ ASSERT_LT(one_key_size, two_keys_size);
+
+ batch.Delete(Slice("box"));
+ size_t post_delete_size = batch.ApproximateSize();
+ ASSERT_LT(two_keys_size, post_delete_size);
+}
+
} // namespace leveldb
int main(int argc, char** argv) {
static bool FLAGS_WAL_enabled = true;
// Use the db with the following name.
-static const char* FLAGS_db = NULL;
+static const char* FLAGS_db = nullptr;
inline
static void ExecErrorCheck(int status, char *err_msg) {
static void WalCheckpoint(sqlite3* db_) {
// Flush all writes to disk
if (FLAGS_WAL_enabled) {
- sqlite3_wal_checkpoint_v2(db_, NULL, SQLITE_CHECKPOINT_FULL, NULL, NULL);
+ sqlite3_wal_checkpoint_v2(db_, nullptr, SQLITE_CHECKPOINT_FULL, nullptr,
+ nullptr);
}
}
fprintf(stderr, "SQLite: version %s\n", SQLITE_VERSION);
#if defined(__linux)
- time_t now = time(NULL);
+ time_t now = time(nullptr);
fprintf(stderr, "Date: %s", ctime(&now)); // ctime() adds newline
FILE* cpuinfo = fopen("/proc/cpuinfo", "r");
- if (cpuinfo != NULL) {
+ if (cpuinfo != nullptr) {
char line[1000];
int num_cpus = 0;
std::string cpu_type;
std::string cache_size;
- while (fgets(line, sizeof(line), cpuinfo) != NULL) {
+ while (fgets(line, sizeof(line), cpuinfo) != nullptr) {
const char* sep = strchr(line, ':');
- if (sep == NULL) {
+ if (sep == nullptr) {
continue;
}
Slice key = TrimSpace(Slice(line, sep - 1 - line));
};
Benchmark()
- : db_(NULL),
+ : db_(nullptr),
db_num_(0),
num_(FLAGS_num),
reads_(FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads),
Open();
const char* benchmarks = FLAGS_benchmarks;
- while (benchmarks != NULL) {
+ while (benchmarks != nullptr) {
const char* sep = strchr(benchmarks, ',');
Slice name;
- if (sep == NULL) {
+ if (sep == nullptr) {
name = benchmarks;
- benchmarks = NULL;
+ benchmarks = nullptr;
} else {
name = Slice(benchmarks, sep - benchmarks);
benchmarks = sep + 1;
}
void Open() {
- assert(db_ == NULL);
+ assert(db_ == nullptr);
int status;
char file_name[100];
- char* err_msg = NULL;
+ char* err_msg = nullptr;
db_num_++;
// Open database
char cache_size[100];
snprintf(cache_size, sizeof(cache_size), "PRAGMA cache_size = %d",
FLAGS_num_pages);
- status = sqlite3_exec(db_, cache_size, NULL, NULL, &err_msg);
+ status = sqlite3_exec(db_, cache_size, nullptr, nullptr, &err_msg);
ExecErrorCheck(status, err_msg);
// FLAGS_page_size is defaulted to 1024
char page_size[100];
snprintf(page_size, sizeof(page_size), "PRAGMA page_size = %d",
FLAGS_page_size);
- status = sqlite3_exec(db_, page_size, NULL, NULL, &err_msg);
+ status = sqlite3_exec(db_, page_size, nullptr, nullptr, &err_msg);
ExecErrorCheck(status, err_msg);
}
// LevelDB's default cache size is a combined 4 MB
std::string WAL_checkpoint = "PRAGMA wal_autocheckpoint = 4096";
- status = sqlite3_exec(db_, WAL_stmt.c_str(), NULL, NULL, &err_msg);
+ status = sqlite3_exec(db_, WAL_stmt.c_str(), nullptr, nullptr, &err_msg);
ExecErrorCheck(status, err_msg);
- status = sqlite3_exec(db_, WAL_checkpoint.c_str(), NULL, NULL, &err_msg);
+ status = sqlite3_exec(db_, WAL_checkpoint.c_str(), nullptr, nullptr,
+ &err_msg);
ExecErrorCheck(status, err_msg);
}
std::string stmt_array[] = { locking_stmt, create_stmt };
int stmt_array_length = sizeof(stmt_array) / sizeof(std::string);
for (int i = 0; i < stmt_array_length; i++) {
- status = sqlite3_exec(db_, stmt_array[i].c_str(), NULL, NULL, &err_msg);
+ status = sqlite3_exec(db_, stmt_array[i].c_str(), nullptr, nullptr,
+ &err_msg);
ExecErrorCheck(status, err_msg);
}
}
return;
}
sqlite3_close(db_);
- db_ = NULL;
+ db_ = nullptr;
Open();
Start();
}
message_ = msg;
}
- char* err_msg = NULL;
+ char* err_msg = nullptr;
int status;
sqlite3_stmt *replace_stmt, *begin_trans_stmt, *end_trans_stmt;
// Check for synchronous flag in options
std::string sync_stmt = (write_sync) ? "PRAGMA synchronous = FULL" :
"PRAGMA synchronous = OFF";
- status = sqlite3_exec(db_, sync_stmt.c_str(), NULL, NULL, &err_msg);
+ status = sqlite3_exec(db_, sync_stmt.c_str(), nullptr, nullptr, &err_msg);
ExecErrorCheck(status, err_msg);
// Preparing sqlite3 statements
status = sqlite3_prepare_v2(db_, replace_str.c_str(), -1,
- &replace_stmt, NULL);
+ &replace_stmt, nullptr);
ErrorCheck(status);
status = sqlite3_prepare_v2(db_, begin_trans_str.c_str(), -1,
- &begin_trans_stmt, NULL);
+ &begin_trans_stmt, nullptr);
ErrorCheck(status);
status = sqlite3_prepare_v2(db_, end_trans_str.c_str(), -1,
- &end_trans_stmt, NULL);
+ &end_trans_stmt, nullptr);
ErrorCheck(status);
bool transaction = (entries_per_batch > 1);
// Preparing sqlite3 statements
status = sqlite3_prepare_v2(db_, begin_trans_str.c_str(), -1,
- &begin_trans_stmt, NULL);
+ &begin_trans_stmt, nullptr);
ErrorCheck(status);
status = sqlite3_prepare_v2(db_, end_trans_str.c_str(), -1,
- &end_trans_stmt, NULL);
+ &end_trans_stmt, nullptr);
ErrorCheck(status);
- status = sqlite3_prepare_v2(db_, read_str.c_str(), -1, &read_stmt, NULL);
+ status = sqlite3_prepare_v2(db_, read_str.c_str(), -1, &read_stmt, nullptr);
ErrorCheck(status);
bool transaction = (entries_per_batch > 1);
sqlite3_stmt *pStmt;
std::string read_str = "SELECT * FROM test ORDER BY key";
- status = sqlite3_prepare_v2(db_, read_str.c_str(), -1, &pStmt, NULL);
+ status = sqlite3_prepare_v2(db_, read_str.c_str(), -1, &pStmt, nullptr);
ErrorCheck(status);
for (int i = 0; i < reads_ && SQLITE_ROW == sqlite3_step(pStmt); i++) {
bytes_ += sqlite3_column_bytes(pStmt, 1) + sqlite3_column_bytes(pStmt, 2);
}
// Choose a location for the test database if none given with --db=<path>
- if (FLAGS_db == NULL) {
+ if (FLAGS_db == nullptr) {
leveldb::Env::Default()->GetTestDirectory(&default_db_path);
default_db_path += "/dbbench";
FLAGS_db = default_db_path.c_str();
static bool FLAGS_compression = true;
// Use the db with the following name.
-static const char* FLAGS_db = NULL;
+static const char* FLAGS_db = nullptr;
inline
static void DBSynchronize(kyotocabinet::TreeDB* db_)
kyotocabinet::VERSION, kyotocabinet::LIBVER, kyotocabinet::LIBREV);
#if defined(__linux)
- time_t now = time(NULL);
+ time_t now = time(nullptr);
fprintf(stderr, "Date: %s", ctime(&now)); // ctime() adds newline
FILE* cpuinfo = fopen("/proc/cpuinfo", "r");
- if (cpuinfo != NULL) {
+ if (cpuinfo != nullptr) {
char line[1000];
int num_cpus = 0;
std::string cpu_type;
std::string cache_size;
- while (fgets(line, sizeof(line), cpuinfo) != NULL) {
+ while (fgets(line, sizeof(line), cpuinfo) != nullptr) {
const char* sep = strchr(line, ':');
- if (sep == NULL) {
+ if (sep == nullptr) {
continue;
}
Slice key = TrimSpace(Slice(line, sep - 1 - line));
};
Benchmark()
- : db_(NULL),
+ : db_(nullptr),
num_(FLAGS_num),
reads_(FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads),
bytes_(0),
Open(false);
const char* benchmarks = FLAGS_benchmarks;
- while (benchmarks != NULL) {
+ while (benchmarks != nullptr) {
const char* sep = strchr(benchmarks, ',');
Slice name;
- if (sep == NULL) {
+ if (sep == nullptr) {
name = benchmarks;
- benchmarks = NULL;
+ benchmarks = nullptr;
} else {
name = Slice(benchmarks, sep - benchmarks);
benchmarks = sep + 1;
private:
void Open(bool sync) {
- assert(db_ == NULL);
+ assert(db_ == nullptr);
// Initialize db_
db_ = new kyotocabinet::TreeDB();
return;
}
delete db_;
- db_ = NULL;
+ db_ = nullptr;
Open(sync);
Start(); // Do not count time taken to destroy/open
}
}
// Choose a location for the test database if none given with --db=<path>
- if (FLAGS_db == NULL) {
+ if (FLAGS_db == nullptr) {
leveldb::Env::Default()->GetTestDirectory(&default_db_path);
default_db_path += "/dbbench";
FLAGS_db = default_db_path.c_str();
## Level 0
-When the log file grows above a certain size (1MB by default):
-Create a brand new memtable and log file and direct future updates here
+When the log file grows above a certain size (4MB by default):
+Create a brand new memtable and log file and direct future updates here.
+
In the background:
-Write the contents of the previous memtable to an sstable
-Discard the memtable
-Delete the old log file and the old memtable
-Add the new sstable to the young (level-0) level.
+
+1. Write the contents of the previous memtable to an sstable.
+2. Discard the memtable.
+3. Delete the old log file and the old memtable.
+4. Add the new sstable to the young (level-0) level.
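The following is a minimal, self-contained C++ sketch of this rotation flow, offered only as an illustration of the steps above; it is not LevelDB's actual implementation, and all names (`ToyMemTable`, `ToyDB`, `kLogSizeThreshold`, `FlushInBackground`) are hypothetical.

```c++
// Toy sketch of the memtable/log switch described above. Hypothetical names;
// not LevelDB's internal types.
#include <cstdint>
#include <map>
#include <memory>
#include <string>

namespace toy {

const uint64_t kLogSizeThreshold = 4 * 1024 * 1024;  // 4MB, as above.

struct ToyMemTable {
  std::map<std::string, std::string> entries;
};

struct ToyDB {
  std::unique_ptr<ToyMemTable> mem{new ToyMemTable};  // receives new updates
  std::unique_ptr<ToyMemTable> imm;  // previous memtable, being flushed
  uint64_t log_bytes = 0;            // size of the current log file

  void Put(const std::string& key, const std::string& value) {
    log_bytes += key.size() + value.size();  // pretend we appended to the log
    mem->entries[key] = value;               // apply to the active memtable
    if (log_bytes >= kLogSizeThreshold && imm == nullptr) {
      // Switch: direct future updates to a brand new memtable and log file.
      imm = std::move(mem);
      mem.reset(new ToyMemTable);
      log_bytes = 0;
      FlushInBackground();
    }
  }

  void FlushInBackground() {
    // In the background: write imm's contents to a level-0 sstable, then
    // discard the old memtable and delete the old log file (all omitted).
    imm.reset();
  }
};

}  // namespace toy

int main() {
  toy::ToyDB db;
  db.Put("key", std::string(1024, 'v'));  // small write; no rotation yet
  return 0;
}
```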
## Compactions
### Cache
The contents of the database are stored in a set of files in the filesystem and
-each file stores a sequence of compressed blocks. If options.cache is non-NULL,
-it is used to cache frequently used uncompressed block contents.
+each file stores a sequence of compressed blocks. If options.block_cache is
+non-NULL, it is used to cache frequently used uncompressed block contents.
```c++
#include "leveldb/cache.h"
leveldb::Options options;
-options.cache = leveldb::NewLRUCache(100 * 1048576); // 100MB cache
+options.block_cache = leveldb::NewLRUCache(100 * 1048576); // 100MB cache
leveldb::DB* db;
leveldb::DB::Open(options, name, &db);
... use the db ...
delete db;
-delete options.cache;
+delete options.block_cache;
```
Note that the cache holds uncompressed data, and therefore it should be sized
#include "helpers/memenv/memenv.h"
+#include <string.h>
+
+#include <limits>
+#include <map>
+#include <string>
+#include <vector>
+
#include "leveldb/env.h"
#include "leveldb/status.h"
#include "port/port.h"
+#include "port/thread_annotations.h"
#include "util/mutexlock.h"
-#include <map>
-#include <string.h>
-#include <string>
-#include <vector>
namespace leveldb {
}
}
- uint64_t Size() const { return size_; }
+ uint64_t Size() const {
+ MutexLock lock(&blocks_mutex_);
+ return size_;
+ }
+
+ void Truncate() {
+ MutexLock lock(&blocks_mutex_);
+ for (char*& block : blocks_) {
+ delete[] block;
+ }
+ blocks_.clear();
+ size_ = 0;
+ }
Status Read(uint64_t offset, size_t n, Slice* result, char* scratch) const {
+ MutexLock lock(&blocks_mutex_);
if (offset > size_) {
return Status::IOError("Offset greater than file size.");
}
return Status::OK();
}
- assert(offset / kBlockSize <= SIZE_MAX);
+ assert(offset / kBlockSize <= std::numeric_limits<size_t>::max());
size_t block = static_cast<size_t>(offset / kBlockSize);
size_t block_offset = offset % kBlockSize;
-
- if (n <= kBlockSize - block_offset) {
- // The requested bytes are all in the first block.
- *result = Slice(blocks_[block] + block_offset, n);
- return Status::OK();
- }
-
size_t bytes_to_copy = n;
char* dst = scratch;
const char* src = data.data();
size_t src_len = data.size();
+ MutexLock lock(&blocks_mutex_);
while (src_len > 0) {
size_t avail;
size_t offset = size_ % kBlockSize;
private:
// Private since only Unref() should be used to delete it.
~FileState() {
- for (std::vector<char*>::iterator i = blocks_.begin(); i != blocks_.end();
- ++i) {
- delete [] *i;
- }
+ Truncate();
}
// No copying allowed.
void operator=(const FileState&);
port::Mutex refs_mutex_;
- int refs_; // Protected by refs_mutex_;
+ int refs_ GUARDED_BY(refs_mutex_);
- // The following fields are not protected by any mutex. They are only mutable
- // while the file is being written, and concurrent access is not allowed
- // to writable files.
- std::vector<char*> blocks_;
- uint64_t size_;
+ mutable port::Mutex blocks_mutex_;
+ std::vector<char*> blocks_ GUARDED_BY(blocks_mutex_);
+ uint64_t size_ GUARDED_BY(blocks_mutex_);
enum { kBlockSize = 8 * 1024 };
};
SequentialFile** result) {
MutexLock lock(&mutex_);
if (file_map_.find(fname) == file_map_.end()) {
- *result = NULL;
+ *result = nullptr;
return Status::IOError(fname, "File not found");
}
RandomAccessFile** result) {
MutexLock lock(&mutex_);
if (file_map_.find(fname) == file_map_.end()) {
- *result = NULL;
+ *result = nullptr;
return Status::IOError(fname, "File not found");
}
virtual Status NewWritableFile(const std::string& fname,
WritableFile** result) {
MutexLock lock(&mutex_);
- if (file_map_.find(fname) != file_map_.end()) {
- DeleteFileInternal(fname);
- }
+ FileSystem::iterator it = file_map_.find(fname);
- FileState* file = new FileState();
- file->Ref();
- file_map_[fname] = file;
+ FileState* file;
+ if (it == file_map_.end()) {
+ // File is not currently open.
+ file = new FileState();
+ file->Ref();
+ file_map_[fname] = file;
+ } else {
+ file = it->second;
+ file->Truncate();
+ }
*result = new WritableFileImpl(file);
return Status::OK();
MutexLock lock(&mutex_);
FileState** sptr = &file_map_[fname];
FileState* file = *sptr;
- if (file == NULL) {
+ if (file == nullptr) {
file = new FileState();
file->Ref();
}
return Status::OK();
}
- void DeleteFileInternal(const std::string& fname) {
+ void DeleteFileInternal(const std::string& fname)
+ EXCLUSIVE_LOCKS_REQUIRED(mutex_) {
if (file_map_.find(fname) == file_map_.end()) {
return;
}
// Map from filenames to FileState objects, representing a simple file system.
typedef std::map<std::string, FileState*> FileSystem;
port::Mutex mutex_;
- FileSystem file_map_; // Protected by mutex_.
+ FileSystem file_map_ GUARDED_BY(mutex_);
};
} // namespace
#ifndef STORAGE_LEVELDB_HELPERS_MEMENV_MEMENV_H_
#define STORAGE_LEVELDB_HELPERS_MEMENV_MEMENV_H_
+#include "leveldb/export.h"
+
namespace leveldb {
class Env;
// all non-file-storage tasks to base_env. The caller must delete the result
// when it is no longer needed.
// *base_env must remain live while the result is in use.
-Env* NewMemEnv(Env* base_env);
+LEVELDB_EXPORT Env* NewMemEnv(Env* base_env);
} // namespace leveldb
delete [] scratch;
}
+TEST(MemEnvTest, OverwriteOpenFile) {
+ const char kWrite1Data[] = "Write #1 data";
+ const size_t kFileDataLen = sizeof(kWrite1Data) - 1;
+ const std::string kTestFileName = test::TmpDir() + "/leveldb-TestFile.dat";
+
+ ASSERT_OK(WriteStringToFile(env_, kWrite1Data, kTestFileName));
+
+ RandomAccessFile* rand_file;
+ ASSERT_OK(env_->NewRandomAccessFile(kTestFileName, &rand_file));
+
+ const char kWrite2Data[] = "Write #2 data";
+ ASSERT_OK(WriteStringToFile(env_, kWrite2Data, kTestFileName));
+
+ // Verify that overwriting an open file results in the new file data being
+ // visible through file handles opened before the write.
+ Slice result;
+ char scratch[kFileDataLen];
+ ASSERT_OK(rand_file->Read(0, kFileDataLen, &result, scratch));
+ ASSERT_EQ(0, result.compare(kWrite2Data));
+
+ delete rand_file;
+}
+
TEST(MemEnvTest, DBTest) {
Options options;
options.create_if_missing = true;
#include <stdarg.h>
#include <stddef.h>
#include <stdint.h>
+#include "leveldb/export.h"
/* Exported types */
/* DB operations */
-extern leveldb_t* leveldb_open(
- const leveldb_options_t* options,
- const char* name,
- char** errptr);
+LEVELDB_EXPORT leveldb_t* leveldb_open(const leveldb_options_t* options,
+ const char* name, char** errptr);
-extern void leveldb_close(leveldb_t* db);
+LEVELDB_EXPORT void leveldb_close(leveldb_t* db);
-extern void leveldb_put(
- leveldb_t* db,
- const leveldb_writeoptions_t* options,
- const char* key, size_t keylen,
- const char* val, size_t vallen,
- char** errptr);
+LEVELDB_EXPORT void leveldb_put(leveldb_t* db,
+ const leveldb_writeoptions_t* options,
+ const char* key, size_t keylen, const char* val,
+ size_t vallen, char** errptr);
-extern void leveldb_delete(
- leveldb_t* db,
- const leveldb_writeoptions_t* options,
- const char* key, size_t keylen,
- char** errptr);
+LEVELDB_EXPORT void leveldb_delete(leveldb_t* db,
+ const leveldb_writeoptions_t* options,
+ const char* key, size_t keylen,
+ char** errptr);
-extern void leveldb_write(
- leveldb_t* db,
- const leveldb_writeoptions_t* options,
- leveldb_writebatch_t* batch,
- char** errptr);
+LEVELDB_EXPORT void leveldb_write(leveldb_t* db,
+ const leveldb_writeoptions_t* options,
+ leveldb_writebatch_t* batch, char** errptr);
/* Returns NULL if not found. A malloc()ed array otherwise.
Stores the length of the array in *vallen. */
-extern char* leveldb_get(
- leveldb_t* db,
- const leveldb_readoptions_t* options,
- const char* key, size_t keylen,
- size_t* vallen,
- char** errptr);
+LEVELDB_EXPORT char* leveldb_get(leveldb_t* db,
+ const leveldb_readoptions_t* options,
+ const char* key, size_t keylen, size_t* vallen,
+ char** errptr);
-extern leveldb_iterator_t* leveldb_create_iterator(
- leveldb_t* db,
- const leveldb_readoptions_t* options);
+LEVELDB_EXPORT leveldb_iterator_t* leveldb_create_iterator(
+ leveldb_t* db, const leveldb_readoptions_t* options);
-extern const leveldb_snapshot_t* leveldb_create_snapshot(
- leveldb_t* db);
+LEVELDB_EXPORT const leveldb_snapshot_t* leveldb_create_snapshot(leveldb_t* db);
-extern void leveldb_release_snapshot(
- leveldb_t* db,
- const leveldb_snapshot_t* snapshot);
+LEVELDB_EXPORT void leveldb_release_snapshot(
+ leveldb_t* db, const leveldb_snapshot_t* snapshot);
/* Returns NULL if property name is unknown.
Else returns a pointer to a malloc()-ed null-terminated value. */
-extern char* leveldb_property_value(
- leveldb_t* db,
- const char* propname);
-
-extern void leveldb_approximate_sizes(
- leveldb_t* db,
- int num_ranges,
- const char* const* range_start_key, const size_t* range_start_key_len,
- const char* const* range_limit_key, const size_t* range_limit_key_len,
- uint64_t* sizes);
-
-extern void leveldb_compact_range(
- leveldb_t* db,
- const char* start_key, size_t start_key_len,
- const char* limit_key, size_t limit_key_len);
+LEVELDB_EXPORT char* leveldb_property_value(leveldb_t* db,
+ const char* propname);
+
+LEVELDB_EXPORT void leveldb_approximate_sizes(
+ leveldb_t* db, int num_ranges, const char* const* range_start_key,
+ const size_t* range_start_key_len, const char* const* range_limit_key,
+ const size_t* range_limit_key_len, uint64_t* sizes);
+
+LEVELDB_EXPORT void leveldb_compact_range(leveldb_t* db, const char* start_key,
+ size_t start_key_len,
+ const char* limit_key,
+ size_t limit_key_len);
/* Management operations */
-extern void leveldb_destroy_db(
- const leveldb_options_t* options,
- const char* name,
- char** errptr);
+LEVELDB_EXPORT void leveldb_destroy_db(const leveldb_options_t* options,
+ const char* name, char** errptr);
-extern void leveldb_repair_db(
- const leveldb_options_t* options,
- const char* name,
- char** errptr);
+LEVELDB_EXPORT void leveldb_repair_db(const leveldb_options_t* options,
+ const char* name, char** errptr);
/* Iterator */
-extern void leveldb_iter_destroy(leveldb_iterator_t*);
-extern unsigned char leveldb_iter_valid(const leveldb_iterator_t*);
-extern void leveldb_iter_seek_to_first(leveldb_iterator_t*);
-extern void leveldb_iter_seek_to_last(leveldb_iterator_t*);
-extern void leveldb_iter_seek(leveldb_iterator_t*, const char* k, size_t klen);
-extern void leveldb_iter_next(leveldb_iterator_t*);
-extern void leveldb_iter_prev(leveldb_iterator_t*);
-extern const char* leveldb_iter_key(const leveldb_iterator_t*, size_t* klen);
-extern const char* leveldb_iter_value(const leveldb_iterator_t*, size_t* vlen);
-extern void leveldb_iter_get_error(const leveldb_iterator_t*, char** errptr);
+LEVELDB_EXPORT void leveldb_iter_destroy(leveldb_iterator_t*);
+LEVELDB_EXPORT unsigned char leveldb_iter_valid(const leveldb_iterator_t*);
+LEVELDB_EXPORT void leveldb_iter_seek_to_first(leveldb_iterator_t*);
+LEVELDB_EXPORT void leveldb_iter_seek_to_last(leveldb_iterator_t*);
+LEVELDB_EXPORT void leveldb_iter_seek(leveldb_iterator_t*, const char* k,
+ size_t klen);
+LEVELDB_EXPORT void leveldb_iter_next(leveldb_iterator_t*);
+LEVELDB_EXPORT void leveldb_iter_prev(leveldb_iterator_t*);
+LEVELDB_EXPORT const char* leveldb_iter_key(const leveldb_iterator_t*,
+ size_t* klen);
+LEVELDB_EXPORT const char* leveldb_iter_value(const leveldb_iterator_t*,
+ size_t* vlen);
+LEVELDB_EXPORT void leveldb_iter_get_error(const leveldb_iterator_t*,
+ char** errptr);
/* Write batch */
-extern leveldb_writebatch_t* leveldb_writebatch_create();
-extern void leveldb_writebatch_destroy(leveldb_writebatch_t*);
-extern void leveldb_writebatch_clear(leveldb_writebatch_t*);
-extern void leveldb_writebatch_put(
- leveldb_writebatch_t*,
- const char* key, size_t klen,
- const char* val, size_t vlen);
-extern void leveldb_writebatch_delete(
- leveldb_writebatch_t*,
- const char* key, size_t klen);
-extern void leveldb_writebatch_iterate(
- leveldb_writebatch_t*,
- void* state,
+LEVELDB_EXPORT leveldb_writebatch_t* leveldb_writebatch_create();
+LEVELDB_EXPORT void leveldb_writebatch_destroy(leveldb_writebatch_t*);
+LEVELDB_EXPORT void leveldb_writebatch_clear(leveldb_writebatch_t*);
+LEVELDB_EXPORT void leveldb_writebatch_put(leveldb_writebatch_t*,
+ const char* key, size_t klen,
+ const char* val, size_t vlen);
+LEVELDB_EXPORT void leveldb_writebatch_delete(leveldb_writebatch_t*,
+ const char* key, size_t klen);
+LEVELDB_EXPORT void leveldb_writebatch_iterate(
+ const leveldb_writebatch_t*, void* state,
void (*put)(void*, const char* k, size_t klen, const char* v, size_t vlen),
void (*deleted)(void*, const char* k, size_t klen));
+LEVELDB_EXPORT void leveldb_writebatch_append(
+ leveldb_writebatch_t* destination, const leveldb_writebatch_t* source);
/* Options */
-extern leveldb_options_t* leveldb_options_create();
-extern void leveldb_options_destroy(leveldb_options_t*);
-extern void leveldb_options_set_comparator(
- leveldb_options_t*,
- leveldb_comparator_t*);
-extern void leveldb_options_set_filter_policy(
- leveldb_options_t*,
- leveldb_filterpolicy_t*);
-extern void leveldb_options_set_create_if_missing(
- leveldb_options_t*, unsigned char);
-extern void leveldb_options_set_error_if_exists(
- leveldb_options_t*, unsigned char);
-extern void leveldb_options_set_paranoid_checks(
- leveldb_options_t*, unsigned char);
-extern void leveldb_options_set_env(leveldb_options_t*, leveldb_env_t*);
-extern void leveldb_options_set_info_log(leveldb_options_t*, leveldb_logger_t*);
-extern void leveldb_options_set_write_buffer_size(leveldb_options_t*, size_t);
-extern void leveldb_options_set_max_open_files(leveldb_options_t*, int);
-extern void leveldb_options_set_cache(leveldb_options_t*, leveldb_cache_t*);
-extern void leveldb_options_set_block_size(leveldb_options_t*, size_t);
-extern void leveldb_options_set_block_restart_interval(leveldb_options_t*, int);
+LEVELDB_EXPORT leveldb_options_t* leveldb_options_create();
+LEVELDB_EXPORT void leveldb_options_destroy(leveldb_options_t*);
+LEVELDB_EXPORT void leveldb_options_set_comparator(leveldb_options_t*,
+ leveldb_comparator_t*);
+LEVELDB_EXPORT void leveldb_options_set_filter_policy(leveldb_options_t*,
+ leveldb_filterpolicy_t*);
+LEVELDB_EXPORT void leveldb_options_set_create_if_missing(leveldb_options_t*,
+ unsigned char);
+LEVELDB_EXPORT void leveldb_options_set_error_if_exists(leveldb_options_t*,
+ unsigned char);
+LEVELDB_EXPORT void leveldb_options_set_paranoid_checks(leveldb_options_t*,
+ unsigned char);
+LEVELDB_EXPORT void leveldb_options_set_env(leveldb_options_t*, leveldb_env_t*);
+LEVELDB_EXPORT void leveldb_options_set_info_log(leveldb_options_t*,
+ leveldb_logger_t*);
+LEVELDB_EXPORT void leveldb_options_set_write_buffer_size(leveldb_options_t*,
+ size_t);
+LEVELDB_EXPORT void leveldb_options_set_max_open_files(leveldb_options_t*, int);
+LEVELDB_EXPORT void leveldb_options_set_cache(leveldb_options_t*,
+ leveldb_cache_t*);
+LEVELDB_EXPORT void leveldb_options_set_block_size(leveldb_options_t*, size_t);
+LEVELDB_EXPORT void leveldb_options_set_block_restart_interval(
+ leveldb_options_t*, int);
+LEVELDB_EXPORT void leveldb_options_set_max_file_size(leveldb_options_t*,
+ size_t);
enum {
leveldb_no_compression = 0,
leveldb_snappy_compression = 1
};
-extern void leveldb_options_set_compression(leveldb_options_t*, int);
+LEVELDB_EXPORT void leveldb_options_set_compression(leveldb_options_t*, int);
/* Comparator */
-extern leveldb_comparator_t* leveldb_comparator_create(
- void* state,
- void (*destructor)(void*),
- int (*compare)(
- void*,
- const char* a, size_t alen,
- const char* b, size_t blen),
+LEVELDB_EXPORT leveldb_comparator_t* leveldb_comparator_create(
+ void* state, void (*destructor)(void*),
+ int (*compare)(void*, const char* a, size_t alen, const char* b,
+ size_t blen),
const char* (*name)(void*));
-extern void leveldb_comparator_destroy(leveldb_comparator_t*);
+LEVELDB_EXPORT void leveldb_comparator_destroy(leveldb_comparator_t*);
/* Filter policy */
-extern leveldb_filterpolicy_t* leveldb_filterpolicy_create(
- void* state,
- void (*destructor)(void*),
- char* (*create_filter)(
- void*,
- const char* const* key_array, const size_t* key_length_array,
- int num_keys,
- size_t* filter_length),
- unsigned char (*key_may_match)(
- void*,
- const char* key, size_t length,
- const char* filter, size_t filter_length),
+LEVELDB_EXPORT leveldb_filterpolicy_t* leveldb_filterpolicy_create(
+ void* state, void (*destructor)(void*),
+ char* (*create_filter)(void*, const char* const* key_array,
+ const size_t* key_length_array, int num_keys,
+ size_t* filter_length),
+ unsigned char (*key_may_match)(void*, const char* key, size_t length,
+ const char* filter, size_t filter_length),
const char* (*name)(void*));
-extern void leveldb_filterpolicy_destroy(leveldb_filterpolicy_t*);
+LEVELDB_EXPORT void leveldb_filterpolicy_destroy(leveldb_filterpolicy_t*);
-extern leveldb_filterpolicy_t* leveldb_filterpolicy_create_bloom(
+LEVELDB_EXPORT leveldb_filterpolicy_t* leveldb_filterpolicy_create_bloom(
int bits_per_key);
/* Read options */
-extern leveldb_readoptions_t* leveldb_readoptions_create();
-extern void leveldb_readoptions_destroy(leveldb_readoptions_t*);
-extern void leveldb_readoptions_set_verify_checksums(
- leveldb_readoptions_t*,
- unsigned char);
-extern void leveldb_readoptions_set_fill_cache(
+LEVELDB_EXPORT leveldb_readoptions_t* leveldb_readoptions_create();
+LEVELDB_EXPORT void leveldb_readoptions_destroy(leveldb_readoptions_t*);
+LEVELDB_EXPORT void leveldb_readoptions_set_verify_checksums(
leveldb_readoptions_t*, unsigned char);
-extern void leveldb_readoptions_set_snapshot(
- leveldb_readoptions_t*,
- const leveldb_snapshot_t*);
+LEVELDB_EXPORT void leveldb_readoptions_set_fill_cache(leveldb_readoptions_t*,
+ unsigned char);
+LEVELDB_EXPORT void leveldb_readoptions_set_snapshot(leveldb_readoptions_t*,
+ const leveldb_snapshot_t*);
/* Write options */
-extern leveldb_writeoptions_t* leveldb_writeoptions_create();
-extern void leveldb_writeoptions_destroy(leveldb_writeoptions_t*);
-extern void leveldb_writeoptions_set_sync(
- leveldb_writeoptions_t*, unsigned char);
+LEVELDB_EXPORT leveldb_writeoptions_t* leveldb_writeoptions_create();
+LEVELDB_EXPORT void leveldb_writeoptions_destroy(leveldb_writeoptions_t*);
+LEVELDB_EXPORT void leveldb_writeoptions_set_sync(leveldb_writeoptions_t*,
+ unsigned char);
/* Cache */
-extern leveldb_cache_t* leveldb_cache_create_lru(size_t capacity);
-extern void leveldb_cache_destroy(leveldb_cache_t* cache);
+LEVELDB_EXPORT leveldb_cache_t* leveldb_cache_create_lru(size_t capacity);
+LEVELDB_EXPORT void leveldb_cache_destroy(leveldb_cache_t* cache);
/* Env */
-extern leveldb_env_t* leveldb_create_default_env();
-extern void leveldb_env_destroy(leveldb_env_t*);
+LEVELDB_EXPORT leveldb_env_t* leveldb_create_default_env();
+LEVELDB_EXPORT void leveldb_env_destroy(leveldb_env_t*);
+
+/* If not NULL, the returned buffer must be released using leveldb_free(). */
+LEVELDB_EXPORT char* leveldb_env_get_test_directory(leveldb_env_t*);
/* Utility */
in this file. Note that in certain cases (typically on Windows), you
may need to call this routine instead of free(ptr) to dispose of
malloc()-ed memory returned by this library. */
-extern void leveldb_free(void* ptr);
+LEVELDB_EXPORT void leveldb_free(void* ptr);
/* Return the major version number for this release. */
-extern int leveldb_major_version();
+LEVELDB_EXPORT int leveldb_major_version();
/* Return the minor version number for this release. */
-extern int leveldb_minor_version();
+LEVELDB_EXPORT int leveldb_minor_version();
#ifdef __cplusplus
} /* end extern "C" */
#define STORAGE_LEVELDB_INCLUDE_CACHE_H_
#include <stdint.h>
+#include "leveldb/export.h"
#include "leveldb/slice.h"
namespace leveldb {
-class Cache;
+class LEVELDB_EXPORT Cache;
// Create a new cache with a fixed size capacity. This implementation
// of Cache uses a least-recently-used eviction policy.
-extern Cache* NewLRUCache(size_t capacity);
+LEVELDB_EXPORT Cache* NewLRUCache(size_t capacity);
-class Cache {
+class LEVELDB_EXPORT Cache {
public:
- Cache() { }
+ Cache() = default;
+
+ Cache(const Cache&) = delete;
+ Cache& operator=(const Cache&) = delete;
// Destroys all existing entries by calling the "deleter"
// function that was passed to the constructor.
virtual Handle* Insert(const Slice& key, void* value, size_t charge,
void (*deleter)(const Slice& key, void* value)) = 0;
- // If the cache has no mapping for "key", returns NULL.
+ // If the cache has no mapping for "key", returns nullptr.
//
// Else return a handle that corresponds to the mapping. The caller
// must call this->Release(handle) when the returned mapping is no
struct Rep;
Rep* rep_;
-
- // No copying allowed
- Cache(const Cache&);
- void operator=(const Cache&);
};
} // namespace leveldb
#define STORAGE_LEVELDB_INCLUDE_COMPARATOR_H_
#include <string>
+#include "leveldb/export.h"
namespace leveldb {
// used as keys in an sstable or a database. A Comparator implementation
// must be thread-safe since leveldb may invoke its methods concurrently
// from multiple threads.
-class Comparator {
+class LEVELDB_EXPORT Comparator {
public:
virtual ~Comparator();
// Return a builtin comparator that uses lexicographic byte-wise
// ordering. The result remains the property of this module and
// must not be deleted.
-extern const Comparator* BytewiseComparator();
+LEVELDB_EXPORT const Comparator* BytewiseComparator();
} // namespace leveldb
#include <stdint.h>
#include <stdio.h>
+#include "leveldb/export.h"
#include "leveldb/iterator.h"
#include "leveldb/options.h"
namespace leveldb {
-// Update Makefile if you change these
+// Update CMakeLists.txt if you change these
static const int kMajorVersion = 1;
-static const int kMinorVersion = 20;
+static const int kMinorVersion = 21;
struct Options;
struct ReadOptions;
// Abstract handle to particular state of a DB.
// A Snapshot is an immutable object and can therefore be safely
// accessed from multiple threads without any external synchronization.
-class Snapshot {
+class LEVELDB_EXPORT Snapshot {
protected:
virtual ~Snapshot();
};
// A range of keys
-struct Range {
+struct LEVELDB_EXPORT Range {
Slice start; // Included in the range
Slice limit; // Not included in the range
// A DB is a persistent ordered map from keys to values.
// A DB is safe for concurrent access from multiple threads without
// any external synchronization.
-class DB {
+class LEVELDB_EXPORT DB {
public:
// Open the database with the specified "name".
// Stores a pointer to a heap-allocated database in *dbptr and returns
// OK on success.
- // Stores NULL in *dbptr and returns a non-OK status on error.
+ // Stores nullptr in *dbptr and returns a non-OK status on error.
// Caller should delete *dbptr when it is no longer needed.
static Status Open(const Options& options,
const std::string& name,
DB** dbptr);
- DB() { }
+ DB() = default;
+
+ DB(const DB&) = delete;
+ DB& operator=(const DB&) = delete;
+
virtual ~DB();
// Set the database entry for "key" to "value". Returns OK on success,
// needed to access the data. This operation should typically only
// be invoked by users who understand the underlying implementation.
//
- // begin==NULL is treated as a key before all keys in the database.
- // end==NULL is treated as a key after all keys in the database.
+ // begin==nullptr is treated as a key before all keys in the database.
+ // end==nullptr is treated as a key after all keys in the database.
// Therefore the following call will compact the entire database:
- // db->CompactRange(NULL, NULL);
+ // db->CompactRange(nullptr, nullptr);
virtual void CompactRange(const Slice* begin, const Slice* end) = 0;
-
- private:
- // No copying allowed
- DB(const DB&);
- void operator=(const DB&);
};
// Destroy the contents of the specified database.
// Be very careful using this method.
-Status DestroyDB(const std::string& name, const Options& options);
+//
+// Note: For backwards compatibility, if DestroyDB is unable to list the
+// database files, Status::OK() will still be returned masking this failure.
+LEVELDB_EXPORT Status DestroyDB(const std::string& name,
+ const Options& options);
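As a caller-facing illustration of that note, here is a minimal sketch (the `/tmp/leveldb-scratch` path is made up); the returned `Status` cannot distinguish a fully successful destroy from one where listing the database files failed:

```c++
#include <iostream>
#include "leveldb/db.h"

int main() {
  leveldb::Options options;
  // Best-effort cleanup: an OK status may mask a failure to list the
  // database's files, per the note above.
  leveldb::Status s = leveldb::DestroyDB("/tmp/leveldb-scratch", options);
  std::cout << "DestroyDB: " << s.ToString() << std::endl;
  return 0;
}
```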
// If a DB cannot be opened, you may attempt to call this method to
// resurrect as much of the contents of the database as possible.
// Some data may be lost, so be careful when calling this function
// on a database that contains important information.
-Status RepairDB(const std::string& dbname, const Options& options);
+LEVELDB_EXPORT Status RepairDB(const std::string& dbname,
+ const Options& options);
} // namespace leveldb
#include <string>
#include "leveldb/env.h"
+#include "leveldb/export.h"
#include "leveldb/status.h"
namespace leveldb {
//
// Returns a non-OK result if fname does not name a leveldb storage
// file, or if the file cannot be read.
-Status DumpFile(Env* env, const std::string& fname, WritableFile* dst);
+LEVELDB_EXPORT Status DumpFile(Env* env, const std::string& fname,
+ WritableFile* dst);
} // namespace leveldb
#ifndef STORAGE_LEVELDB_INCLUDE_ENV_H_
#define STORAGE_LEVELDB_INCLUDE_ENV_H_
-#include <string>
-#include <vector>
#include <stdarg.h>
#include <stdint.h>
+#include <string>
+#include <vector>
+#include "leveldb/export.h"
#include "leveldb/status.h"
+#if defined(_WIN32)
+// The leveldb::Env class below contains a DeleteFile method.
+// At the same time, <windows.h>, a fairly popular header
+// file for Windows applications, defines a DeleteFile macro.
+//
+// Without any intervention on our part, the result of this
+// unfortunate coincidence is that the name of the
+// leveldb::Env::DeleteFile method seen by the compiler depends on
+// whether <windows.h> was included before or after the LevelDB
+// headers.
+//
+// To avoid headaches, we undefine DeleteFile (if defined) and
+// redefine it at the bottom of this file. This way <windows.h>
+// can be included before this file (or not at all) and the
+// exported method will always be leveldb::Env::DeleteFile.
+#if defined(DeleteFile)
+#undef DeleteFile
+#define LEVELDB_DELETEFILE_UNDEFINED
+#endif // defined(DeleteFile)
+#endif // defined(_WIN32)
+
namespace leveldb {
class FileLock;
class Slice;
class WritableFile;
-class Env {
+class LEVELDB_EXPORT Env {
public:
- Env() { }
+ Env() = default;
+
+ Env(const Env&) = delete;
+ Env& operator=(const Env&) = delete;
+
virtual ~Env();
// Return a default environment suitable for the current operating
// The result of Default() belongs to leveldb and must never be deleted.
static Env* Default();
- // Create a brand new sequentially-readable file with the specified name.
+ // Create an object that sequentially reads the file with the specified name.
// On success, stores a pointer to the new file in *result and returns OK.
- // On failure stores NULL in *result and returns non-OK. If the file does
- // not exist, returns a non-OK status.
+ // On failure stores nullptr in *result and returns non-OK. If the file does
+ // not exist, returns a non-OK status. Implementations should return a
+ // NotFound status when the file does not exist.
//
// The returned file will only be accessed by one thread at a time.
virtual Status NewSequentialFile(const std::string& fname,
SequentialFile** result) = 0;
- // Create a brand new random access read-only file with the
+ // Create an object supporting random-access reads from the file with the
// specified name. On success, stores a pointer to the new file in
- // *result and returns OK. On failure stores NULL in *result and
+ // *result and returns OK. On failure stores nullptr in *result and
// returns non-OK. If the file does not exist, returns a non-OK
- // status.
+ // status. Implementations should return a NotFound status when the file does
+ // not exist.
//
// The returned file may be concurrently accessed by multiple threads.
virtual Status NewRandomAccessFile(const std::string& fname,
// Create an object that writes to a new file with the specified
// name. Deletes any existing file with the same name and creates a
// new file. On success, stores a pointer to the new file in
- // *result and returns OK. On failure stores NULL in *result and
+ // *result and returns OK. On failure stores nullptr in *result and
// returns non-OK.
//
// The returned file will only be accessed by one thread at a time.
// Create an object that either appends to an existing file, or
// writes to a new file (if the file does not exist to begin with).
// On success, stores a pointer to the new file in *result and
- // returns OK. On failure stores NULL in *result and returns
+ // returns OK. On failure stores nullptr in *result and returns
// non-OK.
//
// The returned file will only be accessed by one thread at a time.
const std::string& target) = 0;
// Lock the specified file. Used to prevent concurrent access to
- // the same db by multiple processes. On failure, stores NULL in
+ // the same db by multiple processes. On failure, stores nullptr in
// *lock and returns non-OK.
//
// On success, stores a pointer to the object that represents the
// Sleep/delay the thread for the prescribed number of micro-seconds.
virtual void SleepForMicroseconds(int micros) = 0;
-
- private:
- // No copying allowed
- Env(const Env&);
- void operator=(const Env&);
};
// A file abstraction for reading sequentially through a file
-class SequentialFile {
+class LEVELDB_EXPORT SequentialFile {
public:
- SequentialFile() { }
+ SequentialFile() = default;
+
+ SequentialFile(const SequentialFile&) = delete;
+ SequentialFile& operator=(const SequentialFile&) = delete;
+
virtual ~SequentialFile();
// Read up to "n" bytes from the file. "scratch[0..n-1]" may be
//
// REQUIRES: External synchronization
virtual Status Skip(uint64_t n) = 0;
-
- private:
- // No copying allowed
- SequentialFile(const SequentialFile&);
- void operator=(const SequentialFile&);
};
// A file abstraction for randomly reading the contents of a file.
-class RandomAccessFile {
+class LEVELDB_EXPORT RandomAccessFile {
public:
- RandomAccessFile() { }
+ RandomAccessFile() = default;
+
+ RandomAccessFile(const RandomAccessFile&) = delete;
+ RandomAccessFile& operator=(const RandomAccessFile&) = delete;
+
virtual ~RandomAccessFile();
// Read up to "n" bytes from the file starting at "offset".
// Safe for concurrent use by multiple threads.
virtual Status Read(uint64_t offset, size_t n, Slice* result,
char* scratch) const = 0;
-
- private:
- // No copying allowed
- RandomAccessFile(const RandomAccessFile&);
- void operator=(const RandomAccessFile&);
};
// A file abstraction for sequential writing. The implementation
// must provide buffering since callers may append small fragments
// at a time to the file.
-class WritableFile {
+class LEVELDB_EXPORT WritableFile {
public:
- WritableFile() { }
+ WritableFile() = default;
+
+ WritableFile(const WritableFile&) = delete;
+ WritableFile& operator=(const WritableFile&) = delete;
+
virtual ~WritableFile();
virtual Status Append(const Slice& data) = 0;
virtual Status Close() = 0;
virtual Status Flush() = 0;
virtual Status Sync() = 0;
-
- private:
- // No copying allowed
- WritableFile(const WritableFile&);
- void operator=(const WritableFile&);
};
// An interface for writing log messages.
-class Logger {
+class LEVELDB_EXPORT Logger {
public:
- Logger() { }
+ Logger() = default;
+
+ Logger(const Logger&) = delete;
+ Logger& operator=(const Logger&) = delete;
+
virtual ~Logger();
// Write an entry to the log file with the specified format.
virtual void Logv(const char* format, va_list ap) = 0;
-
- private:
- // No copying allowed
- Logger(const Logger&);
- void operator=(const Logger&);
};
-
// Identifies a locked file.
-class FileLock {
+class LEVELDB_EXPORT FileLock {
public:
- FileLock() { }
+ FileLock() = default;
+
+ FileLock(const FileLock&) = delete;
+ FileLock& operator=(const FileLock&) = delete;
+
virtual ~FileLock();
- private:
- // No copying allowed
- FileLock(const FileLock&);
- void operator=(const FileLock&);
};
-// Log the specified data to *info_log if info_log is non-NULL.
-extern void Log(Logger* info_log, const char* format, ...)
+// Log the specified data to *info_log if info_log is non-null.
+void Log(Logger* info_log, const char* format, ...)
# if defined(__GNUC__) || defined(__clang__)
__attribute__((__format__ (__printf__, 2, 3)))
# endif
;
// A utility routine: write "data" to the named file.
-extern Status WriteStringToFile(Env* env, const Slice& data,
- const std::string& fname);
+LEVELDB_EXPORT Status WriteStringToFile(Env* env, const Slice& data,
+ const std::string& fname);
// A utility routine: read contents of named file into *data
-extern Status ReadFileToString(Env* env, const std::string& fname,
- std::string* data);
+LEVELDB_EXPORT Status ReadFileToString(Env* env, const std::string& fname,
+ std::string* data);
// An implementation of Env that forwards all calls to another Env.
// May be useful to clients who wish to override just part of the
// functionality of another Env.
-class EnvWrapper : public Env {
+class LEVELDB_EXPORT EnvWrapper : public Env {
public:
- // Initialize an EnvWrapper that delegates all calls to *t
+ // Initialize an EnvWrapper that delegates all calls to *t.
explicit EnvWrapper(Env* t) : target_(t) { }
virtual ~EnvWrapper();
- // Return the target to which this Env forwards all calls
+ // Return the target to which this Env forwards all calls.
Env* target() const { return target_; }
- // The following text is boilerplate that forwards all methods to target()
- Status NewSequentialFile(const std::string& f, SequentialFile** r) {
+ // The following text is boilerplate that forwards all methods to target().
+ Status NewSequentialFile(const std::string& f, SequentialFile** r) override {
return target_->NewSequentialFile(f, r);
}
- Status NewRandomAccessFile(const std::string& f, RandomAccessFile** r) {
+ Status NewRandomAccessFile(const std::string& f,
+ RandomAccessFile** r) override {
return target_->NewRandomAccessFile(f, r);
}
- Status NewWritableFile(const std::string& f, WritableFile** r) {
+ Status NewWritableFile(const std::string& f, WritableFile** r) override {
return target_->NewWritableFile(f, r);
}
- Status NewAppendableFile(const std::string& f, WritableFile** r) {
+ Status NewAppendableFile(const std::string& f, WritableFile** r) override {
return target_->NewAppendableFile(f, r);
}
- bool FileExists(const std::string& f) { return target_->FileExists(f); }
- Status GetChildren(const std::string& dir, std::vector<std::string>* r) {
+ bool FileExists(const std::string& f) override {
+ return target_->FileExists(f);
+ }
+ Status GetChildren(const std::string& dir,
+ std::vector<std::string>* r) override {
return target_->GetChildren(dir, r);
}
- Status DeleteFile(const std::string& f) { return target_->DeleteFile(f); }
- Status CreateDir(const std::string& d) { return target_->CreateDir(d); }
- Status DeleteDir(const std::string& d) { return target_->DeleteDir(d); }
- Status GetFileSize(const std::string& f, uint64_t* s) {
+ Status DeleteFile(const std::string& f) override {
+ return target_->DeleteFile(f);
+ }
+ Status CreateDir(const std::string& d) override {
+ return target_->CreateDir(d);
+ }
+ Status DeleteDir(const std::string& d) override {
+ return target_->DeleteDir(d);
+ }
+ Status GetFileSize(const std::string& f, uint64_t* s) override {
return target_->GetFileSize(f, s);
}
- Status RenameFile(const std::string& s, const std::string& t) {
+ Status RenameFile(const std::string& s, const std::string& t) override {
return target_->RenameFile(s, t);
}
- Status LockFile(const std::string& f, FileLock** l) {
+ Status LockFile(const std::string& f, FileLock** l) override {
return target_->LockFile(f, l);
}
- Status UnlockFile(FileLock* l) { return target_->UnlockFile(l); }
- void Schedule(void (*f)(void*), void* a) {
+ Status UnlockFile(FileLock* l) override { return target_->UnlockFile(l); }
+ void Schedule(void (*f)(void*), void* a) override {
return target_->Schedule(f, a);
}
- void StartThread(void (*f)(void*), void* a) {
+ void StartThread(void (*f)(void*), void* a) override {
return target_->StartThread(f, a);
}
- virtual Status GetTestDirectory(std::string* path) {
+ Status GetTestDirectory(std::string* path) override {
return target_->GetTestDirectory(path);
}
- virtual Status NewLogger(const std::string& fname, Logger** result) {
+ Status NewLogger(const std::string& fname, Logger** result) override {
return target_->NewLogger(fname, result);
}
- uint64_t NowMicros() {
+ uint64_t NowMicros() override {
return target_->NowMicros();
}
- void SleepForMicroseconds(int micros) {
+ void SleepForMicroseconds(int micros) override {
target_->SleepForMicroseconds(micros);
}
+
private:
Env* target_;
};
} // namespace leveldb
+// Redefine DeleteFile if necessary.
+#if defined(_WIN32) && defined(LEVELDB_DELETEFILE_UNDEFINED)
+#if defined(UNICODE)
+#define DeleteFile DeleteFileW
+#else
+#define DeleteFile DeleteFileA
+#endif // defined(UNICODE)
+#endif // defined(_WIN32) && defined(LEVELDB_DELETEFILE_UNDEFINED)
+
#endif // STORAGE_LEVELDB_INCLUDE_ENV_H_
--- /dev/null
+// Copyright (c) 2017 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#ifndef STORAGE_LEVELDB_INCLUDE_EXPORT_H_
+#define STORAGE_LEVELDB_INCLUDE_EXPORT_H_
+
+#if !defined(LEVELDB_EXPORT)
+
+#if defined(LEVELDB_SHARED_LIBRARY)
+#if defined(_WIN32)
+
+#if defined(LEVELDB_COMPILE_LIBRARY)
+#define LEVELDB_EXPORT __declspec(dllexport)
+#else
+#define LEVELDB_EXPORT __declspec(dllimport)
+#endif // defined(LEVELDB_COMPILE_LIBRARY)
+
+#else // defined(_WIN32)
+#if defined(LEVELDB_COMPILE_LIBRARY)
+#define LEVELDB_EXPORT __attribute__((visibility("default")))
+#else
+#define LEVELDB_EXPORT
+#endif
+#endif // defined(_WIN32)
+
+#else // defined(LEVELDB_SHARED_LIBRARY)
+#define LEVELDB_EXPORT
+#endif
+
+#endif // !defined(LEVELDB_EXPORT)
+
+#endif // STORAGE_LEVELDB_INCLUDE_EXPORT_H_
#define STORAGE_LEVELDB_INCLUDE_FILTER_POLICY_H_
#include <string>
+#include "leveldb/export.h"
namespace leveldb {
class Slice;
-class FilterPolicy {
+class LEVELDB_EXPORT FilterPolicy {
public:
virtual ~FilterPolicy();
// ignores trailing spaces, it would be incorrect to use a
// FilterPolicy (like NewBloomFilterPolicy) that does not ignore
// trailing spaces in keys.
-extern const FilterPolicy* NewBloomFilterPolicy(int bits_per_key);
+LEVELDB_EXPORT const FilterPolicy* NewBloomFilterPolicy(int bits_per_key);
-}
+} // namespace leveldb
#endif // STORAGE_LEVELDB_INCLUDE_FILTER_POLICY_H_
#ifndef STORAGE_LEVELDB_INCLUDE_ITERATOR_H_
#define STORAGE_LEVELDB_INCLUDE_ITERATOR_H_
+#include "leveldb/export.h"
#include "leveldb/slice.h"
#include "leveldb/status.h"
namespace leveldb {
-class Iterator {
+class LEVELDB_EXPORT Iterator {
public:
Iterator();
+
+ Iterator(const Iterator&) = delete;
+ Iterator& operator=(const Iterator&) = delete;
+
virtual ~Iterator();
// An iterator is either positioned at a key/value pair, or
//
// Note that unlike all of the preceding methods, this method is
// not abstract and therefore clients should not override it.
- typedef void (*CleanupFunction)(void* arg1, void* arg2);
+ using CleanupFunction = void (*)(void* arg1, void* arg2);
void RegisterCleanup(CleanupFunction function, void* arg1, void* arg2);
private:
- struct Cleanup {
+ // Cleanup functions are stored in a single-linked list.
+ // The list's head node is inlined in the iterator.
+ struct CleanupNode {
+ // The head node is used if the function pointer is not null.
CleanupFunction function;
void* arg1;
void* arg2;
- Cleanup* next;
- };
- Cleanup cleanup_;
+ CleanupNode* next;
- // No copying allowed
- Iterator(const Iterator&);
- void operator=(const Iterator&);
+ // True if the node is not used. Only head nodes might be unused.
+ bool IsEmpty() const { return function == nullptr; }
+ // Invokes the cleanup function.
+ void Run() { assert(function != nullptr); (*function)(arg1, arg2); }
+ };
+ CleanupNode cleanup_head_;
};
// Return an empty iterator (yields nothing).
-extern Iterator* NewEmptyIterator();
+LEVELDB_EXPORT Iterator* NewEmptyIterator();
// Return an empty iterator with the specified status.
-extern Iterator* NewErrorIterator(const Status& status);
+LEVELDB_EXPORT Iterator* NewErrorIterator(const Status& status);
} // namespace leveldb
#define STORAGE_LEVELDB_INCLUDE_OPTIONS_H_
#include <stddef.h>
+#include "leveldb/export.h"
namespace leveldb {
};
// Options to control the behavior of a database (passed to DB::Open)
-struct Options {
+struct LEVELDB_EXPORT Options {
// -------------------
// Parameters that affect behavior
const Comparator* comparator;
// If true, the database will be created if it is missing.
- // Default: false
- bool create_if_missing;
+ bool create_if_missing = false;
// If true, an error is raised if the database already exists.
- // Default: false
- bool error_if_exists;
+ bool error_if_exists = false;
// If true, the implementation will do aggressive checking of the
// data it is processing and will stop early if it detects any
// errors. This may have unforeseen ramifications: for example, a
// corruption of one DB entry may cause a large number of entries to
// become unreadable or for the entire DB to become unopenable.
- // Default: false
- bool paranoid_checks;
+ bool paranoid_checks = false;
// Use the specified object to interact with the environment,
// e.g. to read/write files, schedule background work, etc.
Env* env;
// Any internal progress/error information generated by the db will
- // be written to info_log if it is non-NULL, or to a file stored
- // in the same directory as the DB contents if info_log is NULL.
- // Default: NULL
- Logger* info_log;
+ // be written to info_log if it is non-null, or to a file stored
+ // in the same directory as the DB contents if info_log is null.
+ Logger* info_log = nullptr;
// -------------------
// Parameters that affect performance
// so you may wish to adjust this parameter to control memory usage.
// Also, a larger write buffer will result in a longer recovery time
// the next time the database is opened.
- //
- // Default: 4MB
- size_t write_buffer_size;
+ size_t write_buffer_size = 4 * 1024 * 1024;
// Number of open files that can be used by the DB. You may need to
// increase this if your database has a large working set (budget
// one open file per 2MB of working set).
- //
- // Default: 1000
- int max_open_files;
+ int max_open_files = 1000;
// Control over blocks (user data is stored in a set of blocks, and
// a block is the unit of reading from disk).
- // If non-NULL, use the specified cache for blocks.
- // If NULL, leveldb will automatically create and use an 8MB internal cache.
- // Default: NULL
- Cache* block_cache;
+ // If non-null, use the specified cache for blocks.
+ // If null, leveldb will automatically create and use an 8MB internal cache.
+ Cache* block_cache = nullptr;
// Approximate size of user data packed per block. Note that the
// block size specified here corresponds to uncompressed data. The
// actual size of the unit read from disk may be smaller if
// compression is enabled. This parameter can be changed dynamically.
- //
- // Default: 4K
- size_t block_size;
+ size_t block_size = 4 * 1024;
// Number of keys between restart points for delta encoding of keys.
// This parameter can be changed dynamically. Most clients should
// leave this parameter alone.
- //
- // Default: 16
- int block_restart_interval;
+ int block_restart_interval = 16;
// Leveldb will write up to this amount of bytes to a file before
// switching to a new one.
// compactions and hence longer latency/performance hiccups.
// Another reason to increase this parameter might be when you are
// initially populating a large database.
- //
- // Default: 2MB
- size_t max_file_size;
+ size_t max_file_size = 2 * 1024 * 1024;
// Compress blocks using the specified compression algorithm. This
// parameter can be changed dynamically.
// worth switching to kNoCompression. Even if the input data is
// incompressible, the kSnappyCompression implementation will
// efficiently detect that and will switch to uncompressed mode.
- CompressionType compression;
+ CompressionType compression = kSnappyCompression;
// EXPERIMENTAL: If true, append to existing MANIFEST and log files
// when a database is opened. This can significantly speed up open.
//
// Default: currently false, but may become true later.
- bool reuse_logs;
+ bool reuse_logs = false;
- // If non-NULL, use the specified filter policy to reduce disk reads.
+ // If non-null, use the specified filter policy to reduce disk reads.
// Many applications will benefit from passing the result of
// NewBloomFilterPolicy() here.
- //
- // Default: NULL
- const FilterPolicy* filter_policy;
+ const FilterPolicy* filter_policy = nullptr;
// Create an Options object with default values for all fields.
Options();
};
// Options that control read operations
-struct ReadOptions {
+struct LEVELDB_EXPORT ReadOptions {
// If true, all data read from underlying storage will be
// verified against corresponding checksums.
- // Default: false
- bool verify_checksums;
+ bool verify_checksums = false;
// Should the data read for this iteration be cached in memory?
// Callers may wish to set this field to false for bulk scans.
- // Default: true
- bool fill_cache;
+ bool fill_cache = true;
- // If "snapshot" is non-NULL, read as of the supplied snapshot
+ // If "snapshot" is non-null, read as of the supplied snapshot
// (which must belong to the DB that is being read and which must
- // not have been released). If "snapshot" is NULL, use an implicit
+ // not have been released). If "snapshot" is null, use an implicit
// snapshot of the state at the beginning of this read operation.
- // Default: NULL
- const Snapshot* snapshot;
-
- ReadOptions()
- : verify_checksums(false),
- fill_cache(true),
- snapshot(NULL) {
- }
+ const Snapshot* snapshot = nullptr;
+
+ ReadOptions() = default;
};
// Options that control write operations
-struct WriteOptions {
+struct LEVELDB_EXPORT WriteOptions {
// If true, the write will be flushed from the operating system
// buffer cache (by calling WritableFile::Sync()) before the write
// is considered complete. If this flag is true, writes will be
// crash semantics as the "write()" system call. A DB write
// with sync==true has similar crash semantics to a "write()"
// system call followed by "fsync()".
- //
- // Default: false
- bool sync;
+ bool sync = false;
- WriteOptions()
- : sync(false) {
- }
+ WriteOptions() = default;
};
} // namespace leveldb
#include <stddef.h>
#include <string.h>
#include <string>
+#include "leveldb/export.h"
namespace leveldb {
-class Slice {
+class LEVELDB_EXPORT Slice {
public:
// Create an empty slice.
Slice() : data_(""), size_(0) { }
// Create a slice that refers to s[0,strlen(s)-1]
Slice(const char* s) : data_(s), size_(strlen(s)) { }
+ // Intentionally copyable.
+ Slice(const Slice&) = default;
+ Slice& operator=(const Slice&) = default;
+
// Return a pointer to the beginning of the referenced data
const char* data() const { return data_; }
private:
const char* data_;
size_t size_;
-
- // Intentionally copyable
};
inline bool operator==(const Slice& x, const Slice& y) {
#ifndef STORAGE_LEVELDB_INCLUDE_STATUS_H_
#define STORAGE_LEVELDB_INCLUDE_STATUS_H_
+#include <algorithm>
#include <string>
+#include "leveldb/export.h"
#include "leveldb/slice.h"
namespace leveldb {
-class Status {
+class LEVELDB_EXPORT Status {
public:
// Create a success status.
- Status() : state_(NULL) { }
+ Status() noexcept : state_(nullptr) { }
~Status() { delete[] state_; }
- // Copy the specified status.
- Status(const Status& s);
- void operator=(const Status& s);
+ Status(const Status& rhs);
+ Status& operator=(const Status& rhs);
+
+ Status(Status&& rhs) noexcept : state_(rhs.state_) { rhs.state_ = nullptr; }
+ Status& operator=(Status&& rhs) noexcept;
// Return a success status.
static Status OK() { return Status(); }
}
// Returns true iff the status indicates success.
- bool ok() const { return (state_ == NULL); }
+ bool ok() const { return (state_ == nullptr); }
// Returns true iff the status indicates a NotFound error.
bool IsNotFound() const { return code() == kNotFound; }
std::string ToString() const;
private:
- // OK status has a NULL state_. Otherwise, state_ is a new[] array
+ // OK status has a null state_. Otherwise, state_ is a new[] array
// of the following form:
// state_[0..3] == length of message
// state_[4] == code
};
Code code() const {
- return (state_ == NULL) ? kOk : static_cast<Code>(state_[4]);
+ return (state_ == nullptr) ? kOk : static_cast<Code>(state_[4]);
}
Status(Code code, const Slice& msg, const Slice& msg2);
static const char* CopyState(const char* s);
};
-inline Status::Status(const Status& s) {
- state_ = (s.state_ == NULL) ? NULL : CopyState(s.state_);
+inline Status::Status(const Status& rhs) {
+ state_ = (rhs.state_ == nullptr) ? nullptr : CopyState(rhs.state_);
}
-inline void Status::operator=(const Status& s) {
- // The following condition catches both aliasing (when this == &s),
- // and the common case where both s and *this are ok.
- if (state_ != s.state_) {
+inline Status& Status::operator=(const Status& rhs) {
+ // The following condition catches both aliasing (when this == &rhs),
+ // and the common case where both rhs and *this are ok.
+ if (state_ != rhs.state_) {
delete[] state_;
- state_ = (s.state_ == NULL) ? NULL : CopyState(s.state_);
+ state_ = (rhs.state_ == nullptr) ? nullptr : CopyState(rhs.state_);
}
+ return *this;
+}
+inline Status& Status::operator=(Status&& rhs) noexcept {
+ std::swap(state_, rhs.state_);
+ return *this;
}
} // namespace leveldb
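
The added move constructor and swap-based move assignment let a Status be handed around without the CopyState() heap copy; the moved-from object keeps the previous state_ and frees it in its destructor. A hedged sketch of the pattern this enables (MakeError is a hypothetical helper, not part of the source):

    // Sketch: Status can now be returned and reassigned without copying
    // its heap-allocated message.
    leveldb::Status MakeError() {
      return leveldb::Status::NotFound("missing key");
    }

    leveldb::Status s;       // ok() == true, state_ == nullptr
    s = MakeError();         // move assignment: the two state_ pointers swap
    if (!s.ok()) {
      std::string text = s.ToString();   // e.g. "NotFound: missing key"
    }
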
#define STORAGE_LEVELDB_INCLUDE_TABLE_H_
#include <stdint.h>
+#include "leveldb/export.h"
#include "leveldb/iterator.h"
namespace leveldb {
// A Table is a sorted map from strings to strings. Tables are
// immutable and persistent. A Table may be safely accessed from
// multiple threads without external synchronization.
-class Table {
+class LEVELDB_EXPORT Table {
public:
// Attempt to open the table that is stored in bytes [0..file_size)
// of "file", and read the metadata entries necessary to allow
// If successful, returns ok and sets "*table" to the newly opened
// table. The client should delete "*table" when no longer needed.
// If there was an error while initializing the table, sets "*table"
- // to NULL and returns a non-ok status. Does not take ownership of
+ // to nullptr and returns a non-ok status. Does not take ownership of
// "*source", but the client must ensure that "source" remains live
// for the duration of the returned table's lifetime.
//
uint64_t file_size,
Table** table);
+ Table(const Table&) = delete;
+ void operator=(const Table&) = delete;
+
~Table();
// Returns a new iterator over the table contents.
void ReadMeta(const Footer& footer);
void ReadFilter(const Slice& filter_handle_value);
-
- // No copying allowed
- Table(const Table&);
- void operator=(const Table&);
};
} // namespace leveldb
#define STORAGE_LEVELDB_INCLUDE_TABLE_BUILDER_H_
#include <stdint.h>
+#include "leveldb/export.h"
#include "leveldb/options.h"
#include "leveldb/status.h"
class BlockHandle;
class WritableFile;
-class TableBuilder {
+class LEVELDB_EXPORT TableBuilder {
public:
// Create a builder that will store the contents of the table it is
// building in *file. Does not close the file. It is up to the
// caller to close the file after calling Finish().
TableBuilder(const Options& options, WritableFile* file);
+ TableBuilder(const TableBuilder&) = delete;
+ void operator=(const TableBuilder&) = delete;
+
// REQUIRES: Either Finish() or Abandon() has been called.
~TableBuilder();
struct Rep;
Rep* rep_;
-
- // No copying allowed
- TableBuilder(const TableBuilder&);
- void operator=(const TableBuilder&);
};
} // namespace leveldb
#define STORAGE_LEVELDB_INCLUDE_WRITE_BATCH_H_
#include <string>
+#include "leveldb/export.h"
#include "leveldb/status.h"
namespace leveldb {
class Slice;
-class WriteBatch {
+class LEVELDB_EXPORT WriteBatch {
public:
WriteBatch();
+
+ // Intentionally copyable.
+ WriteBatch(const WriteBatch&) = default;
+ WriteBatch& operator=(const WriteBatch&) = default;
+
~WriteBatch();
// Store the mapping "key->value" in the database.
// Clear all updates buffered in this batch.
void Clear();
+ // The size of the database changes caused by this batch.
+ //
+ // This number is tied to implementation details, and may change across
+ // releases. It is intended for LevelDB usage metrics.
+ size_t ApproximateSize() const;
+
+ // Copies the operations in "source" to this batch.
+ //
+ // This runs in O(source size) time. However, the constant factor is better
+ // than calling Iterate() over the source batch with a Handler that replicates
+ // the operations into this batch.
+ void Append(const WriteBatch& source);
+
// Support for iterating over the contents of a batch.
class Handler {
public:
friend class WriteBatchInternal;
std::string rep_; // See comment in write_batch.cc for the format of rep_
-
- // Intentionally copyable
};
} // namespace leveldb
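
Append() and ApproximateSize() are the new public entry points in this header. A minimal sketch of combining batches and applying them atomically (db is again assumed to be an open leveldb::DB*):

    // Sketch: merge two batches, then commit them in one atomic write.
    leveldb::WriteBatch batch;
    batch.Put("k1", "v1");
    batch.Delete("k2");

    leveldb::WriteBatch other;
    other.Put("k3", "v3");
    batch.Append(other);                     // replays other's ops into batch

    size_t bytes = batch.ApproximateSize();  // implementation-defined size metric
    (void)bytes;                             // e.g. exported as a usage metric
    leveldb::Status s = db->Write(leveldb::WriteOptions(), &batch);
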
"port.h" in turn includes a platform specific "port_<platform>.h" file
that provides the platform specific implementation.
-See port_posix.h for an example of what must be provided in a platform
+See port_stdcxx.h for an example of what must be provided in a platform
specific header file.
+++ /dev/null
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-// AtomicPointer provides storage for a lock-free pointer.
-// Platform-dependent implementation of AtomicPointer:
-// - If the platform provides a cheap barrier, we use it with raw pointers
-// - If <atomic> is present (on newer versions of gcc, it is), we use
-// a <atomic>-based AtomicPointer. However we prefer the memory
-// barrier based version, because at least on a gcc 4.4 32-bit build
-// on linux, we have encountered a buggy <atomic> implementation.
-// Also, some <atomic> implementations are much slower than a memory-barrier
-// based implementation (~16ns for <atomic> based acquire-load vs. ~1ns for
-// a barrier based acquire-load).
-// This code is based on atomicops-internals-* in Google's perftools:
-// http://code.google.com/p/google-perftools/source/browse/#svn%2Ftrunk%2Fsrc%2Fbase
-
-#ifndef PORT_ATOMIC_POINTER_H_
-#define PORT_ATOMIC_POINTER_H_
-
-#include <stdint.h>
-#ifdef LEVELDB_ATOMIC_PRESENT
-#include <atomic>
-#endif
-#ifdef OS_WIN
-#include <windows.h>
-#endif
-#ifdef OS_MACOSX
-#include <libkern/OSAtomic.h>
-#endif
-
-#if defined(_M_X64) || defined(__x86_64__)
-#define ARCH_CPU_X86_FAMILY 1
-#elif defined(_M_IX86) || defined(__i386__) || defined(__i386)
-#define ARCH_CPU_X86_FAMILY 1
-#elif defined(__ARMEL__)
-#define ARCH_CPU_ARM_FAMILY 1
-#elif defined(__aarch64__)
-#define ARCH_CPU_ARM64_FAMILY 1
-#elif defined(__ppc__) || defined(__powerpc__) || defined(__powerpc64__)
-#define ARCH_CPU_PPC_FAMILY 1
-#elif defined(__mips__)
-#define ARCH_CPU_MIPS_FAMILY 1
-#endif
-
-namespace leveldb {
-namespace port {
-
-// Define MemoryBarrier() if available
-// Windows on x86
-#if defined(OS_WIN) && defined(COMPILER_MSVC) && defined(ARCH_CPU_X86_FAMILY)
-// windows.h already provides a MemoryBarrier(void) macro
-// http://msdn.microsoft.com/en-us/library/ms684208(v=vs.85).aspx
-#define LEVELDB_HAVE_MEMORY_BARRIER
-
-// Mac OS
-#elif defined(OS_MACOSX)
-inline void MemoryBarrier() {
- OSMemoryBarrier();
-}
-#define LEVELDB_HAVE_MEMORY_BARRIER
-
-// Gcc on x86
-#elif defined(ARCH_CPU_X86_FAMILY) && defined(__GNUC__)
-inline void MemoryBarrier() {
- // See http://gcc.gnu.org/ml/gcc/2003-04/msg01180.html for a discussion on
- // this idiom. Also see http://en.wikipedia.org/wiki/Memory_ordering.
- __asm__ __volatile__("" : : : "memory");
-}
-#define LEVELDB_HAVE_MEMORY_BARRIER
-
-// Sun Studio
-#elif defined(ARCH_CPU_X86_FAMILY) && defined(__SUNPRO_CC)
-inline void MemoryBarrier() {
- // See http://gcc.gnu.org/ml/gcc/2003-04/msg01180.html for a discussion on
- // this idiom. Also see http://en.wikipedia.org/wiki/Memory_ordering.
- asm volatile("" : : : "memory");
-}
-#define LEVELDB_HAVE_MEMORY_BARRIER
-
-// ARM Linux
-#elif defined(ARCH_CPU_ARM_FAMILY) && defined(__linux__)
-typedef void (*LinuxKernelMemoryBarrierFunc)(void);
-// The Linux ARM kernel provides a highly optimized device-specific memory
-// barrier function at a fixed memory address that is mapped in every
-// user-level process.
-//
-// This beats using CPU-specific instructions which are, on single-core
-// devices, un-necessary and very costly (e.g. ARMv7-A "dmb" takes more
-// than 180ns on a Cortex-A8 like the one on a Nexus One). Benchmarking
-// shows that the extra function call cost is completely negligible on
-// multi-core devices.
-//
-inline void MemoryBarrier() {
- (*(LinuxKernelMemoryBarrierFunc)0xffff0fa0)();
-}
-#define LEVELDB_HAVE_MEMORY_BARRIER
-
-// ARM64
-#elif defined(ARCH_CPU_ARM64_FAMILY)
-inline void MemoryBarrier() {
- asm volatile("dmb sy" : : : "memory");
-}
-#define LEVELDB_HAVE_MEMORY_BARRIER
-
-// PPC
-#elif defined(ARCH_CPU_PPC_FAMILY) && defined(__GNUC__)
-inline void MemoryBarrier() {
- // TODO for some powerpc expert: is there a cheaper suitable variant?
- // Perhaps by having separate barriers for acquire and release ops.
- asm volatile("sync" : : : "memory");
-}
-#define LEVELDB_HAVE_MEMORY_BARRIER
-
-// MIPS
-#elif defined(ARCH_CPU_MIPS_FAMILY) && defined(__GNUC__)
-inline void MemoryBarrier() {
- __asm__ __volatile__("sync" : : : "memory");
-}
-#define LEVELDB_HAVE_MEMORY_BARRIER
-
-#endif
-
-// AtomicPointer built using platform-specific MemoryBarrier()
-#if defined(LEVELDB_HAVE_MEMORY_BARRIER)
-class AtomicPointer {
- private:
- void* rep_;
- public:
- AtomicPointer() { }
- explicit AtomicPointer(void* p) : rep_(p) {}
- inline void* NoBarrier_Load() const { return rep_; }
- inline void NoBarrier_Store(void* v) { rep_ = v; }
- inline void* Acquire_Load() const {
- void* result = rep_;
- MemoryBarrier();
- return result;
- }
- inline void Release_Store(void* v) {
- MemoryBarrier();
- rep_ = v;
- }
-};
-
-// AtomicPointer based on <cstdatomic>
-#elif defined(LEVELDB_ATOMIC_PRESENT)
-class AtomicPointer {
- private:
- std::atomic<void*> rep_;
- public:
- AtomicPointer() { }
- explicit AtomicPointer(void* v) : rep_(v) { }
- inline void* Acquire_Load() const {
- return rep_.load(std::memory_order_acquire);
- }
- inline void Release_Store(void* v) {
- rep_.store(v, std::memory_order_release);
- }
- inline void* NoBarrier_Load() const {
- return rep_.load(std::memory_order_relaxed);
- }
- inline void NoBarrier_Store(void* v) {
- rep_.store(v, std::memory_order_relaxed);
- }
-};
-
-// Atomic pointer based on sparc memory barriers
-#elif defined(__sparcv9) && defined(__GNUC__)
-class AtomicPointer {
- private:
- void* rep_;
- public:
- AtomicPointer() { }
- explicit AtomicPointer(void* v) : rep_(v) { }
- inline void* Acquire_Load() const {
- void* val;
- __asm__ __volatile__ (
- "ldx [%[rep_]], %[val] \n\t"
- "membar #LoadLoad|#LoadStore \n\t"
- : [val] "=r" (val)
- : [rep_] "r" (&rep_)
- : "memory");
- return val;
- }
- inline void Release_Store(void* v) {
- __asm__ __volatile__ (
- "membar #LoadStore|#StoreStore \n\t"
- "stx %[v], [%[rep_]] \n\t"
- :
- : [rep_] "r" (&rep_), [v] "r" (v)
- : "memory");
- }
- inline void* NoBarrier_Load() const { return rep_; }
- inline void NoBarrier_Store(void* v) { rep_ = v; }
-};
-
-// Atomic pointer based on ia64 acq/rel
-#elif defined(__ia64) && defined(__GNUC__)
-class AtomicPointer {
- private:
- void* rep_;
- public:
- AtomicPointer() { }
- explicit AtomicPointer(void* v) : rep_(v) { }
- inline void* Acquire_Load() const {
- void* val ;
- __asm__ __volatile__ (
- "ld8.acq %[val] = [%[rep_]] \n\t"
- : [val] "=r" (val)
- : [rep_] "r" (&rep_)
- : "memory"
- );
- return val;
- }
- inline void Release_Store(void* v) {
- __asm__ __volatile__ (
- "st8.rel [%[rep_]] = %[v] \n\t"
- :
- : [rep_] "r" (&rep_), [v] "r" (v)
- : "memory"
- );
- }
- inline void* NoBarrier_Load() const { return rep_; }
- inline void NoBarrier_Store(void* v) { rep_ = v; }
-};
-
-// We have neither MemoryBarrier(), nor <atomic>
-#else
-#error Please implement AtomicPointer for this platform.
-
-#endif
-
-#undef LEVELDB_HAVE_MEMORY_BARRIER
-#undef ARCH_CPU_X86_FAMILY
-#undef ARCH_CPU_ARM_FAMILY
-#undef ARCH_CPU_ARM64_FAMILY
-#undef ARCH_CPU_PPC_FAMILY
-
-} // namespace port
-} // namespace leveldb
-
-#endif // PORT_ATOMIC_POINTER_H_
// Include the appropriate platform specific file below. If you are
// porting to a new platform, see "port_example.h" for documentation
// of what the new port_<platform>.h file must provide.
-#if defined(LEVELDB_PLATFORM_POSIX)
-# include "port/port_posix.h"
+#if defined(LEVELDB_PLATFORM_POSIX) || defined(LEVELDB_PLATFORM_WINDOWS)
+# include "port/port_stdcxx.h"
#elif defined(LEVELDB_PLATFORM_CHROMIUM)
# include "port/port_chromium.h"
#endif
--- /dev/null
+// Copyright 2017 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#ifndef STORAGE_LEVELDB_PORT_PORT_CONFIG_H_
+#define STORAGE_LEVELDB_PORT_PORT_CONFIG_H_
+
+// Define to 1 if you have a definition for fdatasync() in <unistd.h>.
+#if !defined(HAVE_FDATASYNC)
+#cmakedefine01 HAVE_FDATASYNC
+#endif // !defined(HAVE_FDATASYNC)
+
+// Define to 1 if you have a definition for F_FULLFSYNC in <fcntl.h>.
+#if !defined(HAVE_FULLFSYNC)
+#cmakedefine01 HAVE_FULLFSYNC
+#endif // !defined(HAVE_FULLFSYNC)
+
+// Define to 1 if you have Google CRC32C.
+#if !defined(HAVE_CRC32C)
+#cmakedefine01 HAVE_CRC32C
+#endif // !defined(HAVE_CRC32C)
+
+// Define to 1 if you have Google Snappy.
+#if !defined(HAVE_SNAPPY)
+#cmakedefine01 HAVE_SNAPPY
+#endif // !defined(HAVE_SNAPPY)
+
+// Define to 1 if your processor stores words with the most significant byte
+// first (like Motorola and SPARC, unlike Intel and VAX).
+#if !defined(LEVELDB_IS_BIG_ENDIAN)
+#cmakedefine01 LEVELDB_IS_BIG_ENDIAN
+#endif // !defined(LEVELDB_IS_BIG_ENDIAN)
+
+#endif // STORAGE_LEVELDB_PORT_PORT_CONFIG_H_
\ No newline at end of file
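
For orientation, CMake's configure_file() rewrites each #cmakedefine01 line into a plain 0/1 macro, so the header that actually gets compiled looks roughly like the fragment below; the concrete values are illustrative and depend on what the build machine provides.

    // Illustrative configure_file() output for two of the checks above;
    // the actual 0/1 values are decided by the CMake feature tests.
    #if !defined(HAVE_FDATASYNC)
    #define HAVE_FDATASYNC 1
    #endif  // !defined(HAVE_FDATASYNC)

    #if !defined(HAVE_SNAPPY)
    #define HAVE_SNAPPY 0
    #endif  // !defined(HAVE_SNAPPY)
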
#ifndef STORAGE_LEVELDB_PORT_PORT_EXAMPLE_H_
#define STORAGE_LEVELDB_PORT_PORT_EXAMPLE_H_
+#include "port/thread_annotations.h"
+
namespace leveldb {
namespace port {
// ------------------ Threading -------------------
// A Mutex represents an exclusive lock.
-class Mutex {
+class LOCKABLE Mutex {
public:
Mutex();
~Mutex();
// Lock the mutex. Waits until other lockers have exited.
// Will deadlock if the mutex is already locked by this thread.
- void Lock();
+ void Lock() EXCLUSIVE_LOCK_FUNCTION();
// Unlock the mutex.
// REQUIRES: This mutex was locked by this thread.
- void Unlock();
+ void Unlock() UNLOCK_FUNCTION();
// Optionally crash if this thread does not hold this mutex.
// The implementation must be fast, especially if NDEBUG is
// defined. The implementation is allowed to skip all checks.
- void AssertHeld();
+ void AssertHeld() ASSERT_EXCLUSIVE_LOCK();
};
class CondVar {
  void SignalAll();
};
-// Thread-safe initialization.
-// Used as follows:
-// static port::OnceType init_control = LEVELDB_ONCE_INIT;
-// static void Initializer() { ... do something ...; }
-// ...
-// port::InitOnce(&init_control, &Initializer);
-typedef intptr_t OnceType;
-#define LEVELDB_ONCE_INIT 0
-extern void InitOnce(port::OnceType*, void (*initializer)());
-
-// A type that holds a pointer that can be read or written atomically
-// (i.e., without word-tearing.)
-class AtomicPointer {
- private:
- intptr_t rep_;
- public:
- // Initialize to arbitrary value
- AtomicPointer();
-
- // Initialize to hold v
- explicit AtomicPointer(void* v) : rep_(v) { }
-
- // Read and return the stored pointer with the guarantee that no
- // later memory access (read or write) by this thread can be
- // reordered ahead of this read.
- void* Acquire_Load() const;
-
- // Set v as the stored pointer with the guarantee that no earlier
- // memory access (read or write) by this thread can be reordered
- // after this store.
- void Release_Store(void* v);
-
- // Read the stored pointer with no ordering guarantees.
- void* NoBarrier_Load() const;
-
- // Set va as the stored pointer with no ordering guarantees.
- void NoBarrier_Store(void* v);
-};
-
// ------------------ Compression -------------------
// Store the snappy compression of "input[0,input_length-1]" in *output.
// Returns false if snappy is not supported by this port.
-extern bool Snappy_Compress(const char* input, size_t input_length,
- std::string* output);
+bool Snappy_Compress(const char* input, size_t input_length,
+ std::string* output);
// If input[0,input_length-1] looks like a valid snappy compressed
// buffer, store the size of the uncompressed data in *result and
// return true. Else return false.
-extern bool Snappy_GetUncompressedLength(const char* input, size_t length,
- size_t* result);
+bool Snappy_GetUncompressedLength(const char* input, size_t length,
+ size_t* result);
// Attempt to snappy uncompress input[0,input_length-1] into *output.
// Returns true if successful, false if the input is invalid lightweight
// compressed data.
// REQUIRES: at least the first "n" bytes of output[] must be writable
// where "n" is the result of a successful call to
// Snappy_GetUncompressedLength.
-extern bool Snappy_Uncompress(const char* input_data, size_t input_length,
- char* output);
+bool Snappy_Uncompress(const char* input_data, size_t input_length,
+ char* output);
// ------------------ Miscellaneous -------------------
// If heap profiling is not supported, returns false.
// Else repeatedly calls (*func)(arg, data, n) and then returns true.
// The concatenation of all "data[0,n-1]" fragments is the heap profile.
-extern bool GetHeapProfile(void (*func)(void*, const char*, int), void* arg);
+bool GetHeapProfile(void (*func)(void*, const char*, int), void* arg);
// Extend the CRC to include the first n bytes of buf.
//
+++ /dev/null
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-#include "port/port_posix.h"
-
-#include <cstdlib>
-#include <stdio.h>
-#include <string.h>
-
-namespace leveldb {
-namespace port {
-
-static void PthreadCall(const char* label, int result) {
- if (result != 0) {
- fprintf(stderr, "pthread %s: %s\n", label, strerror(result));
- abort();
- }
-}
-
-Mutex::Mutex() { PthreadCall("init mutex", pthread_mutex_init(&mu_, NULL)); }
-
-Mutex::~Mutex() { PthreadCall("destroy mutex", pthread_mutex_destroy(&mu_)); }
-
-void Mutex::Lock() { PthreadCall("lock", pthread_mutex_lock(&mu_)); }
-
-void Mutex::Unlock() { PthreadCall("unlock", pthread_mutex_unlock(&mu_)); }
-
-CondVar::CondVar(Mutex* mu)
- : mu_(mu) {
- PthreadCall("init cv", pthread_cond_init(&cv_, NULL));
-}
-
-CondVar::~CondVar() { PthreadCall("destroy cv", pthread_cond_destroy(&cv_)); }
-
-void CondVar::Wait() {
- PthreadCall("wait", pthread_cond_wait(&cv_, &mu_->mu_));
-}
-
-void CondVar::Signal() {
- PthreadCall("signal", pthread_cond_signal(&cv_));
-}
-
-void CondVar::SignalAll() {
- PthreadCall("broadcast", pthread_cond_broadcast(&cv_));
-}
-
-void InitOnce(OnceType* once, void (*initializer)()) {
- PthreadCall("once", pthread_once(once, initializer));
-}
-
-} // namespace port
-} // namespace leveldb
+++ /dev/null
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-//
-// See port_example.h for documentation for the following types/functions.
-
-#ifndef STORAGE_LEVELDB_PORT_PORT_POSIX_H_
-#define STORAGE_LEVELDB_PORT_PORT_POSIX_H_
-
-#undef PLATFORM_IS_LITTLE_ENDIAN
-#if defined(OS_MACOSX)
- #include <machine/endian.h>
- #if defined(__DARWIN_LITTLE_ENDIAN) && defined(__DARWIN_BYTE_ORDER)
- #define PLATFORM_IS_LITTLE_ENDIAN \
- (__DARWIN_BYTE_ORDER == __DARWIN_LITTLE_ENDIAN)
- #endif
-#elif defined(OS_SOLARIS)
- #include <sys/isa_defs.h>
- #ifdef _LITTLE_ENDIAN
- #define PLATFORM_IS_LITTLE_ENDIAN true
- #else
- #define PLATFORM_IS_LITTLE_ENDIAN false
- #endif
-#elif defined(OS_FREEBSD) || defined(OS_OPENBSD) ||\
- defined(OS_NETBSD) || defined(OS_DRAGONFLYBSD)
- #include <sys/types.h>
- #include <sys/endian.h>
- #define PLATFORM_IS_LITTLE_ENDIAN (_BYTE_ORDER == _LITTLE_ENDIAN)
-#elif defined(OS_HPUX)
- #define PLATFORM_IS_LITTLE_ENDIAN false
-#elif defined(OS_ANDROID)
- // Due to a bug in the NDK x86 <sys/endian.h> definition,
- // _BYTE_ORDER must be used instead of __BYTE_ORDER on Android.
- // See http://code.google.com/p/android/issues/detail?id=39824
- #include <endian.h>
- #define PLATFORM_IS_LITTLE_ENDIAN (_BYTE_ORDER == _LITTLE_ENDIAN)
-#else
- #include <endian.h>
-#endif
-
-#include <pthread.h>
-#ifdef SNAPPY
-#include <snappy.h>
-#endif
-#include <stdint.h>
-#include <string>
-#include "port/atomic_pointer.h"
-
-#ifndef PLATFORM_IS_LITTLE_ENDIAN
-#define PLATFORM_IS_LITTLE_ENDIAN (__BYTE_ORDER == __LITTLE_ENDIAN)
-#endif
-
-#if defined(OS_MACOSX) || defined(OS_SOLARIS) || defined(OS_FREEBSD) ||\
- defined(OS_NETBSD) || defined(OS_OPENBSD) || defined(OS_DRAGONFLYBSD) ||\
- defined(OS_ANDROID) || defined(OS_HPUX) || defined(CYGWIN)
-// Use fread/fwrite/fflush on platforms without _unlocked variants
-#define fread_unlocked fread
-#define fwrite_unlocked fwrite
-#define fflush_unlocked fflush
-#endif
-
-#if defined(OS_MACOSX) || defined(OS_FREEBSD) ||\
- defined(OS_OPENBSD) || defined(OS_DRAGONFLYBSD)
-// Use fsync() on platforms without fdatasync()
-#define fdatasync fsync
-#endif
-
-#if defined(OS_ANDROID) && __ANDROID_API__ < 9
-// fdatasync() was only introduced in API level 9 on Android. Use fsync()
-// when targetting older platforms.
-#define fdatasync fsync
-#endif
-
-namespace leveldb {
-namespace port {
-
-static const bool kLittleEndian = PLATFORM_IS_LITTLE_ENDIAN;
-#undef PLATFORM_IS_LITTLE_ENDIAN
-
-class CondVar;
-
-class Mutex {
- public:
- Mutex();
- ~Mutex();
-
- void Lock();
- void Unlock();
- void AssertHeld() { }
-
- private:
- friend class CondVar;
- pthread_mutex_t mu_;
-
- // No copying
- Mutex(const Mutex&);
- void operator=(const Mutex&);
-};
-
-class CondVar {
- public:
- explicit CondVar(Mutex* mu);
- ~CondVar();
- void Wait();
- void Signal();
- void SignalAll();
- private:
- pthread_cond_t cv_;
- Mutex* mu_;
-};
-
-typedef pthread_once_t OnceType;
-#define LEVELDB_ONCE_INIT PTHREAD_ONCE_INIT
-extern void InitOnce(OnceType* once, void (*initializer)());
-
-inline bool Snappy_Compress(const char* input, size_t length,
- ::std::string* output) {
-#ifdef SNAPPY
- output->resize(snappy::MaxCompressedLength(length));
- size_t outlen;
- snappy::RawCompress(input, length, &(*output)[0], &outlen);
- output->resize(outlen);
- return true;
-#endif
-
- return false;
-}
-
-inline bool Snappy_GetUncompressedLength(const char* input, size_t length,
- size_t* result) {
-#ifdef SNAPPY
- return snappy::GetUncompressedLength(input, length, result);
-#else
- return false;
-#endif
-}
-
-inline bool Snappy_Uncompress(const char* input, size_t length,
- char* output) {
-#ifdef SNAPPY
- return snappy::RawUncompress(input, length, output);
-#else
- return false;
-#endif
-}
-
-inline bool GetHeapProfile(void (*func)(void*, const char*, int), void* arg) {
- return false;
-}
-
-uint32_t AcceleratedCRC32C(uint32_t crc, const char* buf, size_t size);
-
-} // namespace port
-} // namespace leveldb
-
-#endif // STORAGE_LEVELDB_PORT_PORT_POSIX_H_
+++ /dev/null
-// Copyright 2016 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-//
-// A portable implementation of crc32c, optimized to handle
-// four bytes at a time.
-//
-// In a separate source file to allow this accelerated CRC32C function to be
-// compiled with the appropriate compiler flags to enable x86 SSE 4.2
-// instructions.
-
-#include <stdint.h>
-#include <string.h>
-#include "port/port.h"
-
-#if defined(LEVELDB_PLATFORM_POSIX_SSE)
-
-#if defined(_MSC_VER)
-#include <intrin.h>
-#elif defined(__GNUC__) && defined(__SSE4_2__)
-#include <nmmintrin.h>
-#include <cpuid.h>
-#endif
-
-#endif // defined(LEVELDB_PLATFORM_POSIX_SSE)
-
-namespace leveldb {
-namespace port {
-
-#if defined(LEVELDB_PLATFORM_POSIX_SSE)
-
-// Used to fetch a naturally-aligned 32-bit word in little endian byte-order
-static inline uint32_t LE_LOAD32(const uint8_t *p) {
- // SSE is x86 only, so ensured that |p| is always little-endian.
- uint32_t word;
- memcpy(&word, p, sizeof(word));
- return word;
-}
-
-#if defined(_M_X64) || defined(__x86_64__) // LE_LOAD64 is only used on x64.
-
-// Used to fetch a naturally-aligned 64-bit word in little endian byte-order
-static inline uint64_t LE_LOAD64(const uint8_t *p) {
- uint64_t dword;
- memcpy(&dword, p, sizeof(dword));
- return dword;
-}
-
-#endif // defined(_M_X64) || defined(__x86_64__)
-
-static inline bool HaveSSE42() {
-#if defined(_MSC_VER)
- int cpu_info[4];
- __cpuid(cpu_info, 1);
- return (cpu_info[2] & (1 << 20)) != 0;
-#elif defined(__GNUC__)
- unsigned int eax, ebx, ecx, edx;
- __get_cpuid(1, &eax, &ebx, &ecx, &edx);
- return (ecx & (1 << 20)) != 0;
-#else
- return false;
-#endif
-}
-
-#endif // defined(LEVELDB_PLATFORM_POSIX_SSE)
-
-// For further improvements see Intel publication at:
-// http://download.intel.com/design/intarch/papers/323405.pdf
-uint32_t AcceleratedCRC32C(uint32_t crc, const char* buf, size_t size) {
-#if !defined(LEVELDB_PLATFORM_POSIX_SSE)
- return 0;
-#else
- static bool have = HaveSSE42();
- if (!have) {
- return 0;
- }
-
- const uint8_t *p = reinterpret_cast<const uint8_t *>(buf);
- const uint8_t *e = p + size;
- uint32_t l = crc ^ 0xffffffffu;
-
-#define STEP1 do { \
- l = _mm_crc32_u8(l, *p++); \
-} while (0)
-#define STEP4 do { \
- l = _mm_crc32_u32(l, LE_LOAD32(p)); \
- p += 4; \
-} while (0)
-#define STEP8 do { \
- l = _mm_crc32_u64(l, LE_LOAD64(p)); \
- p += 8; \
-} while (0)
-
- if (size > 16) {
- // Process unaligned bytes
- for (unsigned int i = reinterpret_cast<uintptr_t>(p) % 8; i; --i) {
- STEP1;
- }
-
- // _mm_crc32_u64 is only available on x64.
-#if defined(_M_X64) || defined(__x86_64__)
- // Process 8 bytes at a time
- while ((e-p) >= 8) {
- STEP8;
- }
- // Process 4 bytes at a time
- if ((e-p) >= 4) {
- STEP4;
- }
-#else // !(defined(_M_X64) || defined(__x86_64__))
- // Process 4 bytes at a time
- while ((e-p) >= 4) {
- STEP4;
- }
-#endif // defined(_M_X64) || defined(__x86_64__)
- }
- // Process the last few bytes
- while (p != e) {
- STEP1;
- }
-#undef STEP8
-#undef STEP4
-#undef STEP1
- return l ^ 0xffffffffu;
-#endif // defined(LEVELDB_PLATFORM_POSIX_SSE)
-}
-
-} // namespace port
-} // namespace leveldb
--- /dev/null
+// Copyright (c) 2018 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#ifndef STORAGE_LEVELDB_PORT_PORT_STDCXX_H_
+#define STORAGE_LEVELDB_PORT_PORT_STDCXX_H_
+
+// port/port_config.h availability is automatically detected via __has_include
+// in newer compilers. If LEVELDB_HAS_PORT_CONFIG_H is defined, it overrides the
+// configuration detection.
+#if defined(LEVELDB_HAS_PORT_CONFIG_H)
+
+#if LEVELDB_HAS_PORT_CONFIG_H
+#include "port/port_config.h"
+#endif // LEVELDB_HAS_PORT_CONFIG_H
+
+#elif defined(__has_include)
+
+#if __has_include("port/port_config.h")
+#include "port/port_config.h"
+#endif // __has_include("port/port_config.h")
+
+#endif // defined(LEVELDB_HAS_PORT_CONFIG_H)
+
+#if HAVE_CRC32C
+#include <crc32c/crc32c.h>
+#endif // HAVE_CRC32C
+#if HAVE_SNAPPY
+#include <snappy.h>
+#endif // HAVE_SNAPPY
+
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <condition_variable> // NOLINT
+#include <mutex> // NOLINT
+#include <string>
+
+#include "port/thread_annotations.h"
+
+namespace leveldb {
+namespace port {
+
+static const bool kLittleEndian = !LEVELDB_IS_BIG_ENDIAN;
+
+class CondVar;
+
+// Thinly wraps std::mutex.
+class LOCKABLE Mutex {
+ public:
+ Mutex() = default;
+ ~Mutex() = default;
+
+ Mutex(const Mutex&) = delete;
+ Mutex& operator=(const Mutex&) = delete;
+
+ void Lock() EXCLUSIVE_LOCK_FUNCTION() { mu_.lock(); }
+ void Unlock() UNLOCK_FUNCTION() { mu_.unlock(); }
+ void AssertHeld() ASSERT_EXCLUSIVE_LOCK() { }
+
+ private:
+ friend class CondVar;
+ std::mutex mu_;
+};
+
+// Thinly wraps std::condition_variable.
+class CondVar {
+ public:
+ explicit CondVar(Mutex* mu) : mu_(mu) { assert(mu != nullptr); }
+ ~CondVar() = default;
+
+ CondVar(const CondVar&) = delete;
+ CondVar& operator=(const CondVar&) = delete;
+
+ void Wait() {
+ std::unique_lock<std::mutex> lock(mu_->mu_, std::adopt_lock);
+ cv_.wait(lock);
+ lock.release();
+ }
+ void Signal() { cv_.notify_one(); }
+ void SignalAll() { cv_.notify_all(); }
+ private:
+ std::condition_variable cv_;
+ Mutex* const mu_;
+};
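
Wait() above adopts the std::mutex that the caller already holds, blocks on the condition variable, and then release()s the unique_lock so that unlocking remains the caller's responsibility. A hedged sketch of the intended calling convention (the ready flag is a hypothetical condition guarded by mu):

    // Sketch: the usual wait/signal pattern against these wrappers.
    leveldb::port::Mutex mu;
    leveldb::port::CondVar cv(&mu);
    bool ready = false;              // hypothetical state protected by mu

    // Waiting thread: mu must be held before Wait() and is held again
    // when Wait() returns.
    mu.Lock();
    while (!ready) {
      cv.Wait();
    }
    mu.Unlock();

    // Signaling thread:
    mu.Lock();
    ready = true;
    mu.Unlock();
    cv.Signal();
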
+
+inline bool Snappy_Compress(const char* input, size_t length,
+ std::string* output) {
+#if HAVE_SNAPPY
+ output->resize(snappy::MaxCompressedLength(length));
+ size_t outlen;
+ snappy::RawCompress(input, length, &(*output)[0], &outlen);
+ output->resize(outlen);
+ return true;
+#else
+ // Silence compiler warnings about unused arguments.
+ (void)input; (void)length; (void)output;
+#endif // HAVE_SNAPPY
+
+ return false;
+}
+
+inline bool Snappy_GetUncompressedLength(const char* input, size_t length,
+ size_t* result) {
+#if HAVE_SNAPPY
+ return snappy::GetUncompressedLength(input, length, result);
+#else
+ // Silence compiler warnings about unused arguments.
+ (void)input; (void)length; (void)result;
+ return false;
+#endif // HAVE_SNAPPY
+}
+
+inline bool Snappy_Uncompress(const char* input, size_t length, char* output) {
+#if HAVE_SNAPPY
+ return snappy::RawUncompress(input, length, output);
+#else
+ // Silence compiler warnings about unused arguments.
+ (void)input; (void)length; (void)output;
+ return false;
+#endif // HAVE_SNAPPY
+}
+
+inline bool GetHeapProfile(void (*func)(void*, const char*, int), void* arg) {
+ // Silence compiler warnings about unused arguments.
+ (void)func; (void)arg;
+ return false;
+}
+
+inline uint32_t AcceleratedCRC32C(uint32_t crc, const char* buf, size_t size) {
+#if HAVE_CRC32C
+ return ::crc32c::Extend(crc, reinterpret_cast<const uint8_t*>(buf), size);
+#else
+ // Silence compiler warnings about unused arguments.
+ (void)crc; (void)buf; (void)size;
+ return 0;
+#endif // HAVE_CRC32C
+}
+
+} // namespace port
+} // namespace leveldb
+
+#endif // STORAGE_LEVELDB_PORT_PORT_STDCXX_H_
#ifndef STORAGE_LEVELDB_PORT_THREAD_ANNOTATIONS_H_
#define STORAGE_LEVELDB_PORT_THREAD_ANNOTATIONS_H_
-// Some environments provide custom macros to aid in static thread-safety
-// analysis. Provide empty definitions of such macros unless they are already
-// defined.
+// Use Clang's thread safety analysis annotations when available. In other
+// environments, the macros receive empty definitions.
+// Usage documentation: https://clang.llvm.org/docs/ThreadSafetyAnalysis.html
+
+#if !defined(THREAD_ANNOTATION_ATTRIBUTE__)
+
+#if defined(__clang__)
+
+#define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x))
+#else
+#define THREAD_ANNOTATION_ATTRIBUTE__(x) // no-op
+#endif
+
+#endif // !defined(THREAD_ANNOTATION_ATTRIBUTE__)
+
+#ifndef GUARDED_BY
+#define GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(guarded_by(x))
+#endif
+
+#ifndef PT_GUARDED_BY
+#define PT_GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(pt_guarded_by(x))
+#endif
+
+#ifndef ACQUIRED_AFTER
+#define ACQUIRED_AFTER(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(__VA_ARGS__))
+#endif
+
+#ifndef ACQUIRED_BEFORE
+#define ACQUIRED_BEFORE(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(__VA_ARGS__))
+#endif
#ifndef EXCLUSIVE_LOCKS_REQUIRED
-#define EXCLUSIVE_LOCKS_REQUIRED(...)
+#define EXCLUSIVE_LOCKS_REQUIRED(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(exclusive_locks_required(__VA_ARGS__))
#endif
#ifndef SHARED_LOCKS_REQUIRED
-#define SHARED_LOCKS_REQUIRED(...)
+#define SHARED_LOCKS_REQUIRED(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(shared_locks_required(__VA_ARGS__))
#endif
#ifndef LOCKS_EXCLUDED
-#define LOCKS_EXCLUDED(...)
+#define LOCKS_EXCLUDED(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(locks_excluded(__VA_ARGS__))
#endif
#ifndef LOCK_RETURNED
-#define LOCK_RETURNED(x)
+#define LOCK_RETURNED(x) \
+ THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x))
#endif
#ifndef LOCKABLE
-#define LOCKABLE
+#define LOCKABLE \
+ THREAD_ANNOTATION_ATTRIBUTE__(lockable)
#endif
#ifndef SCOPED_LOCKABLE
-#define SCOPED_LOCKABLE
+#define SCOPED_LOCKABLE \
+ THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable)
#endif
#ifndef EXCLUSIVE_LOCK_FUNCTION
-#define EXCLUSIVE_LOCK_FUNCTION(...)
+#define EXCLUSIVE_LOCK_FUNCTION(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(exclusive_lock_function(__VA_ARGS__))
#endif
#ifndef SHARED_LOCK_FUNCTION
-#define SHARED_LOCK_FUNCTION(...)
+#define SHARED_LOCK_FUNCTION(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(shared_lock_function(__VA_ARGS__))
#endif
#ifndef EXCLUSIVE_TRYLOCK_FUNCTION
-#define EXCLUSIVE_TRYLOCK_FUNCTION(...)
+#define EXCLUSIVE_TRYLOCK_FUNCTION(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(exclusive_trylock_function(__VA_ARGS__))
#endif
#ifndef SHARED_TRYLOCK_FUNCTION
-#define SHARED_TRYLOCK_FUNCTION(...)
+#define SHARED_TRYLOCK_FUNCTION(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(shared_trylock_function(__VA_ARGS__))
#endif
#ifndef UNLOCK_FUNCTION
-#define UNLOCK_FUNCTION(...)
+#define UNLOCK_FUNCTION(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(unlock_function(__VA_ARGS__))
#endif
#ifndef NO_THREAD_SAFETY_ANALYSIS
-#define NO_THREAD_SAFETY_ANALYSIS
+#define NO_THREAD_SAFETY_ANALYSIS \
+ THREAD_ANNOTATION_ATTRIBUTE__(no_thread_safety_analysis)
+#endif
+
+#ifndef ASSERT_EXCLUSIVE_LOCK
+#define ASSERT_EXCLUSIVE_LOCK(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(assert_exclusive_lock(__VA_ARGS__))
+#endif
+
+#ifndef ASSERT_SHARED_LOCK
+#define ASSERT_SHARED_LOCK(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(assert_shared_lock(__VA_ARGS__))
#endif
#endif // STORAGE_LEVELDB_PORT_THREAD_ANNOTATIONS_H_
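
With the Clang attributes in place, building with -Wthread-safety lets the compiler check lock discipline against these macros. A small hedged example of how a (hypothetical) class might use them together with port::Mutex, whose Lock()/Unlock() are already annotated:

    // Sketch: count_ may only be touched while mu_ is held; Clang's
    // -Wthread-safety analysis enforces this at compile time.
    class Counter {
     public:
      void Increment() LOCKS_EXCLUDED(mu_) {
        mu_.Lock();
        ++count_;
        mu_.Unlock();
      }

      int GetLocked() EXCLUSIVE_LOCKS_REQUIRED(mu_) { return count_; }

     private:
      leveldb::port::Mutex mu_;
      int count_ GUARDED_BY(mu_) = 0;
    };
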
+++ /dev/null
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-// MSVC didn't ship with this file until the 2010 version.
-
-#ifndef STORAGE_LEVELDB_PORT_WIN_STDINT_H_
-#define STORAGE_LEVELDB_PORT_WIN_STDINT_H_
-
-#if !defined(_MSC_VER)
-#error This file should only be included when compiling with MSVC.
-#endif
-
-// Define C99 equivalent types.
-typedef signed char int8_t;
-typedef signed short int16_t;
-typedef signed int int32_t;
-typedef signed long long int64_t;
-typedef unsigned char uint8_t;
-typedef unsigned short uint16_t;
-typedef unsigned int uint32_t;
-typedef unsigned long long uint64_t;
-
-#endif // STORAGE_LEVELDB_PORT_WIN_STDINT_H_
// and the length of the value in "*shared", "*non_shared", and
// "*value_length", respectively. Will not dereference past "limit".
//
-// If any errors are detected, returns NULL. Otherwise, returns a
+// If any errors are detected, returns nullptr. Otherwise, returns a
// pointer to the key delta (just past the three decoded values).
static inline const char* DecodeEntry(const char* p, const char* limit,
uint32_t* shared,
uint32_t* non_shared,
uint32_t* value_length) {
- if (limit - p < 3) return NULL;
+ if (limit - p < 3) return nullptr;
*shared = reinterpret_cast<const unsigned char*>(p)[0];
*non_shared = reinterpret_cast<const unsigned char*>(p)[1];
*value_length = reinterpret_cast<const unsigned char*>(p)[2];
// Fast path: all three values are encoded in one byte each
p += 3;
} else {
- if ((p = GetVarint32Ptr(p, limit, shared)) == NULL) return NULL;
- if ((p = GetVarint32Ptr(p, limit, non_shared)) == NULL) return NULL;
- if ((p = GetVarint32Ptr(p, limit, value_length)) == NULL) return NULL;
+ if ((p = GetVarint32Ptr(p, limit, shared)) == nullptr) return nullptr;
+ if ((p = GetVarint32Ptr(p, limit, non_shared)) == nullptr) return nullptr;
+ if ((p = GetVarint32Ptr(p, limit, value_length)) == nullptr) return nullptr;
}
if (static_cast<uint32_t>(limit - p) < (*non_shared + *value_length)) {
- return NULL;
+ return nullptr;
}
return p;
}
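
As a worked illustration of the entry layout DecodeEntry parses (an illustrative example, with values chosen here): block entries are prefix-compressed against the previous key, and the three leading varints take the one-byte fast path whenever each value is below 128.

    // Sketch: previous key "apple", current key "apricot", value "v".
    //   shared       = 2   // "ap" is shared with the previous key
    //   non_shared   = 5   // "ricot"
    //   value_length = 1
    // Serialized entry (fast path, one byte per varint):
    //   [0x02][0x05][0x01]['r']['i']['c']['o']['t']['v']
    // DecodeEntry returns a pointer to 'r', just past the three varints.
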
const char* key_ptr = DecodeEntry(data_ + region_offset,
data_ + restarts_,
&shared, &non_shared, &value_length);
- if (key_ptr == NULL || (shared != 0)) {
+ if (key_ptr == nullptr || (shared != 0)) {
CorruptionError();
return;
}
// Decode next entry
uint32_t shared, non_shared, value_length;
p = DecodeEntry(p, limit, &shared, &non_shared, &value_length);
- if (p == NULL || key_.size() < shared) {
+ if (p == nullptr || key_.size() < shared) {
CorruptionError();
return false;
} else {
FilterBlockReader::FilterBlockReader(const FilterPolicy* policy,
const Slice& contents)
: policy_(policy),
- data_(NULL),
- offset_(NULL),
+ data_(nullptr),
+ offset_(nullptr),
num_(0),
base_lg_(0) {
size_t n = contents.size();
// Read the block identified by "handle" from "file". On failure
// return non-OK. On success fill *result and return OK.
-extern Status ReadBlock(RandomAccessFile* file,
- const ReadOptions& options,
- const BlockHandle& handle,
- BlockContents* result);
+Status ReadBlock(RandomAccessFile* file,
+ const ReadOptions& options,
+ const BlockHandle& handle,
+ BlockContents* result);
// Implementation details follow. Clients should ignore,
namespace leveldb {
Iterator::Iterator() {
- cleanup_.function = NULL;
- cleanup_.next = NULL;
+ cleanup_head_.function = nullptr;
+ cleanup_head_.next = nullptr;
}
Iterator::~Iterator() {
- if (cleanup_.function != NULL) {
- (*cleanup_.function)(cleanup_.arg1, cleanup_.arg2);
- for (Cleanup* c = cleanup_.next; c != NULL; ) {
- (*c->function)(c->arg1, c->arg2);
- Cleanup* next = c->next;
- delete c;
- c = next;
+ if (!cleanup_head_.IsEmpty()) {
+ cleanup_head_.Run();
+ for (CleanupNode* node = cleanup_head_.next; node != nullptr; ) {
+ node->Run();
+ CleanupNode* next_node = node->next;
+ delete node;
+ node = next_node;
}
}
}
void Iterator::RegisterCleanup(CleanupFunction func, void* arg1, void* arg2) {
- assert(func != NULL);
- Cleanup* c;
- if (cleanup_.function == NULL) {
- c = &cleanup_;
+ assert(func != nullptr);
+ CleanupNode* node;
+ if (cleanup_head_.IsEmpty()) {
+ node = &cleanup_head_;
} else {
- c = new Cleanup;
- c->next = cleanup_.next;
- cleanup_.next = c;
+ node = new CleanupNode();
+ node->next = cleanup_head_.next;
+ cleanup_head_.next = node;
}
- c->function = func;
- c->arg1 = arg1;
- c->arg2 = arg2;
+ node->function = func;
+ node->arg1 = arg1;
+ node->arg2 = arg2;
}
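
The first registered cleanup is stored inline in cleanup_head_ and any additional ones go into heap-allocated nodes; all of them run in ~Iterator(). A hedged sketch of a typical registration, mirroring how the table code ties a block's lifetime to its iterator (DeleteOwnedBlock is a hypothetical deleter):

    // Sketch: free an owned object when the iterator is destroyed.
    static void DeleteOwnedBlock(void* arg1, void* /*arg2*/) {
      delete reinterpret_cast<Block*>(arg1);
    }

    // After creating iter over *block:
    iter->RegisterCleanup(&DeleteOwnedBlock, block, nullptr);
    // delete iter;  // runs DeleteOwnedBlock(block, nullptr) exactly once
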
namespace {
+
class EmptyIterator : public Iterator {
public:
EmptyIterator(const Status& s) : status_(s) { }
- virtual bool Valid() const { return false; }
- virtual void Seek(const Slice& target) { }
- virtual void SeekToFirst() { }
- virtual void SeekToLast() { }
- virtual void Next() { assert(false); }
- virtual void Prev() { assert(false); }
- Slice key() const { assert(false); return Slice(); }
- Slice value() const { assert(false); return Slice(); }
- virtual Status status() const { return status_; }
+ ~EmptyIterator() override = default;
+
+ bool Valid() const override { return false; }
+ void Seek(const Slice& target) override { }
+ void SeekToFirst() override { }
+ void SeekToLast() override { }
+ void Next() override { assert(false); }
+ void Prev() override { assert(false); }
+ Slice key() const override { assert(false); return Slice(); }
+ Slice value() const override { assert(false); return Slice(); }
+ Status status() const override { return status_; }
+
private:
Status status_;
};
-} // namespace
+
+} // anonymous namespace
Iterator* NewEmptyIterator() {
return new EmptyIterator(Status::OK());
// cache locality.
class IteratorWrapper {
public:
- IteratorWrapper(): iter_(NULL), valid_(false) { }
- explicit IteratorWrapper(Iterator* iter): iter_(NULL) {
+ IteratorWrapper(): iter_(nullptr), valid_(false) { }
+ explicit IteratorWrapper(Iterator* iter): iter_(nullptr) {
Set(iter);
}
~IteratorWrapper() { delete iter_; }
void Set(Iterator* iter) {
delete iter_;
iter_ = iter;
- if (iter_ == NULL) {
+ if (iter_ == nullptr) {
valid_ = false;
} else {
Update();
bool Valid() const { return valid_; }
Slice key() const { assert(Valid()); return key_; }
Slice value() const { assert(Valid()); return iter_->value(); }
- // Methods below require iter() != NULL
+ // Methods below require iter() != nullptr
Status status() const { assert(iter_); return iter_->status(); }
void Next() { assert(iter_); iter_->Next(); Update(); }
void Prev() { assert(iter_); iter_->Prev(); Update(); }
: comparator_(comparator),
children_(new IteratorWrapper[n]),
n_(n),
- current_(NULL),
+ current_(nullptr),
direction_(kForward) {
for (int i = 0; i < n; i++) {
children_[i].Set(children[i]);
}
virtual bool Valid() const {
- return (current_ != NULL);
+ return (current_ != nullptr);
}
virtual void SeekToFirst() {
};
void MergingIterator::FindSmallest() {
- IteratorWrapper* smallest = NULL;
+ IteratorWrapper* smallest = nullptr;
for (int i = 0; i < n_; i++) {
IteratorWrapper* child = &children_[i];
if (child->Valid()) {
- if (smallest == NULL) {
+ if (smallest == nullptr) {
smallest = child;
} else if (comparator_->Compare(child->key(), smallest->key()) < 0) {
smallest = child;
}
void MergingIterator::FindLargest() {
- IteratorWrapper* largest = NULL;
+ IteratorWrapper* largest = nullptr;
for (int i = n_-1; i >= 0; i--) {
IteratorWrapper* child = &children_[i];
if (child->Valid()) {
- if (largest == NULL) {
+ if (largest == nullptr) {
largest = child;
} else if (comparator_->Compare(child->key(), largest->key()) > 0) {
largest = child;
// key is present in K child iterators, it will be yielded K times.
//
// REQUIRES: n >= 0
-extern Iterator* NewMergingIterator(
+Iterator* NewMergingIterator(
const Comparator* comparator, Iterator** children, int n);
} // namespace leveldb
RandomAccessFile* file,
uint64_t size,
Table** table) {
- *table = NULL;
+ *table = nullptr;
if (size < Footer::kEncodedLength) {
return Status::Corruption("file is too short to be an sstable");
}
if (!s.ok()) return s;
// Read the index block
- BlockContents contents;
- Block* index_block = NULL;
+ BlockContents index_block_contents;
if (s.ok()) {
ReadOptions opt;
if (options.paranoid_checks) {
opt.verify_checksums = true;
}
- s = ReadBlock(file, opt, footer.index_handle(), &contents);
- if (s.ok()) {
- index_block = new Block(contents);
- }
+ s = ReadBlock(file, opt, footer.index_handle(), &index_block_contents);
}
if (s.ok()) {
// We've successfully read the footer and the index block: we're
// ready to serve requests.
+ Block* index_block = new Block(index_block_contents);
Rep* rep = new Table::Rep;
rep->options = options;
rep->file = file;
rep->metaindex_handle = footer.metaindex_handle();
rep->index_block = index_block;
rep->cache_id = (options.block_cache ? options.block_cache->NewId() : 0);
- rep->filter_data = NULL;
- rep->filter = NULL;
+ rep->filter_data = nullptr;
+ rep->filter = nullptr;
*table = new Table(rep);
(*table)->ReadMeta(footer);
- } else {
- delete index_block;
}
return s;
}
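
Constructing the index Block only after a successful ReadBlock() removes the old error-path delete. For orientation, a hedged sketch of how a caller reaches Table::Open at all; the file name is illustrative and error handling is abbreviated:

    // Sketch: opening one sstable file directly.
    leveldb::Env* env = leveldb::Env::Default();
    leveldb::Options options;
    const std::string fname = "/tmp/000005.ldb";   // illustrative path

    uint64_t file_size = 0;
    leveldb::RandomAccessFile* file = nullptr;
    leveldb::Status s = env->GetFileSize(fname, &file_size);
    if (s.ok()) s = env->NewRandomAccessFile(fname, &file);

    leveldb::Table* table = nullptr;
    if (s.ok()) s = leveldb::Table::Open(options, file, file_size, &table);
    if (s.ok()) {
      leveldb::Iterator* it = table->NewIterator(leveldb::ReadOptions());
      for (it->SeekToFirst(); it->Valid(); it->Next()) {
        // ... inspect it->key() / it->value() ...
      }
      delete it;
    }
    delete table;   // safe: *table stays nullptr on failure
    delete file;
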
void Table::ReadMeta(const Footer& footer) {
- if (rep_->options.filter_policy == NULL) {
+ if (rep_->options.filter_policy == nullptr) {
return; // Do not need any metadata
}
const Slice& index_value) {
Table* table = reinterpret_cast<Table*>(arg);
Cache* block_cache = table->rep_->options.block_cache;
- Block* block = NULL;
- Cache::Handle* cache_handle = NULL;
+ Block* block = nullptr;
+ Cache::Handle* cache_handle = nullptr;
BlockHandle handle;
Slice input = index_value;
if (s.ok()) {
BlockContents contents;
- if (block_cache != NULL) {
+ if (block_cache != nullptr) {
char cache_key_buffer[16];
EncodeFixed64(cache_key_buffer, table->rep_->cache_id);
EncodeFixed64(cache_key_buffer+8, handle.offset());
Slice key(cache_key_buffer, sizeof(cache_key_buffer));
cache_handle = block_cache->Lookup(key);
- if (cache_handle != NULL) {
+ if (cache_handle != nullptr) {
block = reinterpret_cast<Block*>(block_cache->Value(cache_handle));
} else {
s = ReadBlock(table->rep_->file, options, handle, &contents);
}
Iterator* iter;
- if (block != NULL) {
+ if (block != nullptr) {
iter = block->NewIterator(table->rep_->options.comparator);
- if (cache_handle == NULL) {
- iter->RegisterCleanup(&DeleteBlock, block, NULL);
+ if (cache_handle == nullptr) {
+ iter->RegisterCleanup(&DeleteBlock, block, nullptr);
} else {
iter->RegisterCleanup(&ReleaseBlock, block_cache, cache_handle);
}
Slice handle_value = iiter->value();
FilterBlockReader* filter = rep_->filter;
BlockHandle handle;
- if (filter != NULL &&
+ if (filter != nullptr &&
handle.DecodeFrom(&handle_value).ok() &&
!filter->KeyMayMatch(handle.offset(), k)) {
// Not found
index_block(&index_block_options),
num_entries(0),
closed(false),
- filter_block(opt.filter_policy == NULL ? NULL
+ filter_block(opt.filter_policy == nullptr ? nullptr
: new FilterBlockBuilder(opt.filter_policy)),
pending_index_entry(false) {
index_block_options.block_restart_interval = 1;
TableBuilder::TableBuilder(const Options& options, WritableFile* file)
: rep_(new Rep(options, file)) {
- if (rep_->filter_block != NULL) {
+ if (rep_->filter_block != nullptr) {
rep_->filter_block->StartBlock(0);
}
}
r->pending_index_entry = false;
}
- if (r->filter_block != NULL) {
+ if (r->filter_block != nullptr) {
r->filter_block->AddKey(key);
}
r->pending_index_entry = true;
r->status = r->file->Flush();
}
- if (r->filter_block != NULL) {
+ if (r->filter_block != nullptr) {
r->filter_block->StartBlock(r->offset);
}
}
BlockHandle filter_block_handle, metaindex_block_handle, index_block_handle;
// Write filter block
- if (ok() && r->filter_block != NULL) {
+ if (ok() && r->filter_block != nullptr) {
WriteRawBlock(r->filter_block->Finish(), kNoCompression,
&filter_block_handle);
}
// Write metaindex block
if (ok()) {
BlockBuilder meta_index_block(&r->options);
- if (r->filter_block != NULL) {
+ if (r->filter_block != nullptr) {
// Add mapping from "filter.Name" to location of filter data
std::string key = "filter.";
key.append(r->options.filter_policy->Name());
virtual const KVMap& data() { return data_; }
- virtual DB* db() const { return NULL; } // Overridden in DBConstructor
+ virtual DB* db() const { return nullptr; } // Overridden in DBConstructor
private:
KVMap data_;
explicit BlockConstructor(const Comparator* cmp)
: Constructor(cmp),
comparator_(cmp),
- block_(NULL) { }
+ block_(nullptr) { }
~BlockConstructor() {
delete block_;
}
virtual Status FinishImpl(const Options& options, const KVMap& data) {
delete block_;
- block_ = NULL;
+ block_ = nullptr;
BlockBuilder builder(&options);
for (KVMap::const_iterator it = data.begin();
public:
TableConstructor(const Comparator* cmp)
: Constructor(cmp),
- source_(NULL), table_(NULL) {
+ source_(nullptr), table_(nullptr) {
}
~TableConstructor() {
Reset();
void Reset() {
delete table_;
delete source_;
- table_ = NULL;
- source_ = NULL;
+ table_ = nullptr;
+ source_ = nullptr;
}
StringSource* source_;
explicit DBConstructor(const Comparator* cmp)
: Constructor(cmp),
comparator_(cmp) {
- db_ = NULL;
+ db_ = nullptr;
NewDB();
}
~DBConstructor() {
}
virtual Status FinishImpl(const Options& options, const KVMap& data) {
delete db_;
- db_ = NULL;
+ db_ = nullptr;
NewDB();
for (KVMap::const_iterator it = data.begin();
it != data.end();
class Harness {
public:
- Harness() : constructor_(NULL) { }
+ Harness() : constructor_(nullptr) { }
void Init(const TestArgs& args) {
delete constructor_;
- constructor_ = NULL;
+ constructor_ = nullptr;
options_ = Options();
options_.block_restart_interval = args.restart_interval;
}
}
- // Returns NULL if not running against a DB
+ // Returns nullptr if not running against a DB
DB* db() const { return constructor_->db(); }
private:
// It'd be nice if status() returned a const Status& instead of a Status
if (!index_iter_.status().ok()) {
return index_iter_.status();
- } else if (data_iter_.iter() != NULL && !data_iter_.status().ok()) {
+ } else if (data_iter_.iter() != nullptr && !data_iter_.status().ok()) {
return data_iter_.status();
} else {
return status_;
const ReadOptions options_;
Status status_;
IteratorWrapper index_iter_;
- IteratorWrapper data_iter_; // May be NULL
- // If data_iter_ is non-NULL, then "data_block_handle_" holds the
+ IteratorWrapper data_iter_; // May be nullptr
+ // If data_iter_ is non-null, then "data_block_handle_" holds the
// "index_value" passed to block_function_ to create the data_iter_.
std::string data_block_handle_;
};
arg_(arg),
options_(options),
index_iter_(index_iter),
- data_iter_(NULL) {
+ data_iter_(nullptr) {
}
TwoLevelIterator::~TwoLevelIterator() {
void TwoLevelIterator::Seek(const Slice& target) {
index_iter_.Seek(target);
InitDataBlock();
- if (data_iter_.iter() != NULL) data_iter_.Seek(target);
+ if (data_iter_.iter() != nullptr) data_iter_.Seek(target);
SkipEmptyDataBlocksForward();
}
void TwoLevelIterator::SeekToFirst() {
index_iter_.SeekToFirst();
InitDataBlock();
- if (data_iter_.iter() != NULL) data_iter_.SeekToFirst();
+ if (data_iter_.iter() != nullptr) data_iter_.SeekToFirst();
SkipEmptyDataBlocksForward();
}
void TwoLevelIterator::SeekToLast() {
index_iter_.SeekToLast();
InitDataBlock();
- if (data_iter_.iter() != NULL) data_iter_.SeekToLast();
+ if (data_iter_.iter() != nullptr) data_iter_.SeekToLast();
SkipEmptyDataBlocksBackward();
}
void TwoLevelIterator::SkipEmptyDataBlocksForward() {
- while (data_iter_.iter() == NULL || !data_iter_.Valid()) {
+ while (data_iter_.iter() == nullptr || !data_iter_.Valid()) {
// Move to next block
if (!index_iter_.Valid()) {
- SetDataIterator(NULL);
+ SetDataIterator(nullptr);
return;
}
index_iter_.Next();
InitDataBlock();
- if (data_iter_.iter() != NULL) data_iter_.SeekToFirst();
+ if (data_iter_.iter() != nullptr) data_iter_.SeekToFirst();
}
}
void TwoLevelIterator::SkipEmptyDataBlocksBackward() {
- while (data_iter_.iter() == NULL || !data_iter_.Valid()) {
+ while (data_iter_.iter() == nullptr || !data_iter_.Valid()) {
// Move to next block
if (!index_iter_.Valid()) {
- SetDataIterator(NULL);
+ SetDataIterator(nullptr);
return;
}
index_iter_.Prev();
InitDataBlock();
- if (data_iter_.iter() != NULL) data_iter_.SeekToLast();
+ if (data_iter_.iter() != nullptr) data_iter_.SeekToLast();
}
}
void TwoLevelIterator::SetDataIterator(Iterator* data_iter) {
- if (data_iter_.iter() != NULL) SaveError(data_iter_.status());
+ if (data_iter_.iter() != nullptr) SaveError(data_iter_.status());
data_iter_.Set(data_iter);
}
void TwoLevelIterator::InitDataBlock() {
if (!index_iter_.Valid()) {
- SetDataIterator(NULL);
+ SetDataIterator(nullptr);
} else {
Slice handle = index_iter_.value();
- if (data_iter_.iter() != NULL && handle.compare(data_block_handle_) == 0) {
+ if (data_iter_.iter() != nullptr && handle.compare(data_block_handle_) == 0) {
// data_iter_ is already constructed with this iterator, so
// no need to change anything
} else {
//
// Uses a supplied function to convert an index_iter value into
// an iterator over the contents of the corresponding block.
-extern Iterator* NewTwoLevelIterator(
+Iterator* NewTwoLevelIterator(
Iterator* index_iter,
Iterator* (*block_function)(
void* arg,
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "util/arena.h"
-#include <assert.h>
namespace leveldb {
static const int kBlockSize = 4096;
Arena::Arena() : memory_usage_(0) {
- alloc_ptr_ = NULL; // First allocation will allocate a block
+ alloc_ptr_ = nullptr; // First allocation will allocate a block
alloc_bytes_remaining_ = 0;
}
char* Arena::AllocateNewBlock(size_t block_bytes) {
char* result = new char[block_bytes];
blocks_.push_back(result);
- memory_usage_.NoBarrier_Store(
- reinterpret_cast<void*>(MemoryUsage() + block_bytes + sizeof(char*)));
+ memory_usage_.fetch_add(block_bytes + sizeof(char*),
+ std::memory_order_relaxed);
return result;
}
#ifndef STORAGE_LEVELDB_UTIL_ARENA_H_
#define STORAGE_LEVELDB_UTIL_ARENA_H_
+#include <atomic>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
#include <vector>
-#include <assert.h>
-#include <stddef.h>
-#include <stdint.h>
-#include "port/port.h"
namespace leveldb {
// Return a pointer to a newly allocated memory block of "bytes" bytes.
char* Allocate(size_t bytes);
- // Allocate memory with the normal alignment guarantees provided by malloc
+ // Allocate memory with the normal alignment guarantees provided by malloc.
char* AllocateAligned(size_t bytes);
// Returns an estimate of the total memory usage of data allocated
// by the arena.
size_t MemoryUsage() const {
- return reinterpret_cast<uintptr_t>(memory_usage_.NoBarrier_Load());
+ return memory_usage_.load(std::memory_order_relaxed);
}
private:
std::vector<char*> blocks_;
// Total memory usage of the arena.
- port::AtomicPointer memory_usage_;
+ //
+ // TODO(costan): This member is accessed via atomics, but the others are
+ // accessed without any locking. Is this OK?
+ std::atomic<size_t> memory_usage_;
// No copying allowed
Arena(const Arena&);
#include "leveldb/cache.h"
#include "port/port.h"
+#include "port/thread_annotations.h"
#include "util/hash.h"
#include "util/mutexlock.h"
char key_data[1]; // Beginning of key
Slice key() const {
- // For cheaper lookups, we allow a temporary Handle object
- // to store a pointer to a key in "value".
- if (next == this) {
- return *(reinterpret_cast<Slice*>(value));
- } else {
- return Slice(key_data, key_length);
- }
+ // next_ is only equal to this if the LRU handle is the list head of an
+ // empty list. List heads never have meaningful keys.
+ assert(next != this);
+
+ return Slice(key_data, key_length);
}
};
// 4.4.3's builtin hashtable.
class HandleTable {
public:
- HandleTable() : length_(0), elems_(0), list_(NULL) { Resize(); }
+ HandleTable() : length_(0), elems_(0), list_(nullptr) { Resize(); }
~HandleTable() { delete[] list_; }
LRUHandle* Lookup(const Slice& key, uint32_t hash) {
LRUHandle* Insert(LRUHandle* h) {
LRUHandle** ptr = FindPointer(h->key(), h->hash);
LRUHandle* old = *ptr;
- h->next_hash = (old == NULL ? NULL : old->next_hash);
+ h->next_hash = (old == nullptr ? nullptr : old->next_hash);
*ptr = h;
- if (old == NULL) {
+ if (old == nullptr) {
++elems_;
if (elems_ > length_) {
// Since each cache entry is fairly large, we aim for a small
LRUHandle* Remove(const Slice& key, uint32_t hash) {
LRUHandle** ptr = FindPointer(key, hash);
LRUHandle* result = *ptr;
- if (result != NULL) {
+ if (result != nullptr) {
*ptr = result->next_hash;
--elems_;
}
// pointer to the trailing slot in the corresponding linked list.
LRUHandle** FindPointer(const Slice& key, uint32_t hash) {
LRUHandle** ptr = &list_[hash & (length_ - 1)];
- while (*ptr != NULL &&
+ while (*ptr != nullptr &&
((*ptr)->hash != hash || key != (*ptr)->key())) {
ptr = &(*ptr)->next_hash;
}
uint32_t count = 0;
for (uint32_t i = 0; i < length_; i++) {
LRUHandle* h = list_[i];
- while (h != NULL) {
+ while (h != nullptr) {
LRUHandle* next = h->next_hash;
uint32_t hash = h->hash;
LRUHandle** ptr = &new_list[hash & (new_length - 1)];
void LRU_Append(LRUHandle*list, LRUHandle* e);
void Ref(LRUHandle* e);
void Unref(LRUHandle* e);
- bool FinishErase(LRUHandle* e);
+ bool FinishErase(LRUHandle* e) EXCLUSIVE_LOCKS_REQUIRED(mutex_);
// Initialized before use.
size_t capacity_;
// mutex_ protects the following state.
mutable port::Mutex mutex_;
- size_t usage_;
+ size_t usage_ GUARDED_BY(mutex_);
// Dummy head of LRU list.
// lru.prev is newest entry, lru.next is oldest entry.
// Entries have refs==1 and in_cache==true.
- LRUHandle lru_;
+ LRUHandle lru_ GUARDED_BY(mutex_);
// Dummy head of in-use list.
// Entries are in use by clients, and have refs >= 2 and in_cache==true.
- LRUHandle in_use_;
+ LRUHandle in_use_ GUARDED_BY(mutex_);
- HandleTable table_;
+ HandleTable table_ GUARDED_BY(mutex_);
};
LRUCache::LRUCache()
void LRUCache::Unref(LRUHandle* e) {
assert(e->refs > 0);
e->refs--;
- if (e->refs == 0) { // Deallocate.
+ if (e->refs == 0) { // Deallocate.
assert(!e->in_cache);
(*e->deleter)(e->key(), e->value);
free(e);
- } else if (e->in_cache && e->refs == 1) { // No longer in use; move to lru_ list.
+ } else if (e->in_cache && e->refs == 1) {
+ // No longer in use; move to lru_ list.
LRU_Remove(e);
LRU_Append(&lru_, e);
}
Cache::Handle* LRUCache::Lookup(const Slice& key, uint32_t hash) {
MutexLock l(&mutex_);
LRUHandle* e = table_.Lookup(key, hash);
- if (e != NULL) {
+ if (e != nullptr) {
Ref(e);
}
return reinterpret_cast<Cache::Handle*>(e);
LRU_Append(&in_use_, e);
usage_ += charge;
FinishErase(table_.Insert(e));
- } // else don't cache. (Tests use capacity_==0 to turn off caching.)
-
+ } else { // don't cache. (capacity_==0 is supported and turns off caching.)
+ // next is read by key() in an assert, so it must be initialized
+ e->next = nullptr;
+ }
while (usage_ > capacity_ && lru_.next != &lru_) {
LRUHandle* old = lru_.next;
assert(old->refs == 1);
return reinterpret_cast<Cache::Handle*>(e);
}
-// If e != NULL, finish removing *e from the cache; it has already been removed
-// from the hash table. Return whether e != NULL. Requires mutex_ held.
+// If e != nullptr, finish removing *e from the cache; it has already been
+// removed from the hash table. Return whether e != nullptr.
bool LRUCache::FinishErase(LRUHandle* e) {
- if (e != NULL) {
+ if (e != nullptr) {
assert(e->in_cache);
LRU_Remove(e);
e->in_cache = false;
usage_ -= e->charge;
Unref(e);
}
- return e != NULL;
+ return e != nullptr;
}
void LRUCache::Erase(const Slice& key, uint32_t hash) {
int Lookup(int key) {
Cache::Handle* handle = cache_->Lookup(EncodeKey(key));
- const int r = (handle == NULL) ? -1 : DecodeValue(cache_->Value(handle));
- if (handle != NULL) {
+ const int r = (handle == nullptr) ? -1 : DecodeValue(cache_->Value(handle));
+ if (handle != nullptr) {
cache_->Release(handle);
}
return r;
ASSERT_EQ(-1, Lookup(2));
}
+TEST(CacheTest, ZeroSizeCache) {
+ delete cache_;
+ cache_ = NewLRUCache(0);
+
+ Insert(1, 100);
+ ASSERT_EQ(-1, Lookup(1));
+}
+
} // namespace leveldb
int main(int argc, char** argv) {
return reinterpret_cast<const char*>(p);
}
}
- return NULL;
+ return nullptr;
}
bool GetVarint32(Slice* input, uint32_t* value) {
const char* p = input->data();
const char* limit = p + input->size();
const char* q = GetVarint32Ptr(p, limit, value);
- if (q == NULL) {
+ if (q == nullptr) {
return false;
} else {
*input = Slice(q, limit - q);
return reinterpret_cast<const char*>(p);
}
}
- return NULL;
+ return nullptr;
}
bool GetVarint64(Slice* input, uint64_t* value) {
const char* p = input->data();
const char* limit = p + input->size();
const char* q = GetVarint64Ptr(p, limit, value);
- if (q == NULL) {
+ if (q == nullptr) {
return false;
} else {
*input = Slice(q, limit - q);
Slice* result) {
uint32_t len;
p = GetVarint32Ptr(p, limit, &len);
- if (p == NULL) return NULL;
- if (p + len > limit) return NULL;
+ if (p == nullptr) return nullptr;
+ if (p + len > limit) return nullptr;
*result = Slice(p, len);
return p + len;
}
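For reference, the Put.../Get... helpers pair up as a simple round trip. A minimal usage sketch (the VarintRoundTrip function is illustrative, not part of the patch):

#include <cassert>
#include <cstdint>
#include <string>

#include "leveldb/slice.h"
#include "util/coding.h"

void VarintRoundTrip() {
  std::string buf;
  leveldb::PutVarint32(&buf, 300);  // Encodes as two bytes: 0xac 0x02.
  leveldb::PutLengthPrefixedSlice(&buf, leveldb::Slice("hello"));

  leveldb::Slice input(buf);
  uint32_t value;
  leveldb::Slice payload;
  assert(leveldb::GetVarint32(&input, &value) && value == 300);
  assert(leveldb::GetLengthPrefixedSlice(&input, &payload));
  assert(payload == leveldb::Slice("hello"));
  assert(input.empty());  // Both Get... calls advanced the input slice.
}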
#include <stdint.h>
#include <string.h>
+
#include <string>
+
#include "leveldb/slice.h"
#include "port/port.h"
namespace leveldb {
// Standard Put... routines append to a string
-extern void PutFixed32(std::string* dst, uint32_t value);
-extern void PutFixed64(std::string* dst, uint64_t value);
-extern void PutVarint32(std::string* dst, uint32_t value);
-extern void PutVarint64(std::string* dst, uint64_t value);
-extern void PutLengthPrefixedSlice(std::string* dst, const Slice& value);
+void PutFixed32(std::string* dst, uint32_t value);
+void PutFixed64(std::string* dst, uint64_t value);
+void PutVarint32(std::string* dst, uint32_t value);
+void PutVarint64(std::string* dst, uint64_t value);
+void PutLengthPrefixedSlice(std::string* dst, const Slice& value);
// Standard Get... routines parse a value from the beginning of a Slice
// and advance the slice past the parsed value.
-extern bool GetVarint32(Slice* input, uint32_t* value);
-extern bool GetVarint64(Slice* input, uint64_t* value);
-extern bool GetLengthPrefixedSlice(Slice* input, Slice* result);
+bool GetVarint32(Slice* input, uint32_t* value);
+bool GetVarint64(Slice* input, uint64_t* value);
+bool GetLengthPrefixedSlice(Slice* input, Slice* result);
// Pointer-based variants of GetVarint... These either store a value
// in *v and return a pointer just past the parsed value, or return
-// NULL on error. These routines only look at bytes in the range
+// nullptr on error. These routines only look at bytes in the range
// [p..limit-1]
-extern const char* GetVarint32Ptr(const char* p,const char* limit, uint32_t* v);
-extern const char* GetVarint64Ptr(const char* p,const char* limit, uint64_t* v);
+const char* GetVarint32Ptr(const char* p, const char* limit, uint32_t* v);
+const char* GetVarint64Ptr(const char* p, const char* limit, uint64_t* v);
// Returns the length of the varint32 or varint64 encoding of "v"
-extern int VarintLength(uint64_t v);
+int VarintLength(uint64_t v);
// Lower-level versions of Put... that write directly into a character buffer
// REQUIRES: dst has enough space for the value being written
-extern void EncodeFixed32(char* dst, uint32_t value);
-extern void EncodeFixed64(char* dst, uint64_t value);
+void EncodeFixed32(char* dst, uint32_t value);
+void EncodeFixed64(char* dst, uint64_t value);
// Lower-level versions of Put... that write directly into a character buffer
// and return a pointer just past the last byte written.
// REQUIRES: dst has enough space for the value being written
-extern char* EncodeVarint32(char* dst, uint32_t value);
-extern char* EncodeVarint64(char* dst, uint64_t value);
+char* EncodeVarint32(char* dst, uint32_t value);
+char* EncodeVarint64(char* dst, uint64_t value);
// Lower-level versions of Get... that read directly from a character buffer
// without any bounds checking.
}
// Internal routine for use by fallback path of GetVarint32Ptr
-extern const char* GetVarint32PtrFallback(const char* p,
- const char* limit,
- uint32_t* value);
+const char* GetVarint32PtrFallback(const char* p,
+ const char* limit,
+ uint32_t* value);
inline const char* GetVarint32Ptr(const char* p,
const char* limit,
uint32_t* value) {
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
-#include "util/coding.h"
+#include <vector>
+#include "util/coding.h"
#include "util/testharness.h"
namespace leveldb {
uint32_t actual;
const char* start = p;
p = GetVarint32Ptr(p, limit, &actual);
- ASSERT_TRUE(p != NULL);
+ ASSERT_TRUE(p != nullptr);
ASSERT_EQ(expected, actual);
ASSERT_EQ(VarintLength(actual), p - start);
}
uint64_t actual;
const char* start = p;
p = GetVarint64Ptr(p, limit, &actual);
- ASSERT_TRUE(p != NULL);
+ ASSERT_TRUE(p != nullptr);
ASSERT_EQ(values[i], actual);
ASSERT_EQ(VarintLength(actual), p - start);
}
ASSERT_EQ(p, limit);
-
}
TEST(Coding, Varint32Overflow) {
uint32_t result;
std::string input("\x81\x82\x83\x84\x85\x11");
ASSERT_TRUE(GetVarint32Ptr(input.data(), input.data() + input.size(), &result)
- == NULL);
+ == nullptr);
}
TEST(Coding, Varint32Truncation) {
PutVarint32(&s, large_value);
uint32_t result;
for (size_t len = 0; len < s.size() - 1; len++) {
- ASSERT_TRUE(GetVarint32Ptr(s.data(), s.data() + len, &result) == NULL);
+ ASSERT_TRUE(GetVarint32Ptr(s.data(), s.data() + len, &result) == nullptr);
}
- ASSERT_TRUE(GetVarint32Ptr(s.data(), s.data() + s.size(), &result) != NULL);
+ ASSERT_TRUE(
+ GetVarint32Ptr(s.data(), s.data() + s.size(), &result) != nullptr);
ASSERT_EQ(large_value, result);
}
uint64_t result;
std::string input("\x81\x82\x83\x84\x85\x81\x82\x83\x84\x85\x11");
ASSERT_TRUE(GetVarint64Ptr(input.data(), input.data() + input.size(), &result)
- == NULL);
+ == nullptr);
}
TEST(Coding, Varint64Truncation) {
PutVarint64(&s, large_value);
uint64_t result;
for (size_t len = 0; len < s.size() - 1; len++) {
- ASSERT_TRUE(GetVarint64Ptr(s.data(), s.data() + len, &result) == NULL);
+ ASSERT_TRUE(GetVarint64Ptr(s.data(), s.data() + len, &result) == nullptr);
}
- ASSERT_TRUE(GetVarint64Ptr(s.data(), s.data() + s.size(), &result) != NULL);
+ ASSERT_TRUE(
+ GetVarint64Ptr(s.data(), s.data() + s.size(), &result) != nullptr);
ASSERT_EQ(large_value, result);
}
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include <algorithm>
-#include <stdint.h>
+#include <cstdint>
+#include <string>
+
#include "leveldb/comparator.h"
#include "leveldb/slice.h"
-#include "port/port.h"
#include "util/logging.h"
+#include "util/no_destructor.h"
namespace leveldb {
};
} // namespace
-static port::OnceType once = LEVELDB_ONCE_INIT;
-static const Comparator* bytewise;
-
-static void InitModule() {
- bytewise = new BytewiseComparatorImpl;
-}
-
const Comparator* BytewiseComparator() {
- port::InitOnce(&once, InitModule);
- return bytewise;
+ static NoDestructor<BytewiseComparatorImpl> singleton;
+ return singleton.get();
}
} // namespace leveldb
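The change above replaces the port::InitOnce singleton with a function-local static wrapped in NoDestructor, so BytewiseComparatorImpl is built on first use (thread-safe since C++11) and never destroyed at process exit. A minimal sketch of the idea behind such a wrapper (the real util/no_destructor.h may differ in detail):

#include <new>
#include <type_traits>
#include <utility>

// Constructs T in aligned storage and intentionally never runs ~T(), which
// sidesteps static-destruction-order problems for process-lifetime singletons.
template <typename T>
class NoDestructorSketch {
 public:
  template <typename... Args>
  explicit NoDestructorSketch(Args&&... args) {
    new (&storage_) T(std::forward<Args>(args)...);
  }
  ~NoDestructorSketch() = default;  // Deliberately does not destroy T.

  T* get() { return reinterpret_cast<T*>(&storage_); }

 private:
  typename std::aligned_storage<sizeof(T), alignof(T)>::type storage_;
};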
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
//
-// A portable implementation of crc32c, optimized to handle
-// four bytes at a time.
+// A portable implementation of crc32c.
#include "util/crc32c.h"
+#include <stddef.h>
#include <stdint.h>
#include "port/port.h"
namespace leveldb {
namespace crc32c {
-static const uint32_t table0_[256] = {
- 0x00000000, 0xf26b8303, 0xe13b70f7, 0x1350f3f4,
- 0xc79a971f, 0x35f1141c, 0x26a1e7e8, 0xd4ca64eb,
- 0x8ad958cf, 0x78b2dbcc, 0x6be22838, 0x9989ab3b,
- 0x4d43cfd0, 0xbf284cd3, 0xac78bf27, 0x5e133c24,
- 0x105ec76f, 0xe235446c, 0xf165b798, 0x030e349b,
- 0xd7c45070, 0x25afd373, 0x36ff2087, 0xc494a384,
- 0x9a879fa0, 0x68ec1ca3, 0x7bbcef57, 0x89d76c54,
- 0x5d1d08bf, 0xaf768bbc, 0xbc267848, 0x4e4dfb4b,
- 0x20bd8ede, 0xd2d60ddd, 0xc186fe29, 0x33ed7d2a,
- 0xe72719c1, 0x154c9ac2, 0x061c6936, 0xf477ea35,
- 0xaa64d611, 0x580f5512, 0x4b5fa6e6, 0xb93425e5,
- 0x6dfe410e, 0x9f95c20d, 0x8cc531f9, 0x7eaeb2fa,
- 0x30e349b1, 0xc288cab2, 0xd1d83946, 0x23b3ba45,
- 0xf779deae, 0x05125dad, 0x1642ae59, 0xe4292d5a,
- 0xba3a117e, 0x4851927d, 0x5b016189, 0xa96ae28a,
- 0x7da08661, 0x8fcb0562, 0x9c9bf696, 0x6ef07595,
- 0x417b1dbc, 0xb3109ebf, 0xa0406d4b, 0x522bee48,
- 0x86e18aa3, 0x748a09a0, 0x67dafa54, 0x95b17957,
- 0xcba24573, 0x39c9c670, 0x2a993584, 0xd8f2b687,
- 0x0c38d26c, 0xfe53516f, 0xed03a29b, 0x1f682198,
- 0x5125dad3, 0xa34e59d0, 0xb01eaa24, 0x42752927,
- 0x96bf4dcc, 0x64d4cecf, 0x77843d3b, 0x85efbe38,
- 0xdbfc821c, 0x2997011f, 0x3ac7f2eb, 0xc8ac71e8,
- 0x1c661503, 0xee0d9600, 0xfd5d65f4, 0x0f36e6f7,
- 0x61c69362, 0x93ad1061, 0x80fde395, 0x72966096,
- 0xa65c047d, 0x5437877e, 0x4767748a, 0xb50cf789,
- 0xeb1fcbad, 0x197448ae, 0x0a24bb5a, 0xf84f3859,
- 0x2c855cb2, 0xdeeedfb1, 0xcdbe2c45, 0x3fd5af46,
- 0x7198540d, 0x83f3d70e, 0x90a324fa, 0x62c8a7f9,
- 0xb602c312, 0x44694011, 0x5739b3e5, 0xa55230e6,
- 0xfb410cc2, 0x092a8fc1, 0x1a7a7c35, 0xe811ff36,
- 0x3cdb9bdd, 0xceb018de, 0xdde0eb2a, 0x2f8b6829,
- 0x82f63b78, 0x709db87b, 0x63cd4b8f, 0x91a6c88c,
- 0x456cac67, 0xb7072f64, 0xa457dc90, 0x563c5f93,
- 0x082f63b7, 0xfa44e0b4, 0xe9141340, 0x1b7f9043,
- 0xcfb5f4a8, 0x3dde77ab, 0x2e8e845f, 0xdce5075c,
- 0x92a8fc17, 0x60c37f14, 0x73938ce0, 0x81f80fe3,
- 0x55326b08, 0xa759e80b, 0xb4091bff, 0x466298fc,
- 0x1871a4d8, 0xea1a27db, 0xf94ad42f, 0x0b21572c,
- 0xdfeb33c7, 0x2d80b0c4, 0x3ed04330, 0xccbbc033,
- 0xa24bb5a6, 0x502036a5, 0x4370c551, 0xb11b4652,
- 0x65d122b9, 0x97baa1ba, 0x84ea524e, 0x7681d14d,
- 0x2892ed69, 0xdaf96e6a, 0xc9a99d9e, 0x3bc21e9d,
- 0xef087a76, 0x1d63f975, 0x0e330a81, 0xfc588982,
- 0xb21572c9, 0x407ef1ca, 0x532e023e, 0xa145813d,
- 0x758fe5d6, 0x87e466d5, 0x94b49521, 0x66df1622,
- 0x38cc2a06, 0xcaa7a905, 0xd9f75af1, 0x2b9cd9f2,
- 0xff56bd19, 0x0d3d3e1a, 0x1e6dcdee, 0xec064eed,
- 0xc38d26c4, 0x31e6a5c7, 0x22b65633, 0xd0ddd530,
- 0x0417b1db, 0xf67c32d8, 0xe52cc12c, 0x1747422f,
- 0x49547e0b, 0xbb3ffd08, 0xa86f0efc, 0x5a048dff,
- 0x8ecee914, 0x7ca56a17, 0x6ff599e3, 0x9d9e1ae0,
- 0xd3d3e1ab, 0x21b862a8, 0x32e8915c, 0xc083125f,
- 0x144976b4, 0xe622f5b7, 0xf5720643, 0x07198540,
- 0x590ab964, 0xab613a67, 0xb831c993, 0x4a5a4a90,
- 0x9e902e7b, 0x6cfbad78, 0x7fab5e8c, 0x8dc0dd8f,
- 0xe330a81a, 0x115b2b19, 0x020bd8ed, 0xf0605bee,
- 0x24aa3f05, 0xd6c1bc06, 0xc5914ff2, 0x37faccf1,
- 0x69e9f0d5, 0x9b8273d6, 0x88d28022, 0x7ab90321,
- 0xae7367ca, 0x5c18e4c9, 0x4f48173d, 0xbd23943e,
- 0xf36e6f75, 0x0105ec76, 0x12551f82, 0xe03e9c81,
- 0x34f4f86a, 0xc69f7b69, 0xd5cf889d, 0x27a40b9e,
- 0x79b737ba, 0x8bdcb4b9, 0x988c474d, 0x6ae7c44e,
- 0xbe2da0a5, 0x4c4623a6, 0x5f16d052, 0xad7d5351
-};
-static const uint32_t table1_[256] = {
- 0x00000000, 0x13a29877, 0x274530ee, 0x34e7a899,
- 0x4e8a61dc, 0x5d28f9ab, 0x69cf5132, 0x7a6dc945,
- 0x9d14c3b8, 0x8eb65bcf, 0xba51f356, 0xa9f36b21,
- 0xd39ea264, 0xc03c3a13, 0xf4db928a, 0xe7790afd,
- 0x3fc5f181, 0x2c6769f6, 0x1880c16f, 0x0b225918,
- 0x714f905d, 0x62ed082a, 0x560aa0b3, 0x45a838c4,
- 0xa2d13239, 0xb173aa4e, 0x859402d7, 0x96369aa0,
- 0xec5b53e5, 0xfff9cb92, 0xcb1e630b, 0xd8bcfb7c,
- 0x7f8be302, 0x6c297b75, 0x58ced3ec, 0x4b6c4b9b,
- 0x310182de, 0x22a31aa9, 0x1644b230, 0x05e62a47,
- 0xe29f20ba, 0xf13db8cd, 0xc5da1054, 0xd6788823,
- 0xac154166, 0xbfb7d911, 0x8b507188, 0x98f2e9ff,
- 0x404e1283, 0x53ec8af4, 0x670b226d, 0x74a9ba1a,
- 0x0ec4735f, 0x1d66eb28, 0x298143b1, 0x3a23dbc6,
- 0xdd5ad13b, 0xcef8494c, 0xfa1fe1d5, 0xe9bd79a2,
- 0x93d0b0e7, 0x80722890, 0xb4958009, 0xa737187e,
- 0xff17c604, 0xecb55e73, 0xd852f6ea, 0xcbf06e9d,
- 0xb19da7d8, 0xa23f3faf, 0x96d89736, 0x857a0f41,
- 0x620305bc, 0x71a19dcb, 0x45463552, 0x56e4ad25,
- 0x2c896460, 0x3f2bfc17, 0x0bcc548e, 0x186eccf9,
- 0xc0d23785, 0xd370aff2, 0xe797076b, 0xf4359f1c,
- 0x8e585659, 0x9dface2e, 0xa91d66b7, 0xbabffec0,
- 0x5dc6f43d, 0x4e646c4a, 0x7a83c4d3, 0x69215ca4,
- 0x134c95e1, 0x00ee0d96, 0x3409a50f, 0x27ab3d78,
- 0x809c2506, 0x933ebd71, 0xa7d915e8, 0xb47b8d9f,
- 0xce1644da, 0xddb4dcad, 0xe9537434, 0xfaf1ec43,
- 0x1d88e6be, 0x0e2a7ec9, 0x3acdd650, 0x296f4e27,
- 0x53028762, 0x40a01f15, 0x7447b78c, 0x67e52ffb,
- 0xbf59d487, 0xacfb4cf0, 0x981ce469, 0x8bbe7c1e,
- 0xf1d3b55b, 0xe2712d2c, 0xd69685b5, 0xc5341dc2,
- 0x224d173f, 0x31ef8f48, 0x050827d1, 0x16aabfa6,
- 0x6cc776e3, 0x7f65ee94, 0x4b82460d, 0x5820de7a,
- 0xfbc3faf9, 0xe861628e, 0xdc86ca17, 0xcf245260,
- 0xb5499b25, 0xa6eb0352, 0x920cabcb, 0x81ae33bc,
- 0x66d73941, 0x7575a136, 0x419209af, 0x523091d8,
- 0x285d589d, 0x3bffc0ea, 0x0f186873, 0x1cbaf004,
- 0xc4060b78, 0xd7a4930f, 0xe3433b96, 0xf0e1a3e1,
- 0x8a8c6aa4, 0x992ef2d3, 0xadc95a4a, 0xbe6bc23d,
- 0x5912c8c0, 0x4ab050b7, 0x7e57f82e, 0x6df56059,
- 0x1798a91c, 0x043a316b, 0x30dd99f2, 0x237f0185,
- 0x844819fb, 0x97ea818c, 0xa30d2915, 0xb0afb162,
- 0xcac27827, 0xd960e050, 0xed8748c9, 0xfe25d0be,
- 0x195cda43, 0x0afe4234, 0x3e19eaad, 0x2dbb72da,
- 0x57d6bb9f, 0x447423e8, 0x70938b71, 0x63311306,
- 0xbb8de87a, 0xa82f700d, 0x9cc8d894, 0x8f6a40e3,
- 0xf50789a6, 0xe6a511d1, 0xd242b948, 0xc1e0213f,
- 0x26992bc2, 0x353bb3b5, 0x01dc1b2c, 0x127e835b,
- 0x68134a1e, 0x7bb1d269, 0x4f567af0, 0x5cf4e287,
- 0x04d43cfd, 0x1776a48a, 0x23910c13, 0x30339464,
- 0x4a5e5d21, 0x59fcc556, 0x6d1b6dcf, 0x7eb9f5b8,
- 0x99c0ff45, 0x8a626732, 0xbe85cfab, 0xad2757dc,
- 0xd74a9e99, 0xc4e806ee, 0xf00fae77, 0xe3ad3600,
- 0x3b11cd7c, 0x28b3550b, 0x1c54fd92, 0x0ff665e5,
- 0x759baca0, 0x663934d7, 0x52de9c4e, 0x417c0439,
- 0xa6050ec4, 0xb5a796b3, 0x81403e2a, 0x92e2a65d,
- 0xe88f6f18, 0xfb2df76f, 0xcfca5ff6, 0xdc68c781,
- 0x7b5fdfff, 0x68fd4788, 0x5c1aef11, 0x4fb87766,
- 0x35d5be23, 0x26772654, 0x12908ecd, 0x013216ba,
- 0xe64b1c47, 0xf5e98430, 0xc10e2ca9, 0xd2acb4de,
- 0xa8c17d9b, 0xbb63e5ec, 0x8f844d75, 0x9c26d502,
- 0x449a2e7e, 0x5738b609, 0x63df1e90, 0x707d86e7,
- 0x0a104fa2, 0x19b2d7d5, 0x2d557f4c, 0x3ef7e73b,
- 0xd98eedc6, 0xca2c75b1, 0xfecbdd28, 0xed69455f,
- 0x97048c1a, 0x84a6146d, 0xb041bcf4, 0xa3e32483
-};
-static const uint32_t table2_[256] = {
- 0x00000000, 0xa541927e, 0x4f6f520d, 0xea2ec073,
- 0x9edea41a, 0x3b9f3664, 0xd1b1f617, 0x74f06469,
- 0x38513ec5, 0x9d10acbb, 0x773e6cc8, 0xd27ffeb6,
- 0xa68f9adf, 0x03ce08a1, 0xe9e0c8d2, 0x4ca15aac,
- 0x70a27d8a, 0xd5e3eff4, 0x3fcd2f87, 0x9a8cbdf9,
- 0xee7cd990, 0x4b3d4bee, 0xa1138b9d, 0x045219e3,
- 0x48f3434f, 0xedb2d131, 0x079c1142, 0xa2dd833c,
- 0xd62de755, 0x736c752b, 0x9942b558, 0x3c032726,
- 0xe144fb14, 0x4405696a, 0xae2ba919, 0x0b6a3b67,
- 0x7f9a5f0e, 0xdadbcd70, 0x30f50d03, 0x95b49f7d,
- 0xd915c5d1, 0x7c5457af, 0x967a97dc, 0x333b05a2,
- 0x47cb61cb, 0xe28af3b5, 0x08a433c6, 0xade5a1b8,
- 0x91e6869e, 0x34a714e0, 0xde89d493, 0x7bc846ed,
- 0x0f382284, 0xaa79b0fa, 0x40577089, 0xe516e2f7,
- 0xa9b7b85b, 0x0cf62a25, 0xe6d8ea56, 0x43997828,
- 0x37691c41, 0x92288e3f, 0x78064e4c, 0xdd47dc32,
- 0xc76580d9, 0x622412a7, 0x880ad2d4, 0x2d4b40aa,
- 0x59bb24c3, 0xfcfab6bd, 0x16d476ce, 0xb395e4b0,
- 0xff34be1c, 0x5a752c62, 0xb05bec11, 0x151a7e6f,
- 0x61ea1a06, 0xc4ab8878, 0x2e85480b, 0x8bc4da75,
- 0xb7c7fd53, 0x12866f2d, 0xf8a8af5e, 0x5de93d20,
- 0x29195949, 0x8c58cb37, 0x66760b44, 0xc337993a,
- 0x8f96c396, 0x2ad751e8, 0xc0f9919b, 0x65b803e5,
- 0x1148678c, 0xb409f5f2, 0x5e273581, 0xfb66a7ff,
- 0x26217bcd, 0x8360e9b3, 0x694e29c0, 0xcc0fbbbe,
- 0xb8ffdfd7, 0x1dbe4da9, 0xf7908dda, 0x52d11fa4,
- 0x1e704508, 0xbb31d776, 0x511f1705, 0xf45e857b,
- 0x80aee112, 0x25ef736c, 0xcfc1b31f, 0x6a802161,
- 0x56830647, 0xf3c29439, 0x19ec544a, 0xbcadc634,
- 0xc85da25d, 0x6d1c3023, 0x8732f050, 0x2273622e,
- 0x6ed23882, 0xcb93aafc, 0x21bd6a8f, 0x84fcf8f1,
- 0xf00c9c98, 0x554d0ee6, 0xbf63ce95, 0x1a225ceb,
- 0x8b277743, 0x2e66e53d, 0xc448254e, 0x6109b730,
- 0x15f9d359, 0xb0b84127, 0x5a968154, 0xffd7132a,
- 0xb3764986, 0x1637dbf8, 0xfc191b8b, 0x595889f5,
- 0x2da8ed9c, 0x88e97fe2, 0x62c7bf91, 0xc7862def,
- 0xfb850ac9, 0x5ec498b7, 0xb4ea58c4, 0x11abcaba,
- 0x655baed3, 0xc01a3cad, 0x2a34fcde, 0x8f756ea0,
- 0xc3d4340c, 0x6695a672, 0x8cbb6601, 0x29faf47f,
- 0x5d0a9016, 0xf84b0268, 0x1265c21b, 0xb7245065,
- 0x6a638c57, 0xcf221e29, 0x250cde5a, 0x804d4c24,
- 0xf4bd284d, 0x51fcba33, 0xbbd27a40, 0x1e93e83e,
- 0x5232b292, 0xf77320ec, 0x1d5de09f, 0xb81c72e1,
- 0xccec1688, 0x69ad84f6, 0x83834485, 0x26c2d6fb,
- 0x1ac1f1dd, 0xbf8063a3, 0x55aea3d0, 0xf0ef31ae,
- 0x841f55c7, 0x215ec7b9, 0xcb7007ca, 0x6e3195b4,
- 0x2290cf18, 0x87d15d66, 0x6dff9d15, 0xc8be0f6b,
- 0xbc4e6b02, 0x190ff97c, 0xf321390f, 0x5660ab71,
- 0x4c42f79a, 0xe90365e4, 0x032da597, 0xa66c37e9,
- 0xd29c5380, 0x77ddc1fe, 0x9df3018d, 0x38b293f3,
- 0x7413c95f, 0xd1525b21, 0x3b7c9b52, 0x9e3d092c,
- 0xeacd6d45, 0x4f8cff3b, 0xa5a23f48, 0x00e3ad36,
- 0x3ce08a10, 0x99a1186e, 0x738fd81d, 0xd6ce4a63,
- 0xa23e2e0a, 0x077fbc74, 0xed517c07, 0x4810ee79,
- 0x04b1b4d5, 0xa1f026ab, 0x4bdee6d8, 0xee9f74a6,
- 0x9a6f10cf, 0x3f2e82b1, 0xd50042c2, 0x7041d0bc,
- 0xad060c8e, 0x08479ef0, 0xe2695e83, 0x4728ccfd,
- 0x33d8a894, 0x96993aea, 0x7cb7fa99, 0xd9f668e7,
- 0x9557324b, 0x3016a035, 0xda386046, 0x7f79f238,
- 0x0b899651, 0xaec8042f, 0x44e6c45c, 0xe1a75622,
- 0xdda47104, 0x78e5e37a, 0x92cb2309, 0x378ab177,
- 0x437ad51e, 0xe63b4760, 0x0c158713, 0xa954156d,
- 0xe5f54fc1, 0x40b4ddbf, 0xaa9a1dcc, 0x0fdb8fb2,
- 0x7b2bebdb, 0xde6a79a5, 0x3444b9d6, 0x91052ba8
-};
-static const uint32_t table3_[256] = {
- 0x00000000, 0xdd45aab8, 0xbf672381, 0x62228939,
- 0x7b2231f3, 0xa6679b4b, 0xc4451272, 0x1900b8ca,
- 0xf64463e6, 0x2b01c95e, 0x49234067, 0x9466eadf,
- 0x8d665215, 0x5023f8ad, 0x32017194, 0xef44db2c,
- 0xe964b13d, 0x34211b85, 0x560392bc, 0x8b463804,
- 0x924680ce, 0x4f032a76, 0x2d21a34f, 0xf06409f7,
- 0x1f20d2db, 0xc2657863, 0xa047f15a, 0x7d025be2,
- 0x6402e328, 0xb9474990, 0xdb65c0a9, 0x06206a11,
- 0xd725148b, 0x0a60be33, 0x6842370a, 0xb5079db2,
- 0xac072578, 0x71428fc0, 0x136006f9, 0xce25ac41,
- 0x2161776d, 0xfc24ddd5, 0x9e0654ec, 0x4343fe54,
- 0x5a43469e, 0x8706ec26, 0xe524651f, 0x3861cfa7,
- 0x3e41a5b6, 0xe3040f0e, 0x81268637, 0x5c632c8f,
- 0x45639445, 0x98263efd, 0xfa04b7c4, 0x27411d7c,
- 0xc805c650, 0x15406ce8, 0x7762e5d1, 0xaa274f69,
- 0xb327f7a3, 0x6e625d1b, 0x0c40d422, 0xd1057e9a,
- 0xaba65fe7, 0x76e3f55f, 0x14c17c66, 0xc984d6de,
- 0xd0846e14, 0x0dc1c4ac, 0x6fe34d95, 0xb2a6e72d,
- 0x5de23c01, 0x80a796b9, 0xe2851f80, 0x3fc0b538,
- 0x26c00df2, 0xfb85a74a, 0x99a72e73, 0x44e284cb,
- 0x42c2eeda, 0x9f874462, 0xfda5cd5b, 0x20e067e3,
- 0x39e0df29, 0xe4a57591, 0x8687fca8, 0x5bc25610,
- 0xb4868d3c, 0x69c32784, 0x0be1aebd, 0xd6a40405,
- 0xcfa4bccf, 0x12e11677, 0x70c39f4e, 0xad8635f6,
- 0x7c834b6c, 0xa1c6e1d4, 0xc3e468ed, 0x1ea1c255,
- 0x07a17a9f, 0xdae4d027, 0xb8c6591e, 0x6583f3a6,
- 0x8ac7288a, 0x57828232, 0x35a00b0b, 0xe8e5a1b3,
- 0xf1e51979, 0x2ca0b3c1, 0x4e823af8, 0x93c79040,
- 0x95e7fa51, 0x48a250e9, 0x2a80d9d0, 0xf7c57368,
- 0xeec5cba2, 0x3380611a, 0x51a2e823, 0x8ce7429b,
- 0x63a399b7, 0xbee6330f, 0xdcc4ba36, 0x0181108e,
- 0x1881a844, 0xc5c402fc, 0xa7e68bc5, 0x7aa3217d,
- 0x52a0c93f, 0x8fe56387, 0xedc7eabe, 0x30824006,
- 0x2982f8cc, 0xf4c75274, 0x96e5db4d, 0x4ba071f5,
- 0xa4e4aad9, 0x79a10061, 0x1b838958, 0xc6c623e0,
- 0xdfc69b2a, 0x02833192, 0x60a1b8ab, 0xbde41213,
- 0xbbc47802, 0x6681d2ba, 0x04a35b83, 0xd9e6f13b,
- 0xc0e649f1, 0x1da3e349, 0x7f816a70, 0xa2c4c0c8,
- 0x4d801be4, 0x90c5b15c, 0xf2e73865, 0x2fa292dd,
- 0x36a22a17, 0xebe780af, 0x89c50996, 0x5480a32e,
- 0x8585ddb4, 0x58c0770c, 0x3ae2fe35, 0xe7a7548d,
- 0xfea7ec47, 0x23e246ff, 0x41c0cfc6, 0x9c85657e,
- 0x73c1be52, 0xae8414ea, 0xcca69dd3, 0x11e3376b,
- 0x08e38fa1, 0xd5a62519, 0xb784ac20, 0x6ac10698,
- 0x6ce16c89, 0xb1a4c631, 0xd3864f08, 0x0ec3e5b0,
- 0x17c35d7a, 0xca86f7c2, 0xa8a47efb, 0x75e1d443,
- 0x9aa50f6f, 0x47e0a5d7, 0x25c22cee, 0xf8878656,
- 0xe1873e9c, 0x3cc29424, 0x5ee01d1d, 0x83a5b7a5,
- 0xf90696d8, 0x24433c60, 0x4661b559, 0x9b241fe1,
- 0x8224a72b, 0x5f610d93, 0x3d4384aa, 0xe0062e12,
- 0x0f42f53e, 0xd2075f86, 0xb025d6bf, 0x6d607c07,
- 0x7460c4cd, 0xa9256e75, 0xcb07e74c, 0x16424df4,
- 0x106227e5, 0xcd278d5d, 0xaf050464, 0x7240aedc,
- 0x6b401616, 0xb605bcae, 0xd4273597, 0x09629f2f,
- 0xe6264403, 0x3b63eebb, 0x59416782, 0x8404cd3a,
- 0x9d0475f0, 0x4041df48, 0x22635671, 0xff26fcc9,
- 0x2e238253, 0xf36628eb, 0x9144a1d2, 0x4c010b6a,
- 0x5501b3a0, 0x88441918, 0xea669021, 0x37233a99,
- 0xd867e1b5, 0x05224b0d, 0x6700c234, 0xba45688c,
- 0xa345d046, 0x7e007afe, 0x1c22f3c7, 0xc167597f,
- 0xc747336e, 0x1a0299d6, 0x782010ef, 0xa565ba57,
- 0xbc65029d, 0x6120a825, 0x0302211c, 0xde478ba4,
- 0x31035088, 0xec46fa30, 0x8e647309, 0x5321d9b1,
- 0x4a21617b, 0x9764cbc3, 0xf54642fa, 0x2803e842
-};
+namespace {
-// Used to fetch a naturally-aligned 32-bit word in little endian byte-order
-static inline uint32_t LE_LOAD32(const uint8_t *p) {
- return DecodeFixed32(reinterpret_cast<const char*>(p));
+const uint32_t kByteExtensionTable[256] = {
+ 0x00000000, 0xf26b8303, 0xe13b70f7, 0x1350f3f4, 0xc79a971f, 0x35f1141c,
+ 0x26a1e7e8, 0xd4ca64eb, 0x8ad958cf, 0x78b2dbcc, 0x6be22838, 0x9989ab3b,
+ 0x4d43cfd0, 0xbf284cd3, 0xac78bf27, 0x5e133c24, 0x105ec76f, 0xe235446c,
+ 0xf165b798, 0x030e349b, 0xd7c45070, 0x25afd373, 0x36ff2087, 0xc494a384,
+ 0x9a879fa0, 0x68ec1ca3, 0x7bbcef57, 0x89d76c54, 0x5d1d08bf, 0xaf768bbc,
+ 0xbc267848, 0x4e4dfb4b, 0x20bd8ede, 0xd2d60ddd, 0xc186fe29, 0x33ed7d2a,
+ 0xe72719c1, 0x154c9ac2, 0x061c6936, 0xf477ea35, 0xaa64d611, 0x580f5512,
+ 0x4b5fa6e6, 0xb93425e5, 0x6dfe410e, 0x9f95c20d, 0x8cc531f9, 0x7eaeb2fa,
+ 0x30e349b1, 0xc288cab2, 0xd1d83946, 0x23b3ba45, 0xf779deae, 0x05125dad,
+ 0x1642ae59, 0xe4292d5a, 0xba3a117e, 0x4851927d, 0x5b016189, 0xa96ae28a,
+ 0x7da08661, 0x8fcb0562, 0x9c9bf696, 0x6ef07595, 0x417b1dbc, 0xb3109ebf,
+ 0xa0406d4b, 0x522bee48, 0x86e18aa3, 0x748a09a0, 0x67dafa54, 0x95b17957,
+ 0xcba24573, 0x39c9c670, 0x2a993584, 0xd8f2b687, 0x0c38d26c, 0xfe53516f,
+ 0xed03a29b, 0x1f682198, 0x5125dad3, 0xa34e59d0, 0xb01eaa24, 0x42752927,
+ 0x96bf4dcc, 0x64d4cecf, 0x77843d3b, 0x85efbe38, 0xdbfc821c, 0x2997011f,
+ 0x3ac7f2eb, 0xc8ac71e8, 0x1c661503, 0xee0d9600, 0xfd5d65f4, 0x0f36e6f7,
+ 0x61c69362, 0x93ad1061, 0x80fde395, 0x72966096, 0xa65c047d, 0x5437877e,
+ 0x4767748a, 0xb50cf789, 0xeb1fcbad, 0x197448ae, 0x0a24bb5a, 0xf84f3859,
+ 0x2c855cb2, 0xdeeedfb1, 0xcdbe2c45, 0x3fd5af46, 0x7198540d, 0x83f3d70e,
+ 0x90a324fa, 0x62c8a7f9, 0xb602c312, 0x44694011, 0x5739b3e5, 0xa55230e6,
+ 0xfb410cc2, 0x092a8fc1, 0x1a7a7c35, 0xe811ff36, 0x3cdb9bdd, 0xceb018de,
+ 0xdde0eb2a, 0x2f8b6829, 0x82f63b78, 0x709db87b, 0x63cd4b8f, 0x91a6c88c,
+ 0x456cac67, 0xb7072f64, 0xa457dc90, 0x563c5f93, 0x082f63b7, 0xfa44e0b4,
+ 0xe9141340, 0x1b7f9043, 0xcfb5f4a8, 0x3dde77ab, 0x2e8e845f, 0xdce5075c,
+ 0x92a8fc17, 0x60c37f14, 0x73938ce0, 0x81f80fe3, 0x55326b08, 0xa759e80b,
+ 0xb4091bff, 0x466298fc, 0x1871a4d8, 0xea1a27db, 0xf94ad42f, 0x0b21572c,
+ 0xdfeb33c7, 0x2d80b0c4, 0x3ed04330, 0xccbbc033, 0xa24bb5a6, 0x502036a5,
+ 0x4370c551, 0xb11b4652, 0x65d122b9, 0x97baa1ba, 0x84ea524e, 0x7681d14d,
+ 0x2892ed69, 0xdaf96e6a, 0xc9a99d9e, 0x3bc21e9d, 0xef087a76, 0x1d63f975,
+ 0x0e330a81, 0xfc588982, 0xb21572c9, 0x407ef1ca, 0x532e023e, 0xa145813d,
+ 0x758fe5d6, 0x87e466d5, 0x94b49521, 0x66df1622, 0x38cc2a06, 0xcaa7a905,
+ 0xd9f75af1, 0x2b9cd9f2, 0xff56bd19, 0x0d3d3e1a, 0x1e6dcdee, 0xec064eed,
+ 0xc38d26c4, 0x31e6a5c7, 0x22b65633, 0xd0ddd530, 0x0417b1db, 0xf67c32d8,
+ 0xe52cc12c, 0x1747422f, 0x49547e0b, 0xbb3ffd08, 0xa86f0efc, 0x5a048dff,
+ 0x8ecee914, 0x7ca56a17, 0x6ff599e3, 0x9d9e1ae0, 0xd3d3e1ab, 0x21b862a8,
+ 0x32e8915c, 0xc083125f, 0x144976b4, 0xe622f5b7, 0xf5720643, 0x07198540,
+ 0x590ab964, 0xab613a67, 0xb831c993, 0x4a5a4a90, 0x9e902e7b, 0x6cfbad78,
+ 0x7fab5e8c, 0x8dc0dd8f, 0xe330a81a, 0x115b2b19, 0x020bd8ed, 0xf0605bee,
+ 0x24aa3f05, 0xd6c1bc06, 0xc5914ff2, 0x37faccf1, 0x69e9f0d5, 0x9b8273d6,
+ 0x88d28022, 0x7ab90321, 0xae7367ca, 0x5c18e4c9, 0x4f48173d, 0xbd23943e,
+ 0xf36e6f75, 0x0105ec76, 0x12551f82, 0xe03e9c81, 0x34f4f86a, 0xc69f7b69,
+ 0xd5cf889d, 0x27a40b9e, 0x79b737ba, 0x8bdcb4b9, 0x988c474d, 0x6ae7c44e,
+ 0xbe2da0a5, 0x4c4623a6, 0x5f16d052, 0xad7d5351};
+
+const uint32_t kStrideExtensionTable0[256] = {
+ 0x00000000, 0x30d23865, 0x61a470ca, 0x517648af, 0xc348e194, 0xf39ad9f1,
+ 0xa2ec915e, 0x923ea93b, 0x837db5d9, 0xb3af8dbc, 0xe2d9c513, 0xd20bfd76,
+ 0x4035544d, 0x70e76c28, 0x21912487, 0x11431ce2, 0x03171d43, 0x33c52526,
+ 0x62b36d89, 0x526155ec, 0xc05ffcd7, 0xf08dc4b2, 0xa1fb8c1d, 0x9129b478,
+ 0x806aa89a, 0xb0b890ff, 0xe1ced850, 0xd11ce035, 0x4322490e, 0x73f0716b,
+ 0x228639c4, 0x125401a1, 0x062e3a86, 0x36fc02e3, 0x678a4a4c, 0x57587229,
+ 0xc566db12, 0xf5b4e377, 0xa4c2abd8, 0x941093bd, 0x85538f5f, 0xb581b73a,
+ 0xe4f7ff95, 0xd425c7f0, 0x461b6ecb, 0x76c956ae, 0x27bf1e01, 0x176d2664,
+ 0x053927c5, 0x35eb1fa0, 0x649d570f, 0x544f6f6a, 0xc671c651, 0xf6a3fe34,
+ 0xa7d5b69b, 0x97078efe, 0x8644921c, 0xb696aa79, 0xe7e0e2d6, 0xd732dab3,
+ 0x450c7388, 0x75de4bed, 0x24a80342, 0x147a3b27, 0x0c5c750c, 0x3c8e4d69,
+ 0x6df805c6, 0x5d2a3da3, 0xcf149498, 0xffc6acfd, 0xaeb0e452, 0x9e62dc37,
+ 0x8f21c0d5, 0xbff3f8b0, 0xee85b01f, 0xde57887a, 0x4c692141, 0x7cbb1924,
+ 0x2dcd518b, 0x1d1f69ee, 0x0f4b684f, 0x3f99502a, 0x6eef1885, 0x5e3d20e0,
+ 0xcc0389db, 0xfcd1b1be, 0xada7f911, 0x9d75c174, 0x8c36dd96, 0xbce4e5f3,
+ 0xed92ad5c, 0xdd409539, 0x4f7e3c02, 0x7fac0467, 0x2eda4cc8, 0x1e0874ad,
+ 0x0a724f8a, 0x3aa077ef, 0x6bd63f40, 0x5b040725, 0xc93aae1e, 0xf9e8967b,
+ 0xa89eded4, 0x984ce6b1, 0x890ffa53, 0xb9ddc236, 0xe8ab8a99, 0xd879b2fc,
+ 0x4a471bc7, 0x7a9523a2, 0x2be36b0d, 0x1b315368, 0x096552c9, 0x39b76aac,
+ 0x68c12203, 0x58131a66, 0xca2db35d, 0xfaff8b38, 0xab89c397, 0x9b5bfbf2,
+ 0x8a18e710, 0xbacadf75, 0xebbc97da, 0xdb6eafbf, 0x49500684, 0x79823ee1,
+ 0x28f4764e, 0x18264e2b, 0x18b8ea18, 0x286ad27d, 0x791c9ad2, 0x49cea2b7,
+ 0xdbf00b8c, 0xeb2233e9, 0xba547b46, 0x8a864323, 0x9bc55fc1, 0xab1767a4,
+ 0xfa612f0b, 0xcab3176e, 0x588dbe55, 0x685f8630, 0x3929ce9f, 0x09fbf6fa,
+ 0x1baff75b, 0x2b7dcf3e, 0x7a0b8791, 0x4ad9bff4, 0xd8e716cf, 0xe8352eaa,
+ 0xb9436605, 0x89915e60, 0x98d24282, 0xa8007ae7, 0xf9763248, 0xc9a40a2d,
+ 0x5b9aa316, 0x6b489b73, 0x3a3ed3dc, 0x0aecebb9, 0x1e96d09e, 0x2e44e8fb,
+ 0x7f32a054, 0x4fe09831, 0xddde310a, 0xed0c096f, 0xbc7a41c0, 0x8ca879a5,
+ 0x9deb6547, 0xad395d22, 0xfc4f158d, 0xcc9d2de8, 0x5ea384d3, 0x6e71bcb6,
+ 0x3f07f419, 0x0fd5cc7c, 0x1d81cddd, 0x2d53f5b8, 0x7c25bd17, 0x4cf78572,
+ 0xdec92c49, 0xee1b142c, 0xbf6d5c83, 0x8fbf64e6, 0x9efc7804, 0xae2e4061,
+ 0xff5808ce, 0xcf8a30ab, 0x5db49990, 0x6d66a1f5, 0x3c10e95a, 0x0cc2d13f,
+ 0x14e49f14, 0x2436a771, 0x7540efde, 0x4592d7bb, 0xd7ac7e80, 0xe77e46e5,
+ 0xb6080e4a, 0x86da362f, 0x97992acd, 0xa74b12a8, 0xf63d5a07, 0xc6ef6262,
+ 0x54d1cb59, 0x6403f33c, 0x3575bb93, 0x05a783f6, 0x17f38257, 0x2721ba32,
+ 0x7657f29d, 0x4685caf8, 0xd4bb63c3, 0xe4695ba6, 0xb51f1309, 0x85cd2b6c,
+ 0x948e378e, 0xa45c0feb, 0xf52a4744, 0xc5f87f21, 0x57c6d61a, 0x6714ee7f,
+ 0x3662a6d0, 0x06b09eb5, 0x12caa592, 0x22189df7, 0x736ed558, 0x43bced3d,
+ 0xd1824406, 0xe1507c63, 0xb02634cc, 0x80f40ca9, 0x91b7104b, 0xa165282e,
+ 0xf0136081, 0xc0c158e4, 0x52fff1df, 0x622dc9ba, 0x335b8115, 0x0389b970,
+ 0x11ddb8d1, 0x210f80b4, 0x7079c81b, 0x40abf07e, 0xd2955945, 0xe2476120,
+ 0xb331298f, 0x83e311ea, 0x92a00d08, 0xa272356d, 0xf3047dc2, 0xc3d645a7,
+ 0x51e8ec9c, 0x613ad4f9, 0x304c9c56, 0x009ea433};
+
+const uint32_t kStrideExtensionTable1[256] = {
+ 0x00000000, 0x54075546, 0xa80eaa8c, 0xfc09ffca, 0x55f123e9, 0x01f676af,
+ 0xfdff8965, 0xa9f8dc23, 0xabe247d2, 0xffe51294, 0x03eced5e, 0x57ebb818,
+ 0xfe13643b, 0xaa14317d, 0x561dceb7, 0x021a9bf1, 0x5228f955, 0x062fac13,
+ 0xfa2653d9, 0xae21069f, 0x07d9dabc, 0x53de8ffa, 0xafd77030, 0xfbd02576,
+ 0xf9cabe87, 0xadcdebc1, 0x51c4140b, 0x05c3414d, 0xac3b9d6e, 0xf83cc828,
+ 0x043537e2, 0x503262a4, 0xa451f2aa, 0xf056a7ec, 0x0c5f5826, 0x58580d60,
+ 0xf1a0d143, 0xa5a78405, 0x59ae7bcf, 0x0da92e89, 0x0fb3b578, 0x5bb4e03e,
+ 0xa7bd1ff4, 0xf3ba4ab2, 0x5a429691, 0x0e45c3d7, 0xf24c3c1d, 0xa64b695b,
+ 0xf6790bff, 0xa27e5eb9, 0x5e77a173, 0x0a70f435, 0xa3882816, 0xf78f7d50,
+ 0x0b86829a, 0x5f81d7dc, 0x5d9b4c2d, 0x099c196b, 0xf595e6a1, 0xa192b3e7,
+ 0x086a6fc4, 0x5c6d3a82, 0xa064c548, 0xf463900e, 0x4d4f93a5, 0x1948c6e3,
+ 0xe5413929, 0xb1466c6f, 0x18beb04c, 0x4cb9e50a, 0xb0b01ac0, 0xe4b74f86,
+ 0xe6add477, 0xb2aa8131, 0x4ea37efb, 0x1aa42bbd, 0xb35cf79e, 0xe75ba2d8,
+ 0x1b525d12, 0x4f550854, 0x1f676af0, 0x4b603fb6, 0xb769c07c, 0xe36e953a,
+ 0x4a964919, 0x1e911c5f, 0xe298e395, 0xb69fb6d3, 0xb4852d22, 0xe0827864,
+ 0x1c8b87ae, 0x488cd2e8, 0xe1740ecb, 0xb5735b8d, 0x497aa447, 0x1d7df101,
+ 0xe91e610f, 0xbd193449, 0x4110cb83, 0x15179ec5, 0xbcef42e6, 0xe8e817a0,
+ 0x14e1e86a, 0x40e6bd2c, 0x42fc26dd, 0x16fb739b, 0xeaf28c51, 0xbef5d917,
+ 0x170d0534, 0x430a5072, 0xbf03afb8, 0xeb04fafe, 0xbb36985a, 0xef31cd1c,
+ 0x133832d6, 0x473f6790, 0xeec7bbb3, 0xbac0eef5, 0x46c9113f, 0x12ce4479,
+ 0x10d4df88, 0x44d38ace, 0xb8da7504, 0xecdd2042, 0x4525fc61, 0x1122a927,
+ 0xed2b56ed, 0xb92c03ab, 0x9a9f274a, 0xce98720c, 0x32918dc6, 0x6696d880,
+ 0xcf6e04a3, 0x9b6951e5, 0x6760ae2f, 0x3367fb69, 0x317d6098, 0x657a35de,
+ 0x9973ca14, 0xcd749f52, 0x648c4371, 0x308b1637, 0xcc82e9fd, 0x9885bcbb,
+ 0xc8b7de1f, 0x9cb08b59, 0x60b97493, 0x34be21d5, 0x9d46fdf6, 0xc941a8b0,
+ 0x3548577a, 0x614f023c, 0x635599cd, 0x3752cc8b, 0xcb5b3341, 0x9f5c6607,
+ 0x36a4ba24, 0x62a3ef62, 0x9eaa10a8, 0xcaad45ee, 0x3eced5e0, 0x6ac980a6,
+ 0x96c07f6c, 0xc2c72a2a, 0x6b3ff609, 0x3f38a34f, 0xc3315c85, 0x973609c3,
+ 0x952c9232, 0xc12bc774, 0x3d2238be, 0x69256df8, 0xc0ddb1db, 0x94dae49d,
+ 0x68d31b57, 0x3cd44e11, 0x6ce62cb5, 0x38e179f3, 0xc4e88639, 0x90efd37f,
+ 0x39170f5c, 0x6d105a1a, 0x9119a5d0, 0xc51ef096, 0xc7046b67, 0x93033e21,
+ 0x6f0ac1eb, 0x3b0d94ad, 0x92f5488e, 0xc6f21dc8, 0x3afbe202, 0x6efcb744,
+ 0xd7d0b4ef, 0x83d7e1a9, 0x7fde1e63, 0x2bd94b25, 0x82219706, 0xd626c240,
+ 0x2a2f3d8a, 0x7e2868cc, 0x7c32f33d, 0x2835a67b, 0xd43c59b1, 0x803b0cf7,
+ 0x29c3d0d4, 0x7dc48592, 0x81cd7a58, 0xd5ca2f1e, 0x85f84dba, 0xd1ff18fc,
+ 0x2df6e736, 0x79f1b270, 0xd0096e53, 0x840e3b15, 0x7807c4df, 0x2c009199,
+ 0x2e1a0a68, 0x7a1d5f2e, 0x8614a0e4, 0xd213f5a2, 0x7beb2981, 0x2fec7cc7,
+ 0xd3e5830d, 0x87e2d64b, 0x73814645, 0x27861303, 0xdb8fecc9, 0x8f88b98f,
+ 0x267065ac, 0x727730ea, 0x8e7ecf20, 0xda799a66, 0xd8630197, 0x8c6454d1,
+ 0x706dab1b, 0x246afe5d, 0x8d92227e, 0xd9957738, 0x259c88f2, 0x719bddb4,
+ 0x21a9bf10, 0x75aeea56, 0x89a7159c, 0xdda040da, 0x74589cf9, 0x205fc9bf,
+ 0xdc563675, 0x88516333, 0x8a4bf8c2, 0xde4cad84, 0x2245524e, 0x76420708,
+ 0xdfbadb2b, 0x8bbd8e6d, 0x77b471a7, 0x23b324e1};
+
+const uint32_t kStrideExtensionTable2[256] = {
+ 0x00000000, 0x678efd01, 0xcf1dfa02, 0xa8930703, 0x9bd782f5, 0xfc597ff4,
+ 0x54ca78f7, 0x334485f6, 0x3243731b, 0x55cd8e1a, 0xfd5e8919, 0x9ad07418,
+ 0xa994f1ee, 0xce1a0cef, 0x66890bec, 0x0107f6ed, 0x6486e636, 0x03081b37,
+ 0xab9b1c34, 0xcc15e135, 0xff5164c3, 0x98df99c2, 0x304c9ec1, 0x57c263c0,
+ 0x56c5952d, 0x314b682c, 0x99d86f2f, 0xfe56922e, 0xcd1217d8, 0xaa9cead9,
+ 0x020fedda, 0x658110db, 0xc90dcc6c, 0xae83316d, 0x0610366e, 0x619ecb6f,
+ 0x52da4e99, 0x3554b398, 0x9dc7b49b, 0xfa49499a, 0xfb4ebf77, 0x9cc04276,
+ 0x34534575, 0x53ddb874, 0x60993d82, 0x0717c083, 0xaf84c780, 0xc80a3a81,
+ 0xad8b2a5a, 0xca05d75b, 0x6296d058, 0x05182d59, 0x365ca8af, 0x51d255ae,
+ 0xf94152ad, 0x9ecfafac, 0x9fc85941, 0xf846a440, 0x50d5a343, 0x375b5e42,
+ 0x041fdbb4, 0x639126b5, 0xcb0221b6, 0xac8cdcb7, 0x97f7ee29, 0xf0791328,
+ 0x58ea142b, 0x3f64e92a, 0x0c206cdc, 0x6bae91dd, 0xc33d96de, 0xa4b36bdf,
+ 0xa5b49d32, 0xc23a6033, 0x6aa96730, 0x0d279a31, 0x3e631fc7, 0x59ede2c6,
+ 0xf17ee5c5, 0x96f018c4, 0xf371081f, 0x94fff51e, 0x3c6cf21d, 0x5be20f1c,
+ 0x68a68aea, 0x0f2877eb, 0xa7bb70e8, 0xc0358de9, 0xc1327b04, 0xa6bc8605,
+ 0x0e2f8106, 0x69a17c07, 0x5ae5f9f1, 0x3d6b04f0, 0x95f803f3, 0xf276fef2,
+ 0x5efa2245, 0x3974df44, 0x91e7d847, 0xf6692546, 0xc52da0b0, 0xa2a35db1,
+ 0x0a305ab2, 0x6dbea7b3, 0x6cb9515e, 0x0b37ac5f, 0xa3a4ab5c, 0xc42a565d,
+ 0xf76ed3ab, 0x90e02eaa, 0x387329a9, 0x5ffdd4a8, 0x3a7cc473, 0x5df23972,
+ 0xf5613e71, 0x92efc370, 0xa1ab4686, 0xc625bb87, 0x6eb6bc84, 0x09384185,
+ 0x083fb768, 0x6fb14a69, 0xc7224d6a, 0xa0acb06b, 0x93e8359d, 0xf466c89c,
+ 0x5cf5cf9f, 0x3b7b329e, 0x2a03aaa3, 0x4d8d57a2, 0xe51e50a1, 0x8290ada0,
+ 0xb1d42856, 0xd65ad557, 0x7ec9d254, 0x19472f55, 0x1840d9b8, 0x7fce24b9,
+ 0xd75d23ba, 0xb0d3debb, 0x83975b4d, 0xe419a64c, 0x4c8aa14f, 0x2b045c4e,
+ 0x4e854c95, 0x290bb194, 0x8198b697, 0xe6164b96, 0xd552ce60, 0xb2dc3361,
+ 0x1a4f3462, 0x7dc1c963, 0x7cc63f8e, 0x1b48c28f, 0xb3dbc58c, 0xd455388d,
+ 0xe711bd7b, 0x809f407a, 0x280c4779, 0x4f82ba78, 0xe30e66cf, 0x84809bce,
+ 0x2c139ccd, 0x4b9d61cc, 0x78d9e43a, 0x1f57193b, 0xb7c41e38, 0xd04ae339,
+ 0xd14d15d4, 0xb6c3e8d5, 0x1e50efd6, 0x79de12d7, 0x4a9a9721, 0x2d146a20,
+ 0x85876d23, 0xe2099022, 0x878880f9, 0xe0067df8, 0x48957afb, 0x2f1b87fa,
+ 0x1c5f020c, 0x7bd1ff0d, 0xd342f80e, 0xb4cc050f, 0xb5cbf3e2, 0xd2450ee3,
+ 0x7ad609e0, 0x1d58f4e1, 0x2e1c7117, 0x49928c16, 0xe1018b15, 0x868f7614,
+ 0xbdf4448a, 0xda7ab98b, 0x72e9be88, 0x15674389, 0x2623c67f, 0x41ad3b7e,
+ 0xe93e3c7d, 0x8eb0c17c, 0x8fb73791, 0xe839ca90, 0x40aacd93, 0x27243092,
+ 0x1460b564, 0x73ee4865, 0xdb7d4f66, 0xbcf3b267, 0xd972a2bc, 0xbefc5fbd,
+ 0x166f58be, 0x71e1a5bf, 0x42a52049, 0x252bdd48, 0x8db8da4b, 0xea36274a,
+ 0xeb31d1a7, 0x8cbf2ca6, 0x242c2ba5, 0x43a2d6a4, 0x70e65352, 0x1768ae53,
+ 0xbffba950, 0xd8755451, 0x74f988e6, 0x137775e7, 0xbbe472e4, 0xdc6a8fe5,
+ 0xef2e0a13, 0x88a0f712, 0x2033f011, 0x47bd0d10, 0x46bafbfd, 0x213406fc,
+ 0x89a701ff, 0xee29fcfe, 0xdd6d7908, 0xbae38409, 0x1270830a, 0x75fe7e0b,
+ 0x107f6ed0, 0x77f193d1, 0xdf6294d2, 0xb8ec69d3, 0x8ba8ec25, 0xec261124,
+ 0x44b51627, 0x233beb26, 0x223c1dcb, 0x45b2e0ca, 0xed21e7c9, 0x8aaf1ac8,
+ 0xb9eb9f3e, 0xde65623f, 0x76f6653c, 0x1178983d};
+
+const uint32_t kStrideExtensionTable3[256] = {
+ 0x00000000, 0xf20c0dfe, 0xe1f46d0d, 0x13f860f3, 0xc604aceb, 0x3408a115,
+ 0x27f0c1e6, 0xd5fccc18, 0x89e52f27, 0x7be922d9, 0x6811422a, 0x9a1d4fd4,
+ 0x4fe183cc, 0xbded8e32, 0xae15eec1, 0x5c19e33f, 0x162628bf, 0xe42a2541,
+ 0xf7d245b2, 0x05de484c, 0xd0228454, 0x222e89aa, 0x31d6e959, 0xc3dae4a7,
+ 0x9fc30798, 0x6dcf0a66, 0x7e376a95, 0x8c3b676b, 0x59c7ab73, 0xabcba68d,
+ 0xb833c67e, 0x4a3fcb80, 0x2c4c517e, 0xde405c80, 0xcdb83c73, 0x3fb4318d,
+ 0xea48fd95, 0x1844f06b, 0x0bbc9098, 0xf9b09d66, 0xa5a97e59, 0x57a573a7,
+ 0x445d1354, 0xb6511eaa, 0x63add2b2, 0x91a1df4c, 0x8259bfbf, 0x7055b241,
+ 0x3a6a79c1, 0xc866743f, 0xdb9e14cc, 0x29921932, 0xfc6ed52a, 0x0e62d8d4,
+ 0x1d9ab827, 0xef96b5d9, 0xb38f56e6, 0x41835b18, 0x527b3beb, 0xa0773615,
+ 0x758bfa0d, 0x8787f7f3, 0x947f9700, 0x66739afe, 0x5898a2fc, 0xaa94af02,
+ 0xb96ccff1, 0x4b60c20f, 0x9e9c0e17, 0x6c9003e9, 0x7f68631a, 0x8d646ee4,
+ 0xd17d8ddb, 0x23718025, 0x3089e0d6, 0xc285ed28, 0x17792130, 0xe5752cce,
+ 0xf68d4c3d, 0x048141c3, 0x4ebe8a43, 0xbcb287bd, 0xaf4ae74e, 0x5d46eab0,
+ 0x88ba26a8, 0x7ab62b56, 0x694e4ba5, 0x9b42465b, 0xc75ba564, 0x3557a89a,
+ 0x26afc869, 0xd4a3c597, 0x015f098f, 0xf3530471, 0xe0ab6482, 0x12a7697c,
+ 0x74d4f382, 0x86d8fe7c, 0x95209e8f, 0x672c9371, 0xb2d05f69, 0x40dc5297,
+ 0x53243264, 0xa1283f9a, 0xfd31dca5, 0x0f3dd15b, 0x1cc5b1a8, 0xeec9bc56,
+ 0x3b35704e, 0xc9397db0, 0xdac11d43, 0x28cd10bd, 0x62f2db3d, 0x90fed6c3,
+ 0x8306b630, 0x710abbce, 0xa4f677d6, 0x56fa7a28, 0x45021adb, 0xb70e1725,
+ 0xeb17f41a, 0x191bf9e4, 0x0ae39917, 0xf8ef94e9, 0x2d1358f1, 0xdf1f550f,
+ 0xcce735fc, 0x3eeb3802, 0xb13145f8, 0x433d4806, 0x50c528f5, 0xa2c9250b,
+ 0x7735e913, 0x8539e4ed, 0x96c1841e, 0x64cd89e0, 0x38d46adf, 0xcad86721,
+ 0xd92007d2, 0x2b2c0a2c, 0xfed0c634, 0x0cdccbca, 0x1f24ab39, 0xed28a6c7,
+ 0xa7176d47, 0x551b60b9, 0x46e3004a, 0xb4ef0db4, 0x6113c1ac, 0x931fcc52,
+ 0x80e7aca1, 0x72eba15f, 0x2ef24260, 0xdcfe4f9e, 0xcf062f6d, 0x3d0a2293,
+ 0xe8f6ee8b, 0x1afae375, 0x09028386, 0xfb0e8e78, 0x9d7d1486, 0x6f711978,
+ 0x7c89798b, 0x8e857475, 0x5b79b86d, 0xa975b593, 0xba8dd560, 0x4881d89e,
+ 0x14983ba1, 0xe694365f, 0xf56c56ac, 0x07605b52, 0xd29c974a, 0x20909ab4,
+ 0x3368fa47, 0xc164f7b9, 0x8b5b3c39, 0x795731c7, 0x6aaf5134, 0x98a35cca,
+ 0x4d5f90d2, 0xbf539d2c, 0xacabfddf, 0x5ea7f021, 0x02be131e, 0xf0b21ee0,
+ 0xe34a7e13, 0x114673ed, 0xc4babff5, 0x36b6b20b, 0x254ed2f8, 0xd742df06,
+ 0xe9a9e704, 0x1ba5eafa, 0x085d8a09, 0xfa5187f7, 0x2fad4bef, 0xdda14611,
+ 0xce5926e2, 0x3c552b1c, 0x604cc823, 0x9240c5dd, 0x81b8a52e, 0x73b4a8d0,
+ 0xa64864c8, 0x54446936, 0x47bc09c5, 0xb5b0043b, 0xff8fcfbb, 0x0d83c245,
+ 0x1e7ba2b6, 0xec77af48, 0x398b6350, 0xcb876eae, 0xd87f0e5d, 0x2a7303a3,
+ 0x766ae09c, 0x8466ed62, 0x979e8d91, 0x6592806f, 0xb06e4c77, 0x42624189,
+ 0x519a217a, 0xa3962c84, 0xc5e5b67a, 0x37e9bb84, 0x2411db77, 0xd61dd689,
+ 0x03e11a91, 0xf1ed176f, 0xe215779c, 0x10197a62, 0x4c00995d, 0xbe0c94a3,
+ 0xadf4f450, 0x5ff8f9ae, 0x8a0435b6, 0x78083848, 0x6bf058bb, 0x99fc5545,
+ 0xd3c39ec5, 0x21cf933b, 0x3237f3c8, 0xc03bfe36, 0x15c7322e, 0xe7cb3fd0,
+ 0xf4335f23, 0x063f52dd, 0x5a26b1e2, 0xa82abc1c, 0xbbd2dcef, 0x49ded111,
+ 0x9c221d09, 0x6e2e10f7, 0x7dd67004, 0x8fda7dfa};
+
+// CRCs are pre- and post- conditioned by xoring with all ones.
+static constexpr const uint32_t kCRC32Xor = static_cast<uint32_t>(0xffffffffU);
+
+// Reads a little-endian 32-bit integer from a 32-bit-aligned buffer.
+inline uint32_t ReadUint32LE(const uint8_t* buffer) {
+ return DecodeFixed32(reinterpret_cast<const char*>(buffer));
}
+// Returns the smallest address >= the given address that is aligned to N bytes.
+//
+// N must be a power of two.
+template <int N>
+constexpr inline const uint8_t* RoundUp(const uint8_t* pointer) {
+ return reinterpret_cast<uint8_t*>(
+ (reinterpret_cast<uintptr_t>(pointer) + (N - 1))
+ & ~static_cast<uintptr_t>(N - 1));
+}
+
+} // namespace
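RoundUp<N> above uses the standard power-of-two alignment identity (address + N - 1) & ~(N - 1). A quick standalone check of that arithmetic for N == 4 (illustrative only, not part of the patch):

#include <cstdint>

static_assert(((UINT64_C(0x1001) + 3) & ~UINT64_C(3)) == UINT64_C(0x1004),
              "an unaligned address rounds up to the next multiple of 4");
static_assert(((UINT64_C(0x1004) + 3) & ~UINT64_C(3)) == UINT64_C(0x1004),
              "an already aligned address is left unchanged");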
+
// Determine if the CPU running this program can accelerate the CRC32C
// calculation.
static bool CanAccelerateCRC32C() {
return port::AcceleratedCRC32C(crc, buf, size);
}
- const uint8_t *p = reinterpret_cast<const uint8_t *>(buf);
- const uint8_t *e = p + size;
- uint32_t l = crc ^ 0xffffffffu;
+ const uint8_t* p = reinterpret_cast<const uint8_t*>(buf);
+ const uint8_t* e = p + size;
+ uint32_t l = crc ^ kCRC32Xor;
+
+// Process one byte at a time.
+#define STEP1 \
+ do { \
+ int c = (l & 0xff) ^ *p++; \
+ l = kByteExtensionTable[c] ^ (l >> 8); \
+ } while (0)
-#define STEP1 do { \
- int c = (l & 0xff) ^ *p++; \
- l = table0_[c] ^ (l >> 8); \
-} while (0)
-#define STEP4 do { \
- uint32_t c = l ^ LE_LOAD32(p); \
- p += 4; \
- l = table3_[c & 0xff] ^ \
- table2_[(c >> 8) & 0xff] ^ \
- table1_[(c >> 16) & 0xff] ^ \
- table0_[c >> 24]; \
-} while (0)
+// Process one of the 4 strides of 4-byte data.
+#define STEP4(s) \
+ do { \
+ crc##s = ReadUint32LE(p + s * 4) ^ kStrideExtensionTable3[crc##s & 0xff] ^ \
+ kStrideExtensionTable2[(crc##s >> 8) & 0xff] ^ \
+ kStrideExtensionTable1[(crc##s >> 16) & 0xff] ^ \
+ kStrideExtensionTable0[crc##s >> 24]; \
+ } while (0)
- // Point x at first 4-byte aligned byte in string. This might be
- // just past the end of the string.
- const uintptr_t pval = reinterpret_cast<uintptr_t>(p);
- const uint8_t* x = reinterpret_cast<const uint8_t*>(((pval + 3) >> 2) << 2);
+// Process a 16-byte swath of 4 strides, each of which has 4 bytes of data.
+#define STEP16 \
+ do { \
+ STEP4(0); \
+ STEP4(1); \
+ STEP4(2); \
+ STEP4(3); \
+ p += 16; \
+ } while (0)
+
+// Process 4 bytes that were already loaded into a word.
+#define STEP4W(w) \
+ do { \
+ w ^= l; \
+ for (size_t i = 0; i < 4; ++i) { \
+ w = (w >> 8) ^ kByteExtensionTable[w & 0xff]; \
+ } \
+ l = w; \
+ } while (0)
+
+ // Point x at first 4-byte aligned byte in the buffer. This might be past the
+ // end of the buffer.
+ const uint8_t* x = RoundUp<4>(p);
if (x <= e) {
- // Process bytes until finished or p is 4-byte aligned
+ // Process bytes until p is 4-byte aligned.
while (p != x) {
STEP1;
}
}
- // Process bytes 16 at a time
- while ((e-p) >= 16) {
- STEP4; STEP4; STEP4; STEP4;
- }
- // Process bytes 4 at a time
- while ((e-p) >= 4) {
- STEP4;
+
+ if ((e - p) >= 16) {
+ // Load a 16-byte swath into the stride partial results.
+ uint32_t crc0 = ReadUint32LE(p + 0 * 4) ^ l;
+ uint32_t crc1 = ReadUint32LE(p + 1 * 4);
+ uint32_t crc2 = ReadUint32LE(p + 2 * 4);
+ uint32_t crc3 = ReadUint32LE(p + 3 * 4);
+ p += 16;
+
+ // It is possible to get better speeds (at least on x86) by interleaving
+ // prefetching 256 bytes ahead with processing 64 bytes at a time. See the
+ // portable implementation in https://github.com/google/crc32c/.
+
+ // Process one 16-byte swath at a time.
+ while ((e - p) >= 16) {
+ STEP16;
+ }
+
+ // Advance one word at a time as far as possible.
+ while ((e - p) >= 4) {
+ STEP4(0);
+ uint32_t tmp = crc0;
+ crc0 = crc1;
+ crc1 = crc2;
+ crc2 = crc3;
+ crc3 = tmp;
+ p += 4;
+ }
+
+ // Combine the 4 partial stride results.
+ l = 0;
+ STEP4W(crc0);
+ STEP4W(crc1);
+ STEP4W(crc2);
+ STEP4W(crc3);
}
- // Process the last few bytes
+
+ // Process the last few bytes.
while (p != e) {
STEP1;
}
+#undef STEP4W
+#undef STEP16
#undef STEP4
#undef STEP1
- return l ^ 0xffffffffu;
+ return l ^ kCRC32Xor;
}
} // namespace crc32c
// Return the crc32c of concat(A, data[0,n-1]) where init_crc is the
// crc32c of some string A. Extend() is often used to maintain the
// crc32c of a stream of data.
-extern uint32_t Extend(uint32_t init_crc, const char* data, size_t n);
+uint32_t Extend(uint32_t init_crc, const char* data, size_t n);
// Return the crc32c of data[0,n-1]
inline uint32_t Value(const char* data, size_t n) {
}
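As documented above, Extend() lets callers hash a stream incrementally, and Value(data, n) is equivalent to extending a zero CRC. A small usage sketch (the function name is illustrative, not part of the patch):

#include <cassert>
#include <cstdint>
#include <string>

#include "util/crc32c.h"

void StreamingCrcMatchesOneShot() {
  const std::string a = "hello ";
  const std::string b = "world";
  const std::string ab = a + b;

  // Hash chunk by chunk...
  uint32_t streamed = leveldb::crc32c::Value(a.data(), a.size());
  streamed = leveldb::crc32c::Extend(streamed, b.data(), b.size());

  // ...and get the same result as hashing the concatenation in one call.
  assert(streamed == leveldb::crc32c::Value(ab.data(), ab.size()));
}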
void Log(Logger* info_log, const char* format, ...) {
- if (info_log != NULL) {
+ if (info_log != nullptr) {
va_list ap;
va_start(ap, format);
info_log->Logv(format, ap);
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include <dirent.h>
-#include <errno.h>
#include <fcntl.h>
#include <pthread.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/types.h>
-#include <time.h>
#include <unistd.h>
-#include <deque>
+
+#include <atomic>
+#include <cerrno>
+#include <cstddef>
+#include <cstdint>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
#include <limits>
+#include <queue>
#include <set>
+#include <string>
+#include <thread>
+#include <type_traits>
+#include <utility>
+
#include "leveldb/env.h"
#include "leveldb/slice.h"
+#include "leveldb/status.h"
#include "port/port.h"
-#include "util/logging.h"
-#include "util/mutexlock.h"
+#include "port/thread_annotations.h"
#include "util/posix_logger.h"
#include "util/env_posix_test_helper.h"
namespace {
-static int open_read_only_file_limit = -1;
-static int mmap_limit = -1;
+// Set by EnvPosixTestHelper::SetReadOnlyFDLimit() and MaxOpenFiles().
+int g_open_read_only_file_limit = -1;
+
+// Up to 1000 mmap regions for 64-bit binaries; none for 32-bit.
+constexpr const int kDefaultMmapLimit = (sizeof(void*) >= 8) ? 1000 : 0;
+
+// Can be set using EnvPosixTestHelper::SetReadOnlyMMapLimit.
+int g_mmap_limit = kDefaultMmapLimit;
+
+constexpr const size_t kWritableFileBufferSize = 65536;
-static Status IOError(const std::string& context, int err_number) {
- return Status::IOError(context, strerror(err_number));
+Status PosixError(const std::string& context, int error_number) {
+ if (error_number == ENOENT) {
+ return Status::NotFound(context, std::strerror(error_number));
+ } else {
+ return Status::IOError(context, std::strerror(error_number));
+ }
}
// Helper class to limit resource usage to avoid exhaustion.
// Currently used to limit read-only file descriptors and mmap file usage
-// so that we do not end up running out of file descriptors, virtual memory,
-// or running into kernel performance problems for very large databases.
+// so that we do not run out of file descriptors or virtual memory, or run into
+// kernel performance problems for very large databases.
class Limiter {
public:
- // Limit maximum number of resources to |n|.
- Limiter(intptr_t n) {
- SetAllowed(n);
- }
+ // Limit maximum number of resources to |max_acquires|.
+ Limiter(int max_acquires) : acquires_allowed_(max_acquires) {}
+
+ Limiter(const Limiter&) = delete;
+ Limiter operator=(const Limiter&) = delete;
// If another resource is available, acquire it and return true.
// Else return false.
bool Acquire() {
- if (GetAllowed() <= 0) {
- return false;
- }
- MutexLock l(&mu_);
- intptr_t x = GetAllowed();
- if (x <= 0) {
- return false;
- } else {
- SetAllowed(x - 1);
+ int old_acquires_allowed =
+ acquires_allowed_.fetch_sub(1, std::memory_order_relaxed);
+
+ if (old_acquires_allowed > 0)
return true;
- }
+
+ acquires_allowed_.fetch_add(1, std::memory_order_relaxed);
+ return false;
}
// Release a resource acquired by a previous call to Acquire() that returned
// true.
void Release() {
- MutexLock l(&mu_);
- SetAllowed(GetAllowed() + 1);
+ acquires_allowed_.fetch_add(1, std::memory_order_relaxed);
}
private:
- port::Mutex mu_;
- port::AtomicPointer allowed_;
-
- intptr_t GetAllowed() const {
- return reinterpret_cast<intptr_t>(allowed_.Acquire_Load());
- }
-
- // REQUIRES: mu_ must be held
- void SetAllowed(intptr_t v) {
- allowed_.Release_Store(reinterpret_cast<void*>(v));
- }
-
- Limiter(const Limiter&);
- void operator=(const Limiter&);
+ // The number of available resources.
+ //
+ // This is a counter and is not tied to the invariants of any other class, so
+ // it can be operated on safely using std::memory_order_relaxed.
+ std::atomic<int> acquires_allowed_;
};
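The rewritten Limiter replaces the mutex-plus-AtomicPointer counter with a single relaxed atomic. Its acquire/release contract, in a hedged standalone sketch (assumes the Limiter class above; the function name is illustrative):

#include <cassert>

void LimiterCounterSemantics() {
  Limiter limiter(1);               // Allow at most one outstanding resource.
  bool first = limiter.Acquire();   // true: takes the only available slot.
  bool second = limiter.Acquire();  // false: over the limit; the decrement is
                                    // immediately undone inside Acquire().
  assert(first && !second);
  limiter.Release();                // Returns the slot taken by the first call.
}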
-class PosixSequentialFile: public SequentialFile {
- private:
- std::string filename_;
- FILE* file_;
-
+// Implements sequential read access in a file using read().
+//
+// Instances of this class are thread-friendly but not thread-safe, as required
+// by the SequentialFile API.
+class PosixSequentialFile final : public SequentialFile {
public:
- PosixSequentialFile(const std::string& fname, FILE* f)
- : filename_(fname), file_(f) { }
- virtual ~PosixSequentialFile() { fclose(file_); }
-
- virtual Status Read(size_t n, Slice* result, char* scratch) {
- Status s;
- size_t r = fread_unlocked(scratch, 1, n, file_);
- *result = Slice(scratch, r);
- if (r < n) {
- if (feof(file_)) {
- // We leave status as ok if we hit the end of the file
- } else {
- // A partial read with an error: return a non-ok status
- s = IOError(filename_, errno);
+ PosixSequentialFile(std::string filename, int fd)
+ : fd_(fd), filename_(filename) {}
+ ~PosixSequentialFile() override { close(fd_); }
+
+ Status Read(size_t n, Slice* result, char* scratch) override {
+ Status status;
+ while (true) {
+ ::ssize_t read_size = ::read(fd_, scratch, n);
+ if (read_size < 0) { // Read error.
+ if (errno == EINTR) {
+ continue; // Retry
+ }
+ status = PosixError(filename_, errno);
+ break;
}
+ *result = Slice(scratch, read_size);
+ break;
}
- return s;
+ return status;
}
- virtual Status Skip(uint64_t n) {
- if (fseek(file_, n, SEEK_CUR)) {
- return IOError(filename_, errno);
+ Status Skip(uint64_t n) override {
+ if (::lseek(fd_, n, SEEK_CUR) == static_cast<off_t>(-1)) {
+ return PosixError(filename_, errno);
}
return Status::OK();
}
-};
-// pread() based random-access
-class PosixRandomAccessFile: public RandomAccessFile {
private:
- std::string filename_;
- bool temporary_fd_; // If true, fd_ is -1 and we open on every read.
- int fd_;
- Limiter* limiter_;
+ const int fd_;
+ const std::string filename_;
+};
+// Implements random read access in a file using pread().
+//
+// Instances of this class are thread-safe, as required by the RandomAccessFile
+// API. Instances are immutable and Read() only calls thread-safe library
+// functions.
+class PosixRandomAccessFile final : public RandomAccessFile {
public:
- PosixRandomAccessFile(const std::string& fname, int fd, Limiter* limiter)
- : filename_(fname), fd_(fd), limiter_(limiter) {
- temporary_fd_ = !limiter->Acquire();
- if (temporary_fd_) {
- // Open file on every access.
- close(fd_);
- fd_ = -1;
+ // The new instance takes ownership of |fd|. |fd_limiter| must outlive this
+ // instance, and will be used to determine whether this instance may keep
+ // its file descriptor open for its entire lifetime.
+ PosixRandomAccessFile(std::string filename, int fd, Limiter* fd_limiter)
+ : has_permanent_fd_(fd_limiter->Acquire()),
+ fd_(has_permanent_fd_ ? fd : -1),
+ fd_limiter_(fd_limiter),
+ filename_(std::move(filename)) {
+ if (!has_permanent_fd_) {
+ assert(fd_ == -1);
+ ::close(fd); // The file will be opened on every read.
}
}
- virtual ~PosixRandomAccessFile() {
- if (!temporary_fd_) {
- close(fd_);
- limiter_->Release();
+ ~PosixRandomAccessFile() override {
+ if (has_permanent_fd_) {
+ assert(fd_ != -1);
+ ::close(fd_);
+ fd_limiter_->Release();
}
}
- virtual Status Read(uint64_t offset, size_t n, Slice* result,
- char* scratch) const {
+ Status Read(uint64_t offset, size_t n, Slice* result,
+ char* scratch) const override {
int fd = fd_;
- if (temporary_fd_) {
- fd = open(filename_.c_str(), O_RDONLY);
+ if (!has_permanent_fd_) {
+ fd = ::open(filename_.c_str(), O_RDONLY);
if (fd < 0) {
- return IOError(filename_, errno);
+ return PosixError(filename_, errno);
}
}
- Status s;
- ssize_t r = pread(fd, scratch, n, static_cast<off_t>(offset));
- *result = Slice(scratch, (r < 0) ? 0 : r);
- if (r < 0) {
- // An error: return a non-ok status
- s = IOError(filename_, errno);
+ assert(fd != -1);
+
+ Status status;
+ ssize_t read_size = ::pread(fd, scratch, n, static_cast<off_t>(offset));
+ *result = Slice(scratch, (read_size < 0) ? 0 : read_size);
+ if (read_size < 0) {
+ // An error: return a non-ok status.
+ status = PosixError(filename_, errno);
}
- if (temporary_fd_) {
+ if (!has_permanent_fd_) {
// Close the temporary file descriptor opened earlier.
- close(fd);
+ assert(fd != fd_);
+ ::close(fd);
}
- return s;
+ return status;
}
-};
-// mmap() based random-access
-class PosixMmapReadableFile: public RandomAccessFile {
private:
- std::string filename_;
- void* mmapped_region_;
- size_t length_;
- Limiter* limiter_;
+ const bool has_permanent_fd_; // If false, the file is opened on every read.
+ const int fd_; // -1 if has_permanent_fd_ is false.
+ Limiter* const fd_limiter_;
+ const std::string filename_;
+};
+// Implements random read access in a file using mmap().
+//
+// Instances of this class are thread-safe, as required by the RandomAccessFile
+// API. Instances are immutable and Read() only calls thread-safe library
+// functions.
+class PosixMmapReadableFile final : public RandomAccessFile {
public:
- // base[0,length-1] contains the mmapped contents of the file.
- PosixMmapReadableFile(const std::string& fname, void* base, size_t length,
- Limiter* limiter)
- : filename_(fname), mmapped_region_(base), length_(length),
- limiter_(limiter) {
- }
-
- virtual ~PosixMmapReadableFile() {
- munmap(mmapped_region_, length_);
- limiter_->Release();
- }
-
- virtual Status Read(uint64_t offset, size_t n, Slice* result,
- char* scratch) const {
- Status s;
+ // mmap_base[0, length-1] points to the memory-mapped contents of the file. It
+ // must be the result of a successful call to mmap(). This instance takes
+ // over ownership of the region.
+ //
+ // |mmap_limiter| must outlive this instance. The caller must have already
+ // acquired the right to use one mmap region, which will be released when this
+ // instance is destroyed.
+ PosixMmapReadableFile(std::string filename, char* mmap_base, size_t length,
+ Limiter* mmap_limiter)
+ : mmap_base_(mmap_base), length_(length), mmap_limiter_(mmap_limiter),
+ filename_(std::move(filename)) {}
+
+ ~PosixMmapReadableFile() override {
+ ::munmap(static_cast<void*>(mmap_base_), length_);
+ mmap_limiter_->Release();
+ }
+
+ Status Read(uint64_t offset, size_t n, Slice* result,
+ char* scratch) const override {
if (offset + n > length_) {
*result = Slice();
- s = IOError(filename_, EINVAL);
- } else {
- *result = Slice(reinterpret_cast<char*>(mmapped_region_) + offset, n);
+ return PosixError(filename_, EINVAL);
}
- return s;
+
+ *result = Slice(mmap_base_ + offset, n);
+ return Status::OK();
}
-};
-class PosixWritableFile : public WritableFile {
private:
- std::string filename_;
- FILE* file_;
+ char* const mmap_base_;
+ const size_t length_;
+ Limiter* const mmap_limiter_;
+ const std::string filename_;
+};
+class PosixWritableFile final : public WritableFile {
public:
- PosixWritableFile(const std::string& fname, FILE* f)
- : filename_(fname), file_(f) { }
+ PosixWritableFile(std::string filename, int fd)
+ : pos_(0), fd_(fd), is_manifest_(IsManifest(filename)),
+ filename_(std::move(filename)), dirname_(Dirname(filename_)) {}
- ~PosixWritableFile() {
- if (file_ != NULL) {
+ ~PosixWritableFile() override {
+ if (fd_ >= 0) {
// Ignoring any potential errors
- fclose(file_);
+ Close();
}
}
- virtual Status Append(const Slice& data) {
- size_t r = fwrite_unlocked(data.data(), 1, data.size(), file_);
- if (r != data.size()) {
- return IOError(filename_, errno);
+ Status Append(const Slice& data) override {
+ size_t write_size = data.size();
+ const char* write_data = data.data();
+
+ // Fit as much as possible into buffer.
+ size_t copy_size = std::min(write_size, kWritableFileBufferSize - pos_);
+ std::memcpy(buf_ + pos_, write_data, copy_size);
+ write_data += copy_size;
+ write_size -= copy_size;
+ pos_ += copy_size;
+ if (write_size == 0) {
+ return Status::OK();
}
- return Status::OK();
+
+ // Can't fit in buffer, so need to do at least one write.
+ Status status = FlushBuffer();
+ if (!status.ok()) {
+ return status;
+ }
+
+ // Small writes go to buffer, large writes are written directly.
+ if (write_size < kWritableFileBufferSize) {
+ std::memcpy(buf_, write_data, write_size);
+ pos_ = write_size;
+ return Status::OK();
+ }
+ return WriteUnbuffered(write_data, write_size);
+ }
+
+ Status Close() override {
+ Status status = FlushBuffer();
+ const int close_result = ::close(fd_);
+ if (close_result < 0 && status.ok()) {
+ status = PosixError(filename_, errno);
+ }
+ fd_ = -1;
+ return status;
}
- virtual Status Close() {
- Status result;
- if (fclose(file_) != 0) {
- result = IOError(filename_, errno);
+ Status Flush() override {
+ return FlushBuffer();
+ }
+
+ Status Sync() override {
+ // Ensure new files referred to by the manifest are in the filesystem.
+ //
+ // This needs to happen before the manifest file is flushed to disk, to
+ // avoid crashing in a state where the manifest refers to files that are not
+ // yet on disk.
+ Status status = SyncDirIfManifest();
+ if (!status.ok()) {
+ return status;
+ }
+
+ status = FlushBuffer();
+ if (!status.ok()) {
+ return status;
}
- file_ = NULL;
- return result;
+
+ return SyncFd(fd_, filename_);
}
- virtual Status Flush() {
- if (fflush_unlocked(file_) != 0) {
- return IOError(filename_, errno);
+ private:
+ Status FlushBuffer() {
+ Status status = WriteUnbuffered(buf_, pos_);
+ pos_ = 0;
+ return status;
+ }
+
+ Status WriteUnbuffered(const char* data, size_t size) {
+ while (size > 0) {
+ ssize_t write_result = ::write(fd_, data, size);
+ if (write_result < 0) {
+ if (errno == EINTR) {
+ continue; // Retry
+ }
+ return PosixError(filename_, errno);
+ }
+ data += write_result;
+ size -= write_result;
}
return Status::OK();
}
Status SyncDirIfManifest() {
- const char* f = filename_.c_str();
- const char* sep = strrchr(f, '/');
- Slice basename;
- std::string dir;
- if (sep == NULL) {
- dir = ".";
- basename = f;
+ Status status;
+ if (!is_manifest_) {
+ return status;
+ }
+
+ int fd = ::open(dirname_.c_str(), O_RDONLY);
+ if (fd < 0) {
+ status = PosixError(dirname_, errno);
} else {
- dir = std::string(f, sep - f);
- basename = sep + 1;
+ status = SyncFd(fd, dirname_);
+ ::close(fd);
+ }
+ return status;
+ }
+
+ // Ensures that all the caches associated with the given file descriptor's
+ // data are flushed all the way to durable media, and can withstand power
+ // failures.
+ //
+ // The path argument is only used to populate the description string in the
+ // returned Status if an error occurs.
+ static Status SyncFd(int fd, const std::string& fd_path) {
+#if HAVE_FULLFSYNC
+ // On macOS and iOS, fsync() doesn't guarantee durability past power
+ // failures. fcntl(F_FULLFSYNC) is required for that purpose. Some
+ // filesystems don't support fcntl(F_FULLFSYNC), and require a fallback to
+ // fsync().
+ if (::fcntl(fd, F_FULLFSYNC) == 0) {
+ return Status::OK();
}
- Status s;
- if (basename.starts_with("MANIFEST")) {
- int fd = open(dir.c_str(), O_RDONLY);
- if (fd < 0) {
- s = IOError(dir, errno);
- } else {
- if (fsync(fd) < 0) {
- s = IOError(dir, errno);
- }
- close(fd);
- }
+#endif // HAVE_FULLFSYNC
+
+#if HAVE_FDATASYNC
+ bool sync_success = ::fdatasync(fd) == 0;
+#else
+ bool sync_success = ::fsync(fd) == 0;
+#endif // HAVE_FDATASYNC
+
+ if (sync_success) {
+ return Status::OK();
}
- return s;
+ return PosixError(fd_path, errno);
}
- virtual Status Sync() {
- // Ensure new files referred to by the manifest are in the filesystem.
- Status s = SyncDirIfManifest();
- if (!s.ok()) {
- return s;
+ // Returns the directory name in a path pointing to a file.
+ //
+ // Returns "." if the path does not contain any directory separator.
+ static std::string Dirname(const std::string& filename) {
+ std::string::size_type separator_pos = filename.rfind('/');
+ if (separator_pos == std::string::npos) {
+ return std::string(".");
}
- if (fflush_unlocked(file_) != 0 ||
- fdatasync(fileno(file_)) != 0) {
- s = Status::IOError(filename_, strerror(errno));
+ // The filename component should not contain a path separator. If it does,
+ // the splitting was done incorrectly.
+ assert(filename.find('/', separator_pos + 1) == std::string::npos);
+
+ return filename.substr(0, separator_pos);
+ }
+
+ // Extracts the file name from a path pointing to a file.
+ //
+ // The returned Slice points to |filename|'s data buffer, so it is only valid
+ // while |filename| is alive and unchanged.
+ static Slice Basename(const std::string& filename) {
+ std::string::size_type separator_pos = filename.rfind('/');
+ if (separator_pos == std::string::npos) {
+ return Slice(filename);
}
- return s;
+ // The filename component should not contain a path separator. If it does,
+ // the splitting was done incorrectly.
+ assert(filename.find('/', separator_pos + 1) == std::string::npos);
+
+ return Slice(filename.data() + separator_pos + 1,
+ filename.length() - separator_pos - 1);
}
+
+ // True if the given file is a manifest file.
+ static bool IsManifest(const std::string& filename) {
+ return Basename(filename).starts_with("MANIFEST");
+ }
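+
+ // For example (illustrative): given the path "/db/MANIFEST-000042",
+ // Dirname() returns "/db", Basename() returns "MANIFEST-000042", and
+ // IsManifest() returns true.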
+
+ // buf_[0, pos_ - 1] contains data to be written to fd_.
+ char buf_[kWritableFileBufferSize];
+ size_t pos_;
+ int fd_;
+
+ const bool is_manifest_; // True if the file's name starts with MANIFEST.
+ const std::string filename_;
+ const std::string dirname_; // The directory of filename_.
};
-static int LockOrUnlock(int fd, bool lock) {
+int LockOrUnlock(int fd, bool lock) {
errno = 0;
- struct flock f;
- memset(&f, 0, sizeof(f));
- f.l_type = (lock ? F_WRLCK : F_UNLCK);
- f.l_whence = SEEK_SET;
- f.l_start = 0;
- f.l_len = 0; // Lock/unlock entire file
- return fcntl(fd, F_SETLK, &f);
+ struct ::flock file_lock_info;
+ std::memset(&file_lock_info, 0, sizeof(file_lock_info));
+ file_lock_info.l_type = (lock ? F_WRLCK : F_UNLCK);
+ file_lock_info.l_whence = SEEK_SET;
+ file_lock_info.l_start = 0;
+ file_lock_info.l_len = 0; // Lock/unlock entire file.
+ return ::fcntl(fd, F_SETLK, &file_lock_info);
}
+// Instances are thread-safe because they are immutable.
class PosixFileLock : public FileLock {
public:
- int fd_;
- std::string name_;
+ PosixFileLock(int fd, std::string filename)
+ : fd_(fd), filename_(std::move(filename)) {}
+
+ int fd() const { return fd_; }
+ const std::string& filename() const { return filename_; }
+
+ private:
+ const int fd_;
+ const std::string filename_;
};
-// Set of locked files. We keep a separate set instead of just
-// relying on fcntrl(F_SETLK) since fcntl(F_SETLK) does not provide
-// any protection against multiple uses from the same process.
+// Tracks the files locked by PosixEnv::LockFile().
+//
+// We maintain a separate set instead of relying on fcntl(F_SETLK) because
+// fcntl(F_SETLK) does not provide any protection against multiple uses from the
+// same process.
+//
+// Instances are thread-safe because all member data is guarded by a mutex.
class PosixLockTable {
- private:
- port::Mutex mu_;
- std::set<std::string> locked_files_;
public:
- bool Insert(const std::string& fname) {
- MutexLock l(&mu_);
- return locked_files_.insert(fname).second;
- }
- void Remove(const std::string& fname) {
- MutexLock l(&mu_);
+ bool Insert(const std::string& fname) LOCKS_EXCLUDED(mu_) {
+ mu_.Lock();
+ bool succeeded = locked_files_.insert(fname).second;
+ mu_.Unlock();
+ return succeeded;
+ }
+ void Remove(const std::string& fname) LOCKS_EXCLUDED(mu_) {
+ mu_.Lock();
locked_files_.erase(fname);
+ mu_.Unlock();
}
+
+ private:
+ port::Mutex mu_;
+ std::set<std::string> locked_files_ GUARDED_BY(mu_);
};
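+
+// Illustrative PosixLockTable usage, mirroring LockFile() and UnlockFile()
+// below:
+// if (!locks_.Insert(filename)) {
+// return Status::IOError("lock " + filename, "already held by process");
+// }
+// ... and on unlock: locks_.Remove(filename);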
class PosixEnv : public Env {
public:
PosixEnv();
- virtual ~PosixEnv() {
- char msg[] = "Destroying Env::Default()\n";
- fwrite(msg, 1, sizeof(msg), stderr);
- abort();
- }
-
- virtual Status NewSequentialFile(const std::string& fname,
- SequentialFile** result) {
- FILE* f = fopen(fname.c_str(), "r");
- if (f == NULL) {
- *result = NULL;
- return IOError(fname, errno);
- } else {
- *result = new PosixSequentialFile(fname, f);
- return Status::OK();
+ ~PosixEnv() override {
+ static char msg[] = "PosixEnv singleton destroyed. Unsupported behavior!\n";
+ std::fwrite(msg, 1, sizeof(msg), stderr);
+ std::abort();
+ }
+
+ Status NewSequentialFile(const std::string& filename,
+ SequentialFile** result) override {
+ int fd = ::open(filename.c_str(), O_RDONLY);
+ if (fd < 0) {
+ *result = nullptr;
+ return PosixError(filename, errno);
}
+
+ *result = new PosixSequentialFile(filename, fd);
+ return Status::OK();
}
- virtual Status NewRandomAccessFile(const std::string& fname,
- RandomAccessFile** result) {
- *result = NULL;
- Status s;
- int fd = open(fname.c_str(), O_RDONLY);
+ Status NewRandomAccessFile(const std::string& filename,
+ RandomAccessFile** result) override {
+ *result = nullptr;
+ int fd = ::open(filename.c_str(), O_RDONLY);
if (fd < 0) {
- s = IOError(fname, errno);
- } else if (mmap_limit_.Acquire()) {
- uint64_t size;
- s = GetFileSize(fname, &size);
- if (s.ok()) {
- void* base = mmap(NULL, size, PROT_READ, MAP_SHARED, fd, 0);
- if (base != MAP_FAILED) {
- *result = new PosixMmapReadableFile(fname, base, size, &mmap_limit_);
- } else {
- s = IOError(fname, errno);
- }
- }
- close(fd);
- if (!s.ok()) {
- mmap_limit_.Release();
+ return PosixError(filename, errno);
+ }
+
+ if (!mmap_limiter_.Acquire()) {
+ *result = new PosixRandomAccessFile(filename, fd, &fd_limiter_);
+ return Status::OK();
+ }
+
+ uint64_t file_size;
+ Status status = GetFileSize(filename, &file_size);
+ if (status.ok()) {
+ void* mmap_base = ::mmap(/*addr=*/nullptr, file_size, PROT_READ,
+ MAP_SHARED, fd, 0);
+ if (mmap_base != MAP_FAILED) {
+ *result = new PosixMmapReadableFile(
+ filename, reinterpret_cast<char*>(mmap_base), file_size,
+ &mmap_limiter_);
+ } else {
+ status = PosixError(filename, errno);
}
- } else {
- *result = new PosixRandomAccessFile(fname, fd, &fd_limit_);
}
- return s;
+ ::close(fd);
+ if (!status.ok()) {
+ mmap_limiter_.Release();
+ }
+ return status;
}
- virtual Status NewWritableFile(const std::string& fname,
- WritableFile** result) {
- Status s;
- FILE* f = fopen(fname.c_str(), "w");
- if (f == NULL) {
- *result = NULL;
- s = IOError(fname, errno);
- } else {
- *result = new PosixWritableFile(fname, f);
+ Status NewWritableFile(const std::string& filename,
+ WritableFile** result) override {
+ int fd = ::open(filename.c_str(), O_TRUNC | O_WRONLY | O_CREAT, 0644);
+ if (fd < 0) {
+ *result = nullptr;
+ return PosixError(filename, errno);
}
- return s;
+
+ *result = new PosixWritableFile(filename, fd);
+ return Status::OK();
}
- virtual Status NewAppendableFile(const std::string& fname,
- WritableFile** result) {
- Status s;
- FILE* f = fopen(fname.c_str(), "a");
- if (f == NULL) {
- *result = NULL;
- s = IOError(fname, errno);
- } else {
- *result = new PosixWritableFile(fname, f);
+ Status NewAppendableFile(const std::string& filename,
+ WritableFile** result) override {
+ int fd = ::open(filename.c_str(), O_APPEND | O_WRONLY | O_CREAT, 0644);
+ if (fd < 0) {
+ *result = nullptr;
+ return PosixError(filename, errno);
}
- return s;
+
+ *result = new PosixWritableFile(filename, fd);
+ return Status::OK();
}
- virtual bool FileExists(const std::string& fname) {
- return access(fname.c_str(), F_OK) == 0;
+ bool FileExists(const std::string& filename) override {
+ return ::access(filename.c_str(), F_OK) == 0;
}
- virtual Status GetChildren(const std::string& dir,
- std::vector<std::string>* result) {
+ Status GetChildren(const std::string& directory_path,
+ std::vector<std::string>* result) override {
result->clear();
- DIR* d = opendir(dir.c_str());
- if (d == NULL) {
- return IOError(dir, errno);
+ ::DIR* dir = ::opendir(directory_path.c_str());
+ if (dir == nullptr) {
+ return PosixError(directory_path, errno);
}
- struct dirent* entry;
- while ((entry = readdir(d)) != NULL) {
- result->push_back(entry->d_name);
+ struct ::dirent* entry;
+ while ((entry = ::readdir(dir)) != nullptr) {
+ result->emplace_back(entry->d_name);
}
- closedir(d);
+ ::closedir(dir);
return Status::OK();
}
- virtual Status DeleteFile(const std::string& fname) {
- Status result;
- if (unlink(fname.c_str()) != 0) {
- result = IOError(fname, errno);
+ Status DeleteFile(const std::string& filename) override {
+ if (::unlink(filename.c_str()) != 0) {
+ return PosixError(filename, errno);
}
- return result;
+ return Status::OK();
}
- virtual Status CreateDir(const std::string& name) {
- Status result;
- if (mkdir(name.c_str(), 0755) != 0) {
- result = IOError(name, errno);
+ Status CreateDir(const std::string& dirname) override {
+ if (::mkdir(dirname.c_str(), 0755) != 0) {
+ return PosixError(dirname, errno);
}
- return result;
+ return Status::OK();
}
- virtual Status DeleteDir(const std::string& name) {
- Status result;
- if (rmdir(name.c_str()) != 0) {
- result = IOError(name, errno);
+ Status DeleteDir(const std::string& dirname) override {
+ if (::rmdir(dirname.c_str()) != 0) {
+ return PosixError(dirname, errno);
}
- return result;
+ return Status::OK();
}
- virtual Status GetFileSize(const std::string& fname, uint64_t* size) {
- Status s;
- struct stat sbuf;
- if (stat(fname.c_str(), &sbuf) != 0) {
+ Status GetFileSize(const std::string& filename, uint64_t* size) override {
+ struct ::stat file_stat;
+ if (::stat(filename.c_str(), &file_stat) != 0) {
*size = 0;
- s = IOError(fname, errno);
- } else {
- *size = sbuf.st_size;
+ return PosixError(filename, errno);
}
- return s;
+ *size = file_stat.st_size;
+ return Status::OK();
}
- virtual Status RenameFile(const std::string& src, const std::string& target) {
- Status result;
- if (rename(src.c_str(), target.c_str()) != 0) {
- result = IOError(src, errno);
+ Status RenameFile(const std::string& from, const std::string& to) override {
+ if (std::rename(from.c_str(), to.c_str()) != 0) {
+ return PosixError(from, errno);
}
- return result;
+ return Status::OK();
}
- virtual Status LockFile(const std::string& fname, FileLock** lock) {
- *lock = NULL;
- Status result;
- int fd = open(fname.c_str(), O_RDWR | O_CREAT, 0644);
+ Status LockFile(const std::string& filename, FileLock** lock) override {
+ *lock = nullptr;
+
+ int fd = ::open(filename.c_str(), O_RDWR | O_CREAT, 0644);
if (fd < 0) {
- result = IOError(fname, errno);
- } else if (!locks_.Insert(fname)) {
- close(fd);
- result = Status::IOError("lock " + fname, "already held by process");
- } else if (LockOrUnlock(fd, true) == -1) {
- result = IOError("lock " + fname, errno);
- close(fd);
- locks_.Remove(fname);
- } else {
- PosixFileLock* my_lock = new PosixFileLock;
- my_lock->fd_ = fd;
- my_lock->name_ = fname;
- *lock = my_lock;
+ return PosixError(filename, errno);
+ }
+
+ if (!locks_.Insert(filename)) {
+ ::close(fd);
+ return Status::IOError("lock " + filename, "already held by process");
+ }
+
+ if (LockOrUnlock(fd, true) == -1) {
+ int lock_errno = errno;
+ ::close(fd);
+ locks_.Remove(filename);
+ return PosixError("lock " + filename, lock_errno);
}
- return result;
+
+ *lock = new PosixFileLock(fd, filename);
+ return Status::OK();
}
- virtual Status UnlockFile(FileLock* lock) {
- PosixFileLock* my_lock = reinterpret_cast<PosixFileLock*>(lock);
- Status result;
- if (LockOrUnlock(my_lock->fd_, false) == -1) {
- result = IOError("unlock", errno);
+ Status UnlockFile(FileLock* lock) override {
+ PosixFileLock* posix_file_lock = static_cast<PosixFileLock*>(lock);
+ if (LockOrUnlock(posix_file_lock->fd(), false) == -1) {
+ return PosixError("unlock " + posix_file_lock->filename(), errno);
}
- locks_.Remove(my_lock->name_);
- close(my_lock->fd_);
- delete my_lock;
- return result;
+ locks_.Remove(posix_file_lock->filename());
+ ::close(posix_file_lock->fd());
+ delete posix_file_lock;
+ return Status::OK();
}
- virtual void Schedule(void (*function)(void*), void* arg);
+ void Schedule(void (*background_work_function)(void* background_work_arg),
+ void* background_work_arg) override;
- virtual void StartThread(void (*function)(void* arg), void* arg);
+ void StartThread(void (*thread_main)(void* thread_main_arg),
+ void* thread_main_arg) override;
- virtual Status GetTestDirectory(std::string* result) {
- const char* env = getenv("TEST_TMPDIR");
+ Status GetTestDirectory(std::string* result) override {
+ const char* env = std::getenv("TEST_TMPDIR");
if (env && env[0] != '\0') {
*result = env;
} else {
char buf[100];
- snprintf(buf, sizeof(buf), "/tmp/leveldbtest-%d", int(geteuid()));
+ std::snprintf(buf, sizeof(buf), "/tmp/leveldbtest-%d",
+ static_cast<int>(::geteuid()));
*result = buf;
}
- // Directory may already exist
+
+ // The CreateDir status is ignored because the directory may already exist.
CreateDir(*result);
- return Status::OK();
- }
- static uint64_t gettid() {
- pthread_t tid = pthread_self();
- uint64_t thread_id = 0;
- memcpy(&thread_id, &tid, std::min(sizeof(thread_id), sizeof(tid)));
- return thread_id;
+ return Status::OK();
}
- virtual Status NewLogger(const std::string& fname, Logger** result) {
- FILE* f = fopen(fname.c_str(), "w");
- if (f == NULL) {
- *result = NULL;
- return IOError(fname, errno);
+ Status NewLogger(const std::string& filename, Logger** result) override {
+ std::FILE* fp = std::fopen(filename.c_str(), "w");
+ if (fp == nullptr) {
+ *result = nullptr;
+ return PosixError(filename, errno);
} else {
- *result = new PosixLogger(f, &PosixEnv::gettid);
+ *result = new PosixLogger(fp);
return Status::OK();
}
}
- virtual uint64_t NowMicros() {
- struct timeval tv;
- gettimeofday(&tv, NULL);
- return static_cast<uint64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
+ uint64_t NowMicros() override {
+ static constexpr uint64_t kUsecondsPerSecond = 1000000;
+ struct ::timeval tv;
+ ::gettimeofday(&tv, nullptr);
+ return static_cast<uint64_t>(tv.tv_sec) * kUsecondsPerSecond + tv.tv_usec;
}
- virtual void SleepForMicroseconds(int micros) {
- usleep(micros);
+ void SleepForMicroseconds(int micros) override {
+ ::usleep(micros);
}
private:
- void PthreadCall(const char* label, int result) {
- if (result != 0) {
- fprintf(stderr, "pthread %s: %s\n", label, strerror(result));
- abort();
- }
- }
+ void BackgroundThreadMain();
- // BGThread() is the body of the background thread
- void BGThread();
- static void* BGThreadWrapper(void* arg) {
- reinterpret_cast<PosixEnv*>(arg)->BGThread();
- return NULL;
+ static void BackgroundThreadEntryPoint(PosixEnv* env) {
+ env->BackgroundThreadMain();
}
- pthread_mutex_t mu_;
- pthread_cond_t bgsignal_;
- pthread_t bgthread_;
- bool started_bgthread_;
+ // Stores the work item data in a Schedule() call.
+ //
+ // Instances are constructed on the thread calling Schedule() and used on the
+ // background thread.
+ //
+ // This structure is thread-safe because it is immutable.
+ struct BackgroundWorkItem {
+ explicit BackgroundWorkItem(void (*function)(void* arg), void* arg)
+ : function(function), arg(arg) {}
+
+ void (* const function)(void*);
+ void* const arg;
+ };
+
+ port::Mutex background_work_mutex_;
+ port::CondVar background_work_cv_ GUARDED_BY(background_work_mutex_);
+ bool started_background_thread_ GUARDED_BY(background_work_mutex_);
- // Entry per Schedule() call
- struct BGItem { void* arg; void (*function)(void*); };
- typedef std::deque<BGItem> BGQueue;
- BGQueue queue_;
+ std::queue<BackgroundWorkItem> background_work_queue_
+ GUARDED_BY(background_work_mutex_);
- PosixLockTable locks_;
- Limiter mmap_limit_;
- Limiter fd_limit_;
+ PosixLockTable locks_; // Thread-safe.
+ Limiter mmap_limiter_; // Thread-safe.
+ Limiter fd_limiter_; // Thread-safe.
};
// Return the maximum number of concurrent mmaps.
-static int MaxMmaps() {
- if (mmap_limit >= 0) {
- return mmap_limit;
- }
- // Up to 1000 mmaps for 64-bit binaries; none for smaller pointer sizes.
- mmap_limit = sizeof(void*) >= 8 ? 1000 : 0;
- return mmap_limit;
+int MaxMmaps() {
+ return g_mmap_limit;
}
// Return the maximum number of read-only files to keep open.
-static intptr_t MaxOpenFiles() {
- if (open_read_only_file_limit >= 0) {
- return open_read_only_file_limit;
+int MaxOpenFiles() {
+ if (g_open_read_only_file_limit >= 0) {
+ return g_open_read_only_file_limit;
}
- struct rlimit rlim;
- if (getrlimit(RLIMIT_NOFILE, &rlim)) {
+ struct ::rlimit rlim;
+ if (::getrlimit(RLIMIT_NOFILE, &rlim)) {
// getrlimit failed, fallback to hard-coded default.
- open_read_only_file_limit = 50;
+ g_open_read_only_file_limit = 50;
} else if (rlim.rlim_cur == RLIM_INFINITY) {
- open_read_only_file_limit = std::numeric_limits<int>::max();
+ g_open_read_only_file_limit = std::numeric_limits<int>::max();
} else {
// Allow use of 20% of available file descriptors for read-only files.
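+ // For example (illustrative): with a soft limit of 1024 open files, up to
+ // 204 descriptors may be used for read-only files.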
- open_read_only_file_limit = rlim.rlim_cur / 5;
+ g_open_read_only_file_limit = rlim.rlim_cur / 5;
}
- return open_read_only_file_limit;
+ return g_open_read_only_file_limit;
}
+} // namespace
+
PosixEnv::PosixEnv()
- : started_bgthread_(false),
- mmap_limit_(MaxMmaps()),
- fd_limit_(MaxOpenFiles()) {
- PthreadCall("mutex_init", pthread_mutex_init(&mu_, NULL));
- PthreadCall("cvar_init", pthread_cond_init(&bgsignal_, NULL));
+ : background_work_cv_(&background_work_mutex_),
+ started_background_thread_(false),
+ mmap_limiter_(MaxMmaps()),
+ fd_limiter_(MaxOpenFiles()) {
}
-void PosixEnv::Schedule(void (*function)(void*), void* arg) {
- PthreadCall("lock", pthread_mutex_lock(&mu_));
+void PosixEnv::Schedule(
+ void (*background_work_function)(void* background_work_arg),
+ void* background_work_arg) {
+ background_work_mutex_.Lock();
- // Start background thread if necessary
- if (!started_bgthread_) {
- started_bgthread_ = true;
- PthreadCall(
- "create thread",
- pthread_create(&bgthread_, NULL, &PosixEnv::BGThreadWrapper, this));
+ // Start the background thread, if we haven't done so already.
+ if (!started_background_thread_) {
+ started_background_thread_ = true;
+ std::thread background_thread(PosixEnv::BackgroundThreadEntryPoint, this);
+ background_thread.detach();
}
- // If the queue is currently empty, the background thread may currently be
- // waiting.
- if (queue_.empty()) {
- PthreadCall("signal", pthread_cond_signal(&bgsignal_));
+ // If the queue is empty, the background thread may be waiting for work.
+ if (background_work_queue_.empty()) {
+ background_work_cv_.Signal();
}
- // Add to priority queue
- queue_.push_back(BGItem());
- queue_.back().function = function;
- queue_.back().arg = arg;
-
- PthreadCall("unlock", pthread_mutex_unlock(&mu_));
+ background_work_queue_.emplace(background_work_function, background_work_arg);
+ background_work_mutex_.Unlock();
}
-void PosixEnv::BGThread() {
+void PosixEnv::BackgroundThreadMain() {
while (true) {
- // Wait until there is an item that is ready to run
- PthreadCall("lock", pthread_mutex_lock(&mu_));
- while (queue_.empty()) {
- PthreadCall("wait", pthread_cond_wait(&bgsignal_, &mu_));
+ background_work_mutex_.Lock();
+
+ // Wait until there is work to be done.
+ while (background_work_queue_.empty()) {
+ background_work_cv_.Wait();
}
- void (*function)(void*) = queue_.front().function;
- void* arg = queue_.front().arg;
- queue_.pop_front();
+ assert(!background_work_queue_.empty());
+ auto background_work_function =
+ background_work_queue_.front().function;
+ void* background_work_arg = background_work_queue_.front().arg;
+ background_work_queue_.pop();
- PthreadCall("unlock", pthread_mutex_unlock(&mu_));
- (*function)(arg);
+ background_work_mutex_.Unlock();
+ background_work_function(background_work_arg);
}
}
namespace {
-struct StartThreadState {
- void (*user_function)(void*);
- void* arg;
+
+// Wraps an Env instance whose destructor is never called.
+//
+// Intended usage:
+// using PlatformSingletonEnv = SingletonEnv<PlatformEnv>;
+// void ConfigurePosixEnv(int param) {
+// PlatformSingletonEnv::AssertEnvNotInitialized();
+// // set global configuration flags.
+// }
+// Env* Env::Default() {
+// static PlatformSingletonEnv default_env;
+// return default_env.env();
+// }
+template<typename EnvType>
+class SingletonEnv {
+ public:
+ SingletonEnv() {
+#if !defined(NDEBUG)
+ env_initialized_.store(true, std::memory_order::memory_order_relaxed);
+#endif // !defined(NDEBUG)
+ static_assert(sizeof(env_storage_) >= sizeof(EnvType),
+ "env_storage_ will not fit the Env");
+ static_assert(alignof(decltype(env_storage_)) >= alignof(EnvType),
+ "env_storage_ does not meet the Env's alignment needs");
+ new (&env_storage_) EnvType();
+ }
+ ~SingletonEnv() = default;
+
+ SingletonEnv(const SingletonEnv&) = delete;
+ SingletonEnv& operator=(const SingletonEnv&) = delete;
+
+ Env* env() { return reinterpret_cast<Env*>(&env_storage_); }
+
+ static void AssertEnvNotInitialized() {
+#if !defined(NDEBUG)
+ assert(!env_initialized_.load(std::memory_order::memory_order_relaxed));
+#endif // !defined(NDEBUG)
+ }
+
+ private:
+ typename std::aligned_storage<sizeof(EnvType), alignof(EnvType)>::type
+ env_storage_;
+#if !defined(NDEBUG)
+ static std::atomic<bool> env_initialized_;
+#endif // !defined(NDEBUG)
};
-}
-static void* StartThreadWrapper(void* arg) {
- StartThreadState* state = reinterpret_cast<StartThreadState*>(arg);
- state->user_function(state->arg);
- delete state;
- return NULL;
-}
-void PosixEnv::StartThread(void (*function)(void* arg), void* arg) {
- pthread_t t;
- StartThreadState* state = new StartThreadState;
- state->user_function = function;
- state->arg = arg;
- PthreadCall("start thread",
- pthread_create(&t, NULL, &StartThreadWrapper, state));
-}
+#if !defined(NDEBUG)
+template<typename EnvType>
+std::atomic<bool> SingletonEnv<EnvType>::env_initialized_;
+#endif // !defined(NDEBUG)
+
+using PosixDefaultEnv = SingletonEnv<PosixEnv>;
} // namespace
-static pthread_once_t once = PTHREAD_ONCE_INIT;
-static Env* default_env;
-static void InitDefaultEnv() { default_env = new PosixEnv; }
+void PosixEnv::StartThread(void (*thread_main)(void* thread_main_arg),
+ void* thread_main_arg) {
+ std::thread new_thread(thread_main, thread_main_arg);
+ new_thread.detach();
+}
void EnvPosixTestHelper::SetReadOnlyFDLimit(int limit) {
- assert(default_env == NULL);
- open_read_only_file_limit = limit;
+ PosixDefaultEnv::AssertEnvNotInitialized();
+ g_open_read_only_file_limit = limit;
}
void EnvPosixTestHelper::SetReadOnlyMMapLimit(int limit) {
- assert(default_env == NULL);
- mmap_limit = limit;
+ PosixDefaultEnv::AssertEnvNotInitialized();
+ g_mmap_limit = limit;
}
Env* Env::Default() {
- pthread_once(&once, InitDefaultEnv);
- return default_env;
+ static PosixDefaultEnv env_container;
+ return env_container.env();
}
} // namespace leveldb
std::string test_file = test_dir + "/open_on_read.txt";
FILE* f = fopen(test_file.c_str(), "w");
- ASSERT_TRUE(f != NULL);
+ ASSERT_TRUE(f != nullptr);
const char kFileData[] = "abcdefghijklmnopqrstuvwxyz";
fputs(kFileData, f);
fclose(f);
#include "leveldb/env.h"
+#include <algorithm>
+#include <atomic>
+
#include "port/port.h"
+#include "port/thread_annotations.h"
+#include "util/mutexlock.h"
#include "util/testharness.h"
+#include "util/testutil.h"
namespace leveldb {
static const int kMMapLimit = 4;
class EnvTest {
- private:
- port::Mutex mu_;
- std::string events_;
-
public:
Env* env_;
EnvTest() : env_(Env::Default()) { }
};
-static void SetBool(void* ptr) {
- reinterpret_cast<port::AtomicPointer*>(ptr)->NoBarrier_Store(ptr);
+namespace {
+
+static void SetAtomicBool(void* atomic_bool_ptr) {
+ std::atomic<bool>* atomic_bool =
+ reinterpret_cast<std::atomic<bool>*>(atomic_bool_ptr);
+ atomic_bool->store(true, std::memory_order_relaxed);
+}
+
+} // namespace
+
+TEST(EnvTest, ReadWrite) {
+ Random rnd(test::RandomSeed());
+
+ // Get file to use for testing.
+ std::string test_dir;
+ ASSERT_OK(env_->GetTestDirectory(&test_dir));
+ std::string test_file_name = test_dir + "/open_on_read.txt";
+ WritableFile* writable_file;
+ ASSERT_OK(env_->NewWritableFile(test_file_name, &writable_file));
+
+ // Fill a file with data generated via a sequence of randomly sized writes.
+ static const size_t kDataSize = 10 * 1048576;
+ std::string data;
+ while (data.size() < kDataSize) {
+ int len = rnd.Skewed(18); // Up to 2^18 - 1, but typically much smaller
+ std::string r;
+ test::RandomString(&rnd, len, &r);
+ ASSERT_OK(writable_file->Append(r));
+ data += r;
+ if (rnd.OneIn(10)) {
+ ASSERT_OK(writable_file->Flush());
+ }
+ }
+ ASSERT_OK(writable_file->Sync());
+ ASSERT_OK(writable_file->Close());
+ delete writable_file;
+
+ // Read all data using a sequence of randomly sized reads.
+ SequentialFile* sequential_file;
+ ASSERT_OK(env_->NewSequentialFile(test_file_name, &sequential_file));
+ std::string read_result;
+ std::string scratch;
+ while (read_result.size() < data.size()) {
+ int len = std::min<int>(rnd.Skewed(18), data.size() - read_result.size());
+ scratch.resize(std::max(len, 1)); // at least 1 so &scratch[0] is legal
+ Slice read;
+ ASSERT_OK(sequential_file->Read(len, &read, &scratch[0]));
+ if (len > 0) {
+ ASSERT_GT(read.size(), 0);
+ }
+ ASSERT_LE(read.size(), len);
+ read_result.append(read.data(), read.size());
+ }
+ ASSERT_EQ(read_result, data);
+ delete sequential_file;
}
TEST(EnvTest, RunImmediately) {
- port::AtomicPointer called (NULL);
- env_->Schedule(&SetBool, &called);
+ std::atomic<bool> called(false);
+ env_->Schedule(&SetAtomicBool, &called);
env_->SleepForMicroseconds(kDelayMicros);
- ASSERT_TRUE(called.NoBarrier_Load() != NULL);
+ ASSERT_TRUE(called.load(std::memory_order_relaxed));
}
TEST(EnvTest, RunMany) {
- port::AtomicPointer last_id (NULL);
+ std::atomic<int> last_id(0);
- struct CB {
- port::AtomicPointer* last_id_ptr; // Pointer to shared slot
- uintptr_t id; // Order# for the execution of this callback
+ struct Callback {
+ std::atomic<int>* const last_id_ptr_; // Pointer to shared state.
+ const int id_; // Order# for the execution of this callback.
- CB(port::AtomicPointer* p, int i) : last_id_ptr(p), id(i) { }
+ Callback(std::atomic<int>* last_id_ptr, int id)
+ : last_id_ptr_(last_id_ptr), id_(id) { }
- static void Run(void* v) {
- CB* cb = reinterpret_cast<CB*>(v);
- void* cur = cb->last_id_ptr->NoBarrier_Load();
- ASSERT_EQ(cb->id-1, reinterpret_cast<uintptr_t>(cur));
- cb->last_id_ptr->Release_Store(reinterpret_cast<void*>(cb->id));
+ static void Run(void* arg) {
+ Callback* callback = reinterpret_cast<Callback*>(arg);
+ int current_id = callback->last_id_ptr_->load(std::memory_order_relaxed);
+ ASSERT_EQ(callback->id_ - 1, current_id);
+ callback->last_id_ptr_->store(callback->id_, std::memory_order_relaxed);
}
};
- // Schedule in different order than start time
- CB cb1(&last_id, 1);
- CB cb2(&last_id, 2);
- CB cb3(&last_id, 3);
- CB cb4(&last_id, 4);
- env_->Schedule(&CB::Run, &cb1);
- env_->Schedule(&CB::Run, &cb2);
- env_->Schedule(&CB::Run, &cb3);
- env_->Schedule(&CB::Run, &cb4);
+ Callback callback1(&last_id, 1);
+ Callback callback2(&last_id, 2);
+ Callback callback3(&last_id, 3);
+ Callback callback4(&last_id, 4);
+ env_->Schedule(&Callback::Run, &callback1);
+ env_->Schedule(&Callback::Run, &callback2);
+ env_->Schedule(&Callback::Run, &callback3);
+ env_->Schedule(&Callback::Run, &callback4);
env_->SleepForMicroseconds(kDelayMicros);
- void* cur = last_id.Acquire_Load();
- ASSERT_EQ(4, reinterpret_cast<uintptr_t>(cur));
+ ASSERT_EQ(4, last_id.load(std::memory_order_relaxed));
}
struct State {
port::Mutex mu;
- int val;
- int num_running;
+ int val GUARDED_BY(mu);
+ int num_running GUARDED_BY(mu);
+
+ State(int val, int num_running) : val(val), num_running(num_running) { }
};
static void ThreadBody(void* arg) {
}
TEST(EnvTest, StartThread) {
- State state;
- state.val = 0;
- state.num_running = 3;
+ State state(0, 3);
for (int i = 0; i < 3; i++) {
env_->StartThread(&ThreadBody, &state);
}
}
env_->SleepForMicroseconds(kDelayMicros);
}
+
+ MutexLock l(&state.mu);
ASSERT_EQ(state.val, 3);
}
+TEST(EnvTest, TestOpenNonExistentFile) {
+ // Verify that opening a file that does not exist yields a NotFound status.
+ std::string test_dir;
+ ASSERT_OK(env_->GetTestDirectory(&test_dir));
+
+ std::string non_existent_file = test_dir + "/non_existent_file";
+ ASSERT_TRUE(!env_->FileExists(non_existent_file));
+
+ RandomAccessFile* random_access_file;
+ Status status = env_->NewRandomAccessFile(
+ non_existent_file, &random_access_file);
+ ASSERT_TRUE(status.IsNotFound());
+
+ SequentialFile* sequential_file;
+ status = env_->NewSequentialFile(non_existent_file, &sequential_file);
+ ASSERT_TRUE(status.IsNotFound());
+}
+
+TEST(EnvTest, ReopenWritableFile) {
+ std::string test_dir;
+ ASSERT_OK(env_->GetTestDirectory(&test_dir));
+ std::string test_file_name = test_dir + "/reopen_writable_file.txt";
+ env_->DeleteFile(test_file_name);
+
+ WritableFile* writable_file;
+ ASSERT_OK(env_->NewWritableFile(test_file_name, &writable_file));
+ std::string data("hello world!");
+ ASSERT_OK(writable_file->Append(data));
+ ASSERT_OK(writable_file->Close());
+ delete writable_file;
+
+ ASSERT_OK(env_->NewWritableFile(test_file_name, &writable_file));
+ data = "42";
+ ASSERT_OK(writable_file->Append(data));
+ ASSERT_OK(writable_file->Close());
+ delete writable_file;
+
+ ASSERT_OK(ReadFileToString(env_, test_file_name, &data));
+ ASSERT_EQ(std::string("42"), data);
+ env_->DeleteFile(test_file_name);
+}
+
+TEST(EnvTest, ReopenAppendableFile) {
+ std::string test_dir;
+ ASSERT_OK(env_->GetTestDirectory(&test_dir));
+ std::string test_file_name = test_dir + "/reopen_appendable_file.txt";
+ env_->DeleteFile(test_file_name);
+
+ WritableFile* appendable_file;
+ ASSERT_OK(env_->NewAppendableFile(test_file_name, &appendable_file));
+ std::string data("hello world!");
+ ASSERT_OK(appendable_file->Append(data));
+ ASSERT_OK(appendable_file->Close());
+ delete appendable_file;
+
+ ASSERT_OK(env_->NewAppendableFile(test_file_name, &appendable_file));
+ data = "42";
+ ASSERT_OK(appendable_file->Append(data));
+ ASSERT_OK(appendable_file->Close());
+ delete appendable_file;
+
+ ASSERT_OK(ReadFileToString(env_, test_file_name, &data));
+ ASSERT_EQ(std::string("hello world!42"), data);
+ env_->DeleteFile(test_file_name);
+}
+
} // namespace leveldb
int main(int argc, char** argv) {
--- /dev/null
+// Copyright (c) 2018 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+// Prevent Windows headers from defining min/max macros and instead
+// use STL.
+#ifndef NOMINMAX
+#define NOMINMAX
+#endif // ifndef NOMINMAX
+#include <windows.h>
+
+#include <algorithm>
+#include <atomic>
+#include <chrono>
+#include <condition_variable>
+#include <deque>
+#include <memory>
+#include <mutex>
+#include <sstream>
+#include <string>
+#include <vector>
+
+#include "leveldb/env.h"
+#include "leveldb/slice.h"
+#include "port/port.h"
+#include "port/thread_annotations.h"
+#include "util/env_windows_test_helper.h"
+#include "util/logging.h"
+#include "util/mutexlock.h"
+#include "util/windows_logger.h"
+
+#if defined(DeleteFile)
+#undef DeleteFile
+#endif // defined(DeleteFile)
+
+namespace leveldb {
+
+namespace {
+
+constexpr const size_t kWritableFileBufferSize = 65536;
+
+// Up to 1000 mmaps for 64-bit binaries; none for 32-bit.
+constexpr int kDefaultMmapLimit = sizeof(void*) >= 8 ? 1000 : 0;
+
+// Modified by EnvWindowsTestHelper::SetReadOnlyMMapLimit().
+int g_mmap_limit = kDefaultMmapLimit;
+
+std::string GetWindowsErrorMessage(DWORD error_code) {
+ std::string message;
+ char* error_text = nullptr;
+ // Use MBCS version of FormatMessage to match return value.
+ size_t error_text_size = ::FormatMessageA(
+ FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_ALLOCATE_BUFFER |
+ FORMAT_MESSAGE_IGNORE_INSERTS,
+ nullptr, error_code, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
+ reinterpret_cast<char*>(&error_text), 0, nullptr);
+ if (!error_text) {
+ return message;
+ }
+ message.assign(error_text, error_text_size);
+ ::LocalFree(error_text);
+ return message;
+}
+
+Status WindowsError(const std::string& context, DWORD error_code) {
+ if (error_code == ERROR_FILE_NOT_FOUND || error_code == ERROR_PATH_NOT_FOUND)
+ return Status::NotFound(context, GetWindowsErrorMessage(error_code));
+ return Status::IOError(context, GetWindowsErrorMessage(error_code));
+}
+
+class ScopedHandle {
+ public:
+ ScopedHandle(HANDLE handle) : handle_(handle) {}
+ ScopedHandle(ScopedHandle&& other) noexcept : handle_(other.Release()) {}
+ ~ScopedHandle() { Close(); }
+
+ ScopedHandle& operator=(ScopedHandle&& rhs) noexcept {
+ if (this != &rhs) handle_ = rhs.Release();
+ return *this;
+ }
+
+ bool Close() {
+ if (!is_valid()) {
+ return true;
+ }
+ HANDLE h = handle_;
+ handle_ = INVALID_HANDLE_VALUE;
+ return ::CloseHandle(h);
+ }
+
+ bool is_valid() const {
+ return handle_ != INVALID_HANDLE_VALUE && handle_ != nullptr;
+ }
+
+ HANDLE get() const { return handle_; }
+
+ HANDLE Release() {
+ HANDLE h = handle_;
+ handle_ = INVALID_HANDLE_VALUE;
+ return h;
+ }
+
+ private:
+ HANDLE handle_;
+};
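+
+// Illustrative ScopedHandle usage, mirroring the call sites below:
+// ScopedHandle handle = ::CreateFileA(fname.c_str(), GENERIC_READ,
+// FILE_SHARE_READ, nullptr, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, nullptr);
+// if (!handle.is_valid()) return WindowsError(fname, ::GetLastError());
+// // ::CloseHandle() runs automatically when |handle| goes out of scope.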
+
+// Helper class to limit resource usage to avoid exhaustion.
+// Currently used to limit read-only file descriptors and mmap file usage
+// so that we do not run out of file descriptors or virtual memory, or run into
+// kernel performance problems for very large databases.
+class Limiter {
+ public:
+ // Limit maximum number of resources to |max_acquires|.
+ Limiter(int max_acquires) : acquires_allowed_(max_acquires) {}
+
+ Limiter(const Limiter&) = delete;
+ Limiter& operator=(const Limiter&) = delete;
+
+ // If another resource is available, acquire it and return true.
+ // Else return false.
+ bool Acquire() {
+ int old_acquires_allowed =
+ acquires_allowed_.fetch_sub(1, std::memory_order_relaxed);
+
+ if (old_acquires_allowed > 0)
+ return true;
+
+ acquires_allowed_.fetch_add(1, std::memory_order_relaxed);
+ return false;
+ }
+
+ // Release a resource acquired by a previous call to Acquire() that returned
+ // true.
+ void Release() {
+ acquires_allowed_.fetch_add(1, std::memory_order_relaxed);
+ }
+
+ private:
+ // The number of available resources.
+ //
+ // This is a counter and is not tied to the invariants of any other class, so
+ // it can be operated on safely using std::memory_order_relaxed.
+ std::atomic<int> acquires_allowed_;
+};
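+
+// Illustrative Limiter usage:
+// Limiter limiter(/*max_acquires=*/100);
+// if (limiter.Acquire()) {
+// // ... use the resource, then release it ...
+// limiter.Release();
+// }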
+
+class WindowsSequentialFile : public SequentialFile {
+ public:
+ WindowsSequentialFile(std::string fname, ScopedHandle file)
+ : filename_(fname), file_(std::move(file)) {}
+ ~WindowsSequentialFile() override {}
+
+ Status Read(size_t n, Slice* result, char* scratch) override {
+ Status s;
+ DWORD bytes_read;
+ // DWORD is 32-bit, but size_t could technically be larger. However leveldb
+ // files are limited to leveldb::Options::max_file_size which is clamped to
+ // 1<<30 or 1 GiB.
+ assert(n <= std::numeric_limits<DWORD>::max());
+ if (!::ReadFile(file_.get(), scratch, static_cast<DWORD>(n), &bytes_read,
+ nullptr)) {
+ s = WindowsError(filename_, ::GetLastError());
+ } else {
+ *result = Slice(scratch, bytes_read);
+ }
+ return s;
+ }
+
+ Status Skip(uint64_t n) override {
+ LARGE_INTEGER distance;
+ distance.QuadPart = n;
+ if (!::SetFilePointerEx(file_.get(), distance, nullptr, FILE_CURRENT)) {
+ return WindowsError(filename_, ::GetLastError());
+ }
+ return Status::OK();
+ }
+
+ private:
+ std::string filename_;
+ ScopedHandle file_;
+};
+
+class WindowsRandomAccessFile : public RandomAccessFile {
+ public:
+ WindowsRandomAccessFile(std::string fname, ScopedHandle handle)
+ : filename_(fname), handle_(std::move(handle)) {}
+
+ ~WindowsRandomAccessFile() override = default;
+
+ Status Read(uint64_t offset, size_t n, Slice* result,
+ char* scratch) const override {
+ DWORD bytes_read = 0;
+ OVERLAPPED overlapped = {0};
+
+ overlapped.OffsetHigh = static_cast<DWORD>(offset >> 32);
+ overlapped.Offset = static_cast<DWORD>(offset);
+ if (!::ReadFile(handle_.get(), scratch, static_cast<DWORD>(n), &bytes_read,
+ &overlapped)) {
+ DWORD error_code = ::GetLastError();
+ if (error_code != ERROR_HANDLE_EOF) {
+ *result = Slice(scratch, 0);
+ return Status::IOError(filename_, GetWindowsErrorMessage(error_code));
+ }
+ }
+
+ *result = Slice(scratch, bytes_read);
+ return Status::OK();
+ }
+
+ private:
+ std::string filename_;
+ ScopedHandle handle_;
+};
+
+class WindowsMmapReadableFile : public RandomAccessFile {
+ public:
+ // base[0,length-1] contains the mmapped contents of the file.
+ WindowsMmapReadableFile(std::string fname, void* base, size_t length,
+ Limiter* limiter)
+ : filename_(std::move(fname)),
+ mmapped_region_(base),
+ length_(length),
+ limiter_(limiter) {}
+
+ ~WindowsMmapReadableFile() override {
+ ::UnmapViewOfFile(mmapped_region_);
+ limiter_->Release();
+ }
+
+ Status Read(uint64_t offset, size_t n, Slice* result,
+ char* scratch) const override {
+ Status s;
+ if (offset + n > length_) {
+ *result = Slice();
+ s = WindowsError(filename_, ERROR_INVALID_PARAMETER);
+ } else {
+ *result = Slice(reinterpret_cast<char*>(mmapped_region_) + offset, n);
+ }
+ return s;
+ }
+
+ private:
+ std::string filename_;
+ void* mmapped_region_;
+ size_t length_;
+ Limiter* limiter_;
+};
+
+class WindowsWritableFile : public WritableFile {
+ public:
+ WindowsWritableFile(std::string fname, ScopedHandle handle)
+ : filename_(std::move(fname)), handle_(std::move(handle)), pos_(0) {}
+
+ ~WindowsWritableFile() override = default;
+
+ Status Append(const Slice& data) override {
+ size_t n = data.size();
+ const char* p = data.data();
+
+ // Fit as much as possible into buffer.
+ size_t copy = std::min(n, kWritableFileBufferSize - pos_);
+ memcpy(buf_ + pos_, p, copy);
+ p += copy;
+ n -= copy;
+ pos_ += copy;
+ if (n == 0) {
+ return Status::OK();
+ }
+
+ // Can't fit in buffer, so need to do at least one write.
+ Status s = FlushBuffered();
+ if (!s.ok()) {
+ return s;
+ }
+
+ // Small writes go to buffer, large writes are written directly.
+ if (n < kWritableFileBufferSize) {
+ memcpy(buf_, p, n);
+ pos_ = n;
+ return Status::OK();
+ }
+ return WriteRaw(p, n);
+ }
+
+ Status Close() override {
+ Status result = FlushBuffered();
+ if (!handle_.Close() && result.ok()) {
+ result = WindowsError(filename_, ::GetLastError());
+ }
+ return result;
+ }
+
+ Status Flush() override { return FlushBuffered(); }
+
+ Status Sync() override {
+ // On Windows there is no need to sync the parent directory. Its metadata is
+ // updated via the creation of the new file, without an explicit sync.
+ return FlushBuffered();
+ }
+
+ private:
+ Status FlushBuffered() {
+ Status s = WriteRaw(buf_, pos_);
+ pos_ = 0;
+ return s;
+ }
+
+ Status WriteRaw(const char* p, size_t n) {
+ DWORD bytes_written;
+ if (!::WriteFile(handle_.get(), p, static_cast<DWORD>(n), &bytes_written,
+ nullptr)) {
+ return Status::IOError(filename_,
+ GetWindowsErrorMessage(::GetLastError()));
+ }
+ return Status::OK();
+ }
+
+ // buf_[0, pos_-1] contains data to be written to handle_.
+ const std::string filename_;
+ ScopedHandle handle_;
+ char buf_[kWritableFileBufferSize];
+ size_t pos_;
+};
+
+// Lock or unlock the entire file as specified by |lock|. Returns true
+// when successful, false upon failure. Caller should call ::GetLastError()
+// to determine the cause of failure.
+bool LockOrUnlock(HANDLE handle, bool lock) {
+ if (lock) {
+ return ::LockFile(handle,
+ /*dwFileOffsetLow=*/0, /*dwFileOffsetHigh=*/0,
+ /*nNumberOfBytesToLockLow=*/MAXDWORD,
+ /*nNumberOfBytesToLockHigh=*/MAXDWORD);
+ } else {
+ return ::UnlockFile(handle,
+ /*dwFileOffsetLow=*/0, /*dwFileOffsetHigh=*/0,
+ /*nNumberOfBytesToLockLow=*/MAXDWORD,
+ /*nNumberOfBytesToLockHigh=*/MAXDWORD);
+ }
+}
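+
+// Illustrative LockOrUnlock() usage (see LockFile() below):
+// if (!LockOrUnlock(handle.get(), /*lock=*/true)) {
+// return WindowsError("lock " + fname, ::GetLastError());
+// }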
+
+class WindowsFileLock : public FileLock {
+ public:
+ WindowsFileLock(ScopedHandle handle, std::string name)
+ : handle_(std::move(handle)), name_(std::move(name)) {}
+
+ ScopedHandle& handle() { return handle_; }
+ const std::string& name() const { return name_; }
+
+ private:
+ ScopedHandle handle_;
+ std::string name_;
+};
+
+class WindowsEnv : public Env {
+ public:
+ WindowsEnv();
+ ~WindowsEnv() override {
+ static char msg[] = "Destroying Env::Default()\n";
+ fwrite(msg, 1, sizeof(msg), stderr);
+ abort();
+ }
+
+ Status NewSequentialFile(const std::string& fname,
+ SequentialFile** result) override {
+ *result = nullptr;
+ DWORD desired_access = GENERIC_READ;
+ DWORD share_mode = FILE_SHARE_READ;
+ ScopedHandle handle =
+ ::CreateFileA(fname.c_str(), desired_access, share_mode, nullptr,
+ OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, nullptr);
+ if (!handle.is_valid()) {
+ return WindowsError(fname, ::GetLastError());
+ }
+ *result = new WindowsSequentialFile(fname, std::move(handle));
+ return Status::OK();
+ }
+
+ Status NewRandomAccessFile(const std::string& fname,
+ RandomAccessFile** result) override {
+ *result = nullptr;
+ DWORD desired_access = GENERIC_READ;
+ DWORD share_mode = FILE_SHARE_READ;
+ DWORD file_flags = FILE_ATTRIBUTE_READONLY;
+
+ ScopedHandle handle =
+ ::CreateFileA(fname.c_str(), desired_access, share_mode, nullptr,
+ OPEN_EXISTING, file_flags, nullptr);
+ if (!handle.is_valid()) {
+ return WindowsError(fname, ::GetLastError());
+ }
+ if (!mmap_limiter_.Acquire()) {
+ *result = new WindowsRandomAccessFile(fname, std::move(handle));
+ return Status::OK();
+ }
+
+ LARGE_INTEGER file_size;
+ if (!::GetFileSizeEx(handle.get(), &file_size)) {
+ // Release the mmap slot acquired above before returning the error.
+ mmap_limiter_.Release();
+ return WindowsError(fname, ::GetLastError());
+ }
+
+ ScopedHandle mapping =
+ ::CreateFileMappingA(handle.get(),
+ /*security attributes=*/nullptr, PAGE_READONLY,
+ /*dwMaximumSizeHigh=*/0,
+ /*dwMaximumSizeLow=*/0, nullptr);
+ if (mapping.is_valid()) {
+ void* base = MapViewOfFile(mapping.get(), FILE_MAP_READ, 0, 0, 0);
+ if (base) {
+ *result = new WindowsMmapReadableFile(
+ fname, base, static_cast<size_t>(file_size.QuadPart),
+ &mmap_limiter_);
+ return Status::OK();
+ }
+ }
+ Status s = WindowsError(fname, ::GetLastError());
+
+ if (!s.ok()) {
+ mmap_limiter_.Release();
+ }
+ return s;
+ }
+
+ Status NewWritableFile(const std::string& fname,
+ WritableFile** result) override {
+ DWORD desired_access = GENERIC_WRITE;
+ DWORD share_mode = 0;
+
+ ScopedHandle handle =
+ ::CreateFileA(fname.c_str(), desired_access, share_mode, nullptr,
+ CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, nullptr);
+ if (!handle.is_valid()) {
+ *result = nullptr;
+ return WindowsError(fname, ::GetLastError());
+ }
+
+ *result = new WindowsWritableFile(fname, std::move(handle));
+ return Status::OK();
+ }
+
+ Status NewAppendableFile(const std::string& fname,
+ WritableFile** result) override {
+ ScopedHandle handle =
+ ::CreateFileA(fname.c_str(), FILE_APPEND_DATA, 0, nullptr, OPEN_ALWAYS,
+ FILE_ATTRIBUTE_NORMAL, nullptr);
+ if (!handle.is_valid()) {
+ *result = nullptr;
+ return WindowsError(fname, ::GetLastError());
+ }
+
+ *result = new WindowsWritableFile(fname, std::move(handle));
+ return Status::OK();
+ }
+
+ bool FileExists(const std::string& fname) override {
+ return GetFileAttributesA(fname.c_str()) != INVALID_FILE_ATTRIBUTES;
+ }
+
+ Status GetChildren(const std::string& dir,
+ std::vector<std::string>* result) override {
+ const std::string find_pattern = dir + "\\*";
+ WIN32_FIND_DATAA find_data;
+ HANDLE dir_handle = ::FindFirstFileA(find_pattern.c_str(), &find_data);
+ if (dir_handle == INVALID_HANDLE_VALUE) {
+ DWORD last_error = ::GetLastError();
+ if (last_error == ERROR_FILE_NOT_FOUND) {
+ return Status::OK();
+ }
+ return WindowsError(dir, last_error);
+ }
+ do {
+ char base_name[_MAX_FNAME];
+ char ext[_MAX_EXT];
+
+ if (!_splitpath_s(find_data.cFileName, nullptr, 0, nullptr, 0, base_name,
+ ARRAYSIZE(base_name), ext, ARRAYSIZE(ext))) {
+ result->emplace_back(std::string(base_name) + ext);
+ }
+ } while (::FindNextFileA(dir_handle, &find_data));
+ DWORD last_error = ::GetLastError();
+ ::FindClose(dir_handle);
+ if (last_error != ERROR_NO_MORE_FILES) {
+ return WindowsError(dir, last_error);
+ }
+ return Status::OK();
+ }
+
+ Status DeleteFile(const std::string& fname) override {
+ if (!::DeleteFileA(fname.c_str())) {
+ return WindowsError(fname, ::GetLastError());
+ }
+ return Status::OK();
+ }
+
+ Status CreateDir(const std::string& name) override {
+ if (!::CreateDirectoryA(name.c_str(), nullptr)) {
+ return WindowsError(name, ::GetLastError());
+ }
+ return Status::OK();
+ }
+
+ Status DeleteDir(const std::string& name) override {
+ if (!::RemoveDirectoryA(name.c_str())) {
+ return WindowsError(name, ::GetLastError());
+ }
+ return Status::OK();
+ }
+
+ Status GetFileSize(const std::string& fname, uint64_t* size) override {
+ WIN32_FILE_ATTRIBUTE_DATA attrs;
+ if (!::GetFileAttributesExA(fname.c_str(), GetFileExInfoStandard, &attrs)) {
+ return WindowsError(fname, ::GetLastError());
+ }
+ ULARGE_INTEGER file_size;
+ file_size.HighPart = attrs.nFileSizeHigh;
+ file_size.LowPart = attrs.nFileSizeLow;
+ *size = file_size.QuadPart;
+ return Status::OK();
+ }
+
+ Status RenameFile(const std::string& src,
+ const std::string& target) override {
+ // Try a simple move first. It will only succeed when |target| doesn't
+ // already exist.
+ if (::MoveFileA(src.c_str(), target.c_str())) {
+ return Status::OK();
+ }
+ DWORD move_error = ::GetLastError();
+
+ // Try the full-blown replace if the move fails, as ReplaceFile will only
+ // succeed when |target| does exist. When writing to a network share, we
+ // may not be able to change the ACLs. Ignore ACL errors then
+ // (REPLACEFILE_IGNORE_MERGE_ERRORS).
+ if (::ReplaceFileA(target.c_str(), src.c_str(), nullptr,
+ REPLACEFILE_IGNORE_MERGE_ERRORS, nullptr, nullptr)) {
+ return Status::OK();
+ }
+ DWORD replace_error = ::GetLastError();
+ // If ReplaceFile fails with ERROR_FILE_NOT_FOUND or ERROR_PATH_NOT_FOUND,
+ // it is likely that |target| does not exist. In this case, the more
+ // relevant error comes from the call to MoveFile.
+ if (replace_error == ERROR_FILE_NOT_FOUND ||
+ replace_error == ERROR_PATH_NOT_FOUND) {
+ return WindowsError(src, move_error);
+ } else {
+ return WindowsError(src, replace_error);
+ }
+ }
+
+ Status LockFile(const std::string& fname, FileLock** lock) override {
+ *lock = nullptr;
+ Status result;
+ ScopedHandle handle = ::CreateFileA(
+ fname.c_str(), GENERIC_READ | GENERIC_WRITE, FILE_SHARE_READ,
+ /*lpSecurityAttributes=*/nullptr, OPEN_ALWAYS, FILE_ATTRIBUTE_NORMAL,
+ nullptr);
+ if (!handle.is_valid()) {
+ result = WindowsError(fname, ::GetLastError());
+ } else if (!LockOrUnlock(handle.get(), true)) {
+ result = WindowsError("lock " + fname, ::GetLastError());
+ } else {
+ *lock = new WindowsFileLock(std::move(handle), std::move(fname));
+ }
+ return result;
+ }
+
+ Status UnlockFile(FileLock* lock) override {
+ std::unique_ptr<WindowsFileLock> my_lock(
+ reinterpret_cast<WindowsFileLock*>(lock));
+ Status result;
+ if (!LockOrUnlock(my_lock->handle().get(), false)) {
+ result = WindowsError("unlock", ::GetLastError());
+ }
+ return result;
+ }
+
+ void Schedule(void (*function)(void*), void* arg) override;
+
+ void StartThread(void (*function)(void* arg), void* arg) override {
+ std::thread t(function, arg);
+ t.detach();
+ }
+
+ Status GetTestDirectory(std::string* result) override {
+ const char* env = getenv("TEST_TMPDIR");
+ if (env && env[0] != '\0') {
+ *result = env;
+ return Status::OK();
+ }
+
+ char tmp_path[MAX_PATH];
+ if (!GetTempPathA(ARRAYSIZE(tmp_path), tmp_path)) {
+ return WindowsError("GetTempPath", ::GetLastError());
+ }
+ std::stringstream ss;
+ ss << tmp_path << "leveldbtest-" << std::this_thread::get_id();
+ *result = ss.str();
+
+ // Directory may already exist
+ CreateDir(*result);
+ return Status::OK();
+ }
+
+ Status NewLogger(const std::string& filename, Logger** result) override {
+ std::FILE* fp = std::fopen(filename.c_str(), "w");
+ if (fp == nullptr) {
+ *result = nullptr;
+ return WindowsError("NewLogger", ::GetLastError());
+ } else {
+ *result = new WindowsLogger(fp);
+ return Status::OK();
+ }
+ }
+
+ uint64_t NowMicros() override {
+ // GetSystemTimeAsFileTime typically has a resolution of 10-20 msec.
+ // TODO(cmumford): Switch to GetSystemTimePreciseAsFileTime which is
+ // available in Windows 8 and later.
+ FILETIME ft;
+ ::GetSystemTimeAsFileTime(&ft);
+ // Each tick represents a 100-nanosecond interval since January 1, 1601
+ // (UTC).
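+ // For example: 10 ticks of 100 ns each make up 1 microsecond, which is why
+ // the tick count below is divided by 10.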
+ uint64_t num_ticks =
+ (static_cast<uint64_t>(ft.dwHighDateTime) << 32) + ft.dwLowDateTime;
+ return num_ticks / 10;
+ }
+
+ void SleepForMicroseconds(int micros) override {
+ std::this_thread::sleep_for(std::chrono::microseconds(micros));
+ }
+
+ private:
+ // BGThread() is the body of the background thread
+ void BGThread();
+
+ std::mutex mu_;
+ std::condition_variable bgsignal_;
+ bool started_bgthread_;
+
+ // Entry per Schedule() call
+ struct BGItem {
+ void* arg;
+ void (*function)(void*);
+ };
+ typedef std::deque<BGItem> BGQueue;
+ BGQueue queue_;
+
+ Limiter mmap_limiter_;
+};
+
+// Return the maximum number of concurrent mmaps.
+int MaxMmaps() {
+ if (g_mmap_limit >= 0) {
+ return g_mmap_limit;
+ }
+ // Up to 1000 mmaps for 64-bit binaries; none for smaller pointer sizes.
+ g_mmap_limit = sizeof(void*) >= 8 ? 1000 : 0;
+ return g_mmap_limit;
+}
+
+WindowsEnv::WindowsEnv()
+ : started_bgthread_(false), mmap_limiter_(MaxMmaps()) {}
+
+void WindowsEnv::Schedule(void (*function)(void*), void* arg) {
+ std::lock_guard<std::mutex> guard(mu_);
+
+ // Start background thread if necessary
+ if (!started_bgthread_) {
+ started_bgthread_ = true;
+ std::thread t(&WindowsEnv::BGThread, this);
+ t.detach();
+ }
+
+ // If the queue is currently empty, the background thread may currently be
+ // waiting.
+ if (queue_.empty()) {
+ bgsignal_.notify_one();
+ }
+
+ // Add to the work queue
+ queue_.push_back(BGItem());
+ queue_.back().function = function;
+ queue_.back().arg = arg;
+}
+
+void WindowsEnv::BGThread() {
+ while (true) {
+ // Wait until there is an item that is ready to run
+ std::unique_lock<std::mutex> lk(mu_);
+ bgsignal_.wait(lk, [this] { return !queue_.empty(); });
+
+ void (*function)(void*) = queue_.front().function;
+ void* arg = queue_.front().arg;
+ queue_.pop_front();
+
+ lk.unlock();
+ (*function)(arg);
+ }
+}
+
+} // namespace
+
+static std::once_flag once;
+static Env* default_env;
+static void InitDefaultEnv() { default_env = new WindowsEnv(); }
+
+void EnvWindowsTestHelper::SetReadOnlyMMapLimit(int limit) {
+ assert(default_env == nullptr);
+ g_mmap_limit = limit;
+}
+
+Env* Env::Default() {
+ std::call_once(once, InitDefaultEnv);
+ return default_env;
+}
+
+} // namespace leveldb
--- /dev/null
+// Copyright (c) 2018 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include "leveldb/env.h"
+
+#include "port/port.h"
+#include "util/env_windows_test_helper.h"
+#include "util/testharness.h"
+
+namespace leveldb {
+
+static const int kMMapLimit = 4;
+
+class EnvWindowsTest {
+ public:
+ Env* env_;
+ EnvWindowsTest() : env_(Env::Default()) {}
+
+ static void SetFileLimits(int mmap_limit) {
+ EnvWindowsTestHelper::SetReadOnlyMMapLimit(mmap_limit);
+ }
+};
+
+TEST(EnvWindowsTest, TestOpenOnRead) {
+ // Write some test data to a single file that will be opened |n| times.
+ std::string test_dir;
+ ASSERT_OK(env_->GetTestDirectory(&test_dir));
+ std::string test_file = test_dir + "/open_on_read.txt";
+
+ FILE* f = fopen(test_file.c_str(), "w");
+ ASSERT_TRUE(f != nullptr);
+ const char kFileData[] = "abcdefghijklmnopqrstuvwxyz";
+ fputs(kFileData, f);
+ fclose(f);
+
+ // Open the test file more times than the mmap limit allows to force
+ // leveldb::WindowsEnv to switch from mapping the file into memory
+ // to basic file reading.
+ const int kNumFiles = kMMapLimit + 5;
+ leveldb::RandomAccessFile* files[kNumFiles] = {0};
+ for (int i = 0; i < kNumFiles; i++) {
+ ASSERT_OK(env_->NewRandomAccessFile(test_file, &files[i]));
+ }
+ char scratch;
+ Slice read_result;
+ for (int i = 0; i < kNumFiles; i++) {
+ ASSERT_OK(files[i]->Read(i, 1, &read_result, &scratch));
+ ASSERT_EQ(kFileData[i], read_result[0]);
+ }
+ for (int i = 0; i < kNumFiles; i++) {
+ delete files[i];
+ }
+ ASSERT_OK(env_->DeleteFile(test_file));
+}
+
+} // namespace leveldb
+
+int main(int argc, char** argv) {
+ // All tests currently run with the same read-only file limits.
+ leveldb::EnvWindowsTest::SetFileLimits(leveldb::kMMapLimit);
+ return leveldb::test::RunAllTests();
+}
--- /dev/null
+// Copyright (c) 2018 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#ifndef STORAGE_LEVELDB_UTIL_ENV_WINDOWS_TEST_HELPER_H_
+#define STORAGE_LEVELDB_UTIL_ENV_WINDOWS_TEST_HELPER_H_
+
+namespace leveldb {
+
+class EnvWindowsTest;
+
+// A helper for the Windows Env to facilitate testing.
+class EnvWindowsTestHelper {
+ private:
+ friend class CorruptionTest;
+ friend class EnvWindowsTest;
+
+ // Set the maximum number of read-only files that will be mapped via mmap.
+ // Must be called before creating an Env.
+ static void SetReadOnlyMMapLimit(int limit);
+};
+
+} // namespace leveldb
+
+#endif // STORAGE_LEVELDB_UTIL_ENV_WINDOWS_TEST_HELPER_H_
namespace leveldb {
-extern uint32_t Hash(const char* data, size_t n, uint32_t seed);
+uint32_t Hash(const char* data, size_t n, uint32_t seed);
-}
+} // namespace leveldb
#endif // STORAGE_LEVELDB_UTIL_HASH_H_
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
+#include <limits>
#include "leveldb/env.h"
#include "leveldb/slice.h"
}
bool ConsumeDecimalNumber(Slice* in, uint64_t* val) {
- uint64_t v = 0;
- int digits = 0;
- while (!in->empty()) {
- char c = (*in)[0];
- if (c >= '0' && c <= '9') {
- ++digits;
- const int delta = (c - '0');
- static const uint64_t kMaxUint64 = ~static_cast<uint64_t>(0);
- if (v > kMaxUint64/10 ||
- (v == kMaxUint64/10 && delta > kMaxUint64%10)) {
- // Overflow
- return false;
- }
- v = (v * 10) + delta;
- in->remove_prefix(1);
- } else {
+ // Constants that will be optimized away.
+ constexpr const uint64_t kMaxUint64 = std::numeric_limits<uint64_t>::max();
+ constexpr const char kLastDigitOfMaxUint64 =
+ '0' + static_cast<char>(kMaxUint64 % 10);
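+ // For example: kMaxUint64 is 18446744073709551615, so kMaxUint64 / 10 is
+ // 1844674407370955161 and kLastDigitOfMaxUint64 is '5'.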
+
+ uint64_t value = 0;
+
+ // reinterpret_cast from char* to unsigned char* to avoid signedness issues.
+ const unsigned char* start =
+ reinterpret_cast<const unsigned char*>(in->data());
+
+ const unsigned char* end = start + in->size();
+ const unsigned char* current = start;
+ for (; current != end; ++current) {
+ const unsigned char ch = *current;
+ if (ch < '0' || ch > '9')
break;
+
+ // Overflow check.
+ // kMaxUint64 / 10 is also constant and will be optimized away.
+ if (value > kMaxUint64 / 10 ||
+ (value == kMaxUint64 / 10 && ch > kLastDigitOfMaxUint64)) {
+ return false;
}
+
+ value = (value * 10) + (ch - '0');
}
- *val = v;
- return (digits > 0);
+
+ *val = value;
+ const size_t digits_consumed = current - start;
+ in->remove_prefix(digits_consumed);
+ return digits_consumed != 0;
}
} // namespace leveldb
class WritableFile;
// Append a human-readable printout of "num" to *str
-extern void AppendNumberTo(std::string* str, uint64_t num);
+void AppendNumberTo(std::string* str, uint64_t num);
// Append a human-readable printout of "value" to *str.
// Escapes any non-printable characters found in "value".
-extern void AppendEscapedStringTo(std::string* str, const Slice& value);
+void AppendEscapedStringTo(std::string* str, const Slice& value);
// Return a human-readable printout of "num"
-extern std::string NumberToString(uint64_t num);
+std::string NumberToString(uint64_t num);
// Return a human-readable version of "value".
// Escapes any non-printable characters found in "value".
-extern std::string EscapeString(const Slice& value);
+std::string EscapeString(const Slice& value);
// Parse a human-readable number from "*in" into *value. On success,
// advances "*in" past the consumed number and sets "*val" to the
// numeric value. Otherwise, returns false and leaves *in in an
// unspecified state.
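+//
+// For example (illustrative): given the input "123abc", this sets *val to
+// 123, advances "*in" past "123", and returns true.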
-extern bool ConsumeDecimalNumber(Slice* in, uint64_t* val);
+bool ConsumeDecimalNumber(Slice* in, uint64_t* val);
} // namespace leveldb
--- /dev/null
+// Copyright (c) 2018 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include <limits>
+#include <string>
+
+#include "leveldb/slice.h"
+#include "util/logging.h"
+#include "util/testharness.h"
+
+namespace leveldb {
+
+class Logging { };
+
+TEST(Logging, NumberToString) {
+ ASSERT_EQ("0", NumberToString(0));
+ ASSERT_EQ("1", NumberToString(1));
+ ASSERT_EQ("9", NumberToString(9));
+
+ ASSERT_EQ("10", NumberToString(10));
+ ASSERT_EQ("11", NumberToString(11));
+ ASSERT_EQ("19", NumberToString(19));
+ ASSERT_EQ("99", NumberToString(99));
+
+ ASSERT_EQ("100", NumberToString(100));
+ ASSERT_EQ("109", NumberToString(109));
+ ASSERT_EQ("190", NumberToString(190));
+ ASSERT_EQ("123", NumberToString(123));
+ ASSERT_EQ("12345678", NumberToString(12345678));
+
+ static_assert(std::numeric_limits<uint64_t>::max() == 18446744073709551615U,
+ "Test consistency check");
+ ASSERT_EQ("18446744073709551000", NumberToString(18446744073709551000U));
+ ASSERT_EQ("18446744073709551600", NumberToString(18446744073709551600U));
+ ASSERT_EQ("18446744073709551610", NumberToString(18446744073709551610U));
+ ASSERT_EQ("18446744073709551614", NumberToString(18446744073709551614U));
+ ASSERT_EQ("18446744073709551615", NumberToString(18446744073709551615U));
+}
+
+void ConsumeDecimalNumberRoundtripTest(uint64_t number,
+ const std::string& padding = "") {
+ std::string decimal_number = NumberToString(number);
+ std::string input_string = decimal_number + padding;
+ Slice input(input_string);
+ Slice output = input;
+ uint64_t result;
+ ASSERT_TRUE(ConsumeDecimalNumber(&output, &result));
+ ASSERT_EQ(number, result);
+ ASSERT_EQ(decimal_number.size(), output.data() - input.data());
+ ASSERT_EQ(padding.size(), output.size());
+}
+
+TEST(Logging, ConsumeDecimalNumberRoundtrip) {
+ ConsumeDecimalNumberRoundtripTest(0);
+ ConsumeDecimalNumberRoundtripTest(1);
+ ConsumeDecimalNumberRoundtripTest(9);
+
+ ConsumeDecimalNumberRoundtripTest(10);
+ ConsumeDecimalNumberRoundtripTest(11);
+ ConsumeDecimalNumberRoundtripTest(19);
+ ConsumeDecimalNumberRoundtripTest(99);
+
+ ConsumeDecimalNumberRoundtripTest(100);
+ ConsumeDecimalNumberRoundtripTest(109);
+ ConsumeDecimalNumberRoundtripTest(190);
+ ConsumeDecimalNumberRoundtripTest(123);
+  ConsumeDecimalNumberRoundtripTest(12345678);
+
+ for (uint64_t i = 0; i < 100; ++i) {
+ uint64_t large_number = std::numeric_limits<uint64_t>::max() - i;
+ ConsumeDecimalNumberRoundtripTest(large_number);
+ }
+}
+
+TEST(Logging, ConsumeDecimalNumberRoundtripWithPadding) {
+ ConsumeDecimalNumberRoundtripTest(0, " ");
+ ConsumeDecimalNumberRoundtripTest(1, "abc");
+ ConsumeDecimalNumberRoundtripTest(9, "x");
+
+ ConsumeDecimalNumberRoundtripTest(10, "_");
+ ConsumeDecimalNumberRoundtripTest(11, std::string("\0\0\0", 3));
+ ConsumeDecimalNumberRoundtripTest(19, "abc");
+ ConsumeDecimalNumberRoundtripTest(99, "padding");
+
+ ConsumeDecimalNumberRoundtripTest(100, " ");
+
+ for (uint64_t i = 0; i < 100; ++i) {
+ uint64_t large_number = std::numeric_limits<uint64_t>::max() - i;
+ ConsumeDecimalNumberRoundtripTest(large_number, "pad");
+ }
+}
+
+void ConsumeDecimalNumberOverflowTest(const std::string& input_string) {
+ Slice input(input_string);
+ Slice output = input;
+ uint64_t result;
+ ASSERT_EQ(false, ConsumeDecimalNumber(&output, &result));
+}
+
+TEST(Logging, ConsumeDecimalNumberOverflow) {
+ static_assert(std::numeric_limits<uint64_t>::max() == 18446744073709551615U,
+ "Test consistency check");
+ ConsumeDecimalNumberOverflowTest("18446744073709551616");
+ ConsumeDecimalNumberOverflowTest("18446744073709551617");
+ ConsumeDecimalNumberOverflowTest("18446744073709551618");
+ ConsumeDecimalNumberOverflowTest("18446744073709551619");
+ ConsumeDecimalNumberOverflowTest("18446744073709551620");
+ ConsumeDecimalNumberOverflowTest("18446744073709551621");
+ ConsumeDecimalNumberOverflowTest("18446744073709551622");
+ ConsumeDecimalNumberOverflowTest("18446744073709551623");
+ ConsumeDecimalNumberOverflowTest("18446744073709551624");
+ ConsumeDecimalNumberOverflowTest("18446744073709551625");
+ ConsumeDecimalNumberOverflowTest("18446744073709551626");
+
+ ConsumeDecimalNumberOverflowTest("18446744073709551700");
+
+ ConsumeDecimalNumberOverflowTest("99999999999999999999");
+}
+
+void ConsumeDecimalNumberNoDigitsTest(const std::string& input_string) {
+ Slice input(input_string);
+ Slice output = input;
+ uint64_t result;
+ ASSERT_EQ(false, ConsumeDecimalNumber(&output, &result));
+ ASSERT_EQ(input.data(), output.data());
+ ASSERT_EQ(input.size(), output.size());
+}
+
+TEST(Logging, ConsumeDecimalNumberNoDigits) {
+ ConsumeDecimalNumberNoDigitsTest("");
+ ConsumeDecimalNumberNoDigitsTest(" ");
+ ConsumeDecimalNumberNoDigitsTest("a");
+ ConsumeDecimalNumberNoDigitsTest(" 123");
+ ConsumeDecimalNumberNoDigitsTest("a123");
+ ConsumeDecimalNumberNoDigitsTest(std::string("\000123", 4));
+ ConsumeDecimalNumberNoDigitsTest(std::string("\177123", 4));
+ ConsumeDecimalNumberNoDigitsTest(std::string("\377123", 4));
+}
+
+} // namespace leveldb
+
+int main(int argc, char** argv) {
+ return leveldb::test::RunAllTests();
+}
}
~MutexLock() UNLOCK_FUNCTION() { this->mu_->Unlock(); }
+ MutexLock(const MutexLock&) = delete;
+ MutexLock& operator=(const MutexLock&) = delete;
+
private:
port::Mutex *const mu_;
- // No copying allowed
- MutexLock(const MutexLock&);
- void operator=(const MutexLock&);
};
} // namespace leveldb
--- /dev/null
+// Copyright (c) 2018 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#ifndef STORAGE_LEVELDB_UTIL_NO_DESTRUCTOR_H_
+#define STORAGE_LEVELDB_UTIL_NO_DESTRUCTOR_H_
+
+#include <new>
+#include <type_traits>
+#include <utility>
+
+namespace leveldb {
+
+// Wraps an instance whose destructor is never called.
+//
+// This is intended for use with function-level static variables.
+template<typename InstanceType>
+class NoDestructor {
+ public:
+ template <typename... ConstructorArgTypes>
+ explicit NoDestructor(ConstructorArgTypes&&... constructor_args) {
+ static_assert(sizeof(instance_storage_) >= sizeof(InstanceType),
+ "instance_storage_ is not large enough to hold the instance");
+ static_assert(
+ alignof(decltype(instance_storage_)) >= alignof(InstanceType),
+ "instance_storage_ does not meet the instance's alignment requirement");
+ new (&instance_storage_) InstanceType(
+ std::forward<ConstructorArgTypes>(constructor_args)...);
+ }
+
+ ~NoDestructor() = default;
+
+ NoDestructor(const NoDestructor&) = delete;
+ NoDestructor& operator=(const NoDestructor&) = delete;
+
+ InstanceType* get() {
+ return reinterpret_cast<InstanceType*>(&instance_storage_);
+ }
+
+ private:
+ typename
+ std::aligned_storage<sizeof(InstanceType), alignof(InstanceType)>::type
+ instance_storage_;
+};
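+
+// Illustrative usage sketch (an assumption, not taken from this header): a
+// function-level static singleton whose destructor must never run at process
+// shutdown.
+//
+//   const Comparator* BytewiseComparator() {
+//     static NoDestructor<BytewiseComparatorImpl> singleton;
+//     return singleton.get();
+//   }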
+
+} // namespace leveldb
+
+#endif // STORAGE_LEVELDB_UTIL_NO_DESTRUCTOR_H_
--- /dev/null
+// Copyright (c) 2018 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include <cstdint>
+#include <cstdlib>
+#include <utility>
+
+#include "util/no_destructor.h"
+#include "util/testharness.h"
+
+namespace leveldb {
+
+namespace {
+
+struct DoNotDestruct {
+ public:
+ DoNotDestruct(uint32_t a, uint64_t b) : a(a), b(b) {}
+ ~DoNotDestruct() { std::abort(); }
+
+ // Used to check constructor argument forwarding.
+ uint32_t a;
+ uint64_t b;
+};
+
+constexpr const uint32_t kGoldenA = 0xdeadbeef;
+constexpr const uint64_t kGoldenB = 0xaabbccddeeffaabb;
+
+} // namespace
+
+class NoDestructorTest { };
+
+TEST(NoDestructorTest, StackInstance) {
+ NoDestructor<DoNotDestruct> instance(kGoldenA, kGoldenB);
+ ASSERT_EQ(kGoldenA, instance.get()->a);
+ ASSERT_EQ(kGoldenB, instance.get()->b);
+}
+
+TEST(NoDestructorTest, StaticInstance) {
+ static NoDestructor<DoNotDestruct> instance(kGoldenA, kGoldenB);
+ ASSERT_EQ(kGoldenA, instance.get()->a);
+ ASSERT_EQ(kGoldenB, instance.get()->b);
+}
+
+} // namespace leveldb
+
+int main(int argc, char** argv) {
+ return leveldb::test::RunAllTests();
+}
Options::Options()
: comparator(BytewiseComparator()),
- create_if_missing(false),
- error_if_exists(false),
- paranoid_checks(false),
- env(Env::Default()),
- info_log(NULL),
- write_buffer_size(4<<20),
- max_open_files(1000),
- block_cache(NULL),
- block_size(4096),
- block_restart_interval(16),
- max_file_size(2<<20),
- compression(kSnappyCompression),
- reuse_logs(false),
- filter_policy(NULL) {
+ env(Env::Default()) {
}
} // namespace leveldb
#ifndef STORAGE_LEVELDB_UTIL_POSIX_LOGGER_H_
#define STORAGE_LEVELDB_UTIL_POSIX_LOGGER_H_
-#include <algorithm>
-#include <stdio.h>
#include <sys/time.h>
-#include <time.h>
+
+#include <cassert>
+#include <cstdarg>
+#include <cstdio>
+#include <ctime>
+#include <sstream>
+#include <thread>
+
#include "leveldb/env.h"
namespace leveldb {
-class PosixLogger : public Logger {
- private:
- FILE* file_;
- uint64_t (*gettid_)(); // Return the thread id for the current thread
+class PosixLogger final : public Logger {
public:
- PosixLogger(FILE* f, uint64_t (*gettid)()) : file_(f), gettid_(gettid) { }
- virtual ~PosixLogger() {
- fclose(file_);
+ // Creates a logger that writes to the given file.
+ //
+ // The PosixLogger instance takes ownership of the file handle.
+ explicit PosixLogger(std::FILE* fp) : fp_(fp) {
+ assert(fp != nullptr);
}
- virtual void Logv(const char* format, va_list ap) {
- const uint64_t thread_id = (*gettid_)();
-
- // We try twice: the first time with a fixed-size stack allocated buffer,
- // and the second time with a much larger dynamically allocated buffer.
- char buffer[500];
- for (int iter = 0; iter < 2; iter++) {
- char* base;
- int bufsize;
- if (iter == 0) {
- bufsize = sizeof(buffer);
- base = buffer;
- } else {
- bufsize = 30000;
- base = new char[bufsize];
- }
- char* p = base;
- char* limit = base + bufsize;
-
- struct timeval now_tv;
- gettimeofday(&now_tv, NULL);
- const time_t seconds = now_tv.tv_sec;
- struct tm t;
- localtime_r(&seconds, &t);
- p += snprintf(p, limit - p,
- "%04d/%02d/%02d-%02d:%02d:%02d.%06d %llx ",
- t.tm_year + 1900,
- t.tm_mon + 1,
- t.tm_mday,
- t.tm_hour,
- t.tm_min,
- t.tm_sec,
- static_cast<int>(now_tv.tv_usec),
- static_cast<long long unsigned int>(thread_id));
-
- // Print the message
- if (p < limit) {
- va_list backup_ap;
- va_copy(backup_ap, ap);
- p += vsnprintf(p, limit - p, format, backup_ap);
- va_end(backup_ap);
- }
- // Truncate to available space if necessary
- if (p >= limit) {
- if (iter == 0) {
- continue; // Try again with larger buffer
- } else {
- p = limit - 1;
+ ~PosixLogger() override {
+ std::fclose(fp_);
+ }
+
+ void Logv(const char* format, va_list arguments) override {
+ // Record the time as close to the Logv() call as possible.
+ struct ::timeval now_timeval;
+ ::gettimeofday(&now_timeval, nullptr);
+ const std::time_t now_seconds = now_timeval.tv_sec;
+ struct std::tm now_components;
+ ::localtime_r(&now_seconds, &now_components);
+
+ // Record the thread ID.
+ constexpr const int kMaxThreadIdSize = 32;
+ std::ostringstream thread_stream;
+ thread_stream << std::this_thread::get_id();
+ std::string thread_id = thread_stream.str();
+ if (thread_id.size() > kMaxThreadIdSize) {
+ thread_id.resize(kMaxThreadIdSize);
+ }
+
+ // We first attempt to print into a stack-allocated buffer. If this attempt
+ // fails, we make a second attempt with a dynamically allocated buffer.
+ constexpr const int kStackBufferSize = 512;
+ char stack_buffer[kStackBufferSize];
+ static_assert(sizeof(stack_buffer) == static_cast<size_t>(kStackBufferSize),
+ "sizeof(char) is expected to be 1 in C++");
+
+ int dynamic_buffer_size = 0; // Computed in the first iteration.
+ for (int iteration = 0; iteration < 2; ++iteration) {
+ const int buffer_size =
+ (iteration == 0) ? kStackBufferSize : dynamic_buffer_size;
+ char* const buffer =
+ (iteration == 0) ? stack_buffer : new char[dynamic_buffer_size];
+
+ // Print the header into the buffer.
+      int buffer_offset = std::snprintf(
+ buffer, buffer_size,
+ "%04d/%02d/%02d-%02d:%02d:%02d.%06d %s ",
+ now_components.tm_year + 1900,
+ now_components.tm_mon + 1,
+ now_components.tm_mday,
+ now_components.tm_hour,
+ now_components.tm_min,
+ now_components.tm_sec,
+ static_cast<int>(now_timeval.tv_usec),
+ thread_id.c_str());
+
+ // The header can be at most 28 characters (10 date + 15 time +
+ // 3 delimiters) plus the thread ID, which should fit comfortably into the
+ // static buffer.
+ assert(buffer_offset <= 28 + kMaxThreadIdSize);
+ static_assert(28 + kMaxThreadIdSize < kStackBufferSize,
+ "stack-allocated buffer may not fit the message header");
+ assert(buffer_offset < buffer_size);
+
+ // Print the message into the buffer.
+ std::va_list arguments_copy;
+ va_copy(arguments_copy, arguments);
+ buffer_offset += std::vsnprintf(buffer + buffer_offset,
+ buffer_size - buffer_offset, format,
+ arguments_copy);
+ va_end(arguments_copy);
+
+ // The code below may append a newline at the end of the buffer, which
+ // requires an extra character.
+ if (buffer_offset >= buffer_size - 1) {
+ // The message did not fit into the buffer.
+ if (iteration == 0) {
+ // Re-run the loop and use a dynamically-allocated buffer. The buffer
+ // will be large enough for the log message, an extra newline and a
+ // null terminator.
+ dynamic_buffer_size = buffer_offset + 2;
+ continue;
}
+
+ // The dynamically-allocated buffer was incorrectly sized. This should
+ // not happen, assuming a correct implementation of (v)snprintf. Fail
+ // in tests, recover by truncating the log message in production.
+ assert(false);
+ buffer_offset = buffer_size - 1;
}
- // Add newline if necessary
- if (p == base || p[-1] != '\n') {
- *p++ = '\n';
+ // Add a newline if necessary.
+ if (buffer[buffer_offset - 1] != '\n') {
+ buffer[buffer_offset] = '\n';
+ ++buffer_offset;
}
- assert(p <= limit);
- fwrite(base, 1, p - base, file_);
- fflush(file_);
- if (base != buffer) {
- delete[] base;
+ assert(buffer_offset <= buffer_size);
+ std::fwrite(buffer, 1, buffer_offset, fp_);
+ std::fflush(fp_);
+
+ if (iteration != 0) {
+ delete[] buffer;
}
break;
}
}
+
+ private:
+ std::FILE* const fp_;
};
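+
+// Illustrative construction sketch (an assumption about the caller, not part
+// of this header): a caller such as Env::NewLogger() would open the log file
+// and hand the handle to the logger, which closes it in its destructor.
+//
+//   std::FILE* fp = std::fopen(filename.c_str(), "w");
+//   if (fp == nullptr) {
+//     *result = nullptr;
+//     return Status::IOError(filename, "could not open log file");
+//   }
+//   *result = new PosixLogger(fp);  // The logger now owns fp.
+//   return Status::OK();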
} // namespace leveldb
}
std::string Status::ToString() const {
- if (state_ == NULL) {
+ if (state_ == nullptr) {
return "OK";
} else {
char tmp[30];
--- /dev/null
+// Copyright (c) 2018 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include <utility>
+
+#include "leveldb/slice.h"
+#include "leveldb/status.h"
+#include "util/testharness.h"
+
+namespace leveldb {
+
+TEST(Status, MoveConstructor) {
+ {
+ Status ok = Status::OK();
+ Status ok2 = std::move(ok);
+
+ ASSERT_TRUE(ok2.ok());
+ }
+
+ {
+ Status status = Status::NotFound("custom NotFound status message");
+ Status status2 = std::move(status);
+
+ ASSERT_TRUE(status2.IsNotFound());
+ ASSERT_EQ("NotFound: custom NotFound status message", status2.ToString());
+ }
+
+ {
+ Status self_moved = Status::IOError("custom IOError status message");
+
+ // Needed to bypass compiler warning about explicit move-assignment.
+ Status& self_moved_reference = self_moved;
+ self_moved_reference = std::move(self_moved);
+ }
+}
+
+} // namespace leveldb
+
+int main(int argc, char** argv) {
+ return leveldb::test::RunAllTests();
+}
#include "util/testharness.h"
-#include <string>
#include <stdlib.h>
#include <sys/stat.h>
#include <sys/types.h>
+#include <string>
+#include <vector>
+
+#include "leveldb/env.h"
+
namespace leveldb {
namespace test {
}
bool RegisterTest(const char* base, const char* name, void (*func)()) {
- if (tests == NULL) {
+ if (tests == nullptr) {
tests = new std::vector<Test>;
}
Test t;
const char* matcher = getenv("LEVELDB_TESTS");
int num = 0;
- if (tests != NULL) {
+ if (tests != nullptr) {
for (size_t i = 0; i < tests->size(); i++) {
const Test& t = (*tests)[i];
- if (matcher != NULL) {
+ if (matcher != nullptr) {
std::string name = t.base;
name.push_back('.');
name.append(t.name);
- if (strstr(name.c_str(), matcher) == NULL) {
+ if (strstr(name.c_str(), matcher) == nullptr) {
continue;
}
}
int RandomSeed() {
const char* env = getenv("TEST_RANDOM_SEED");
- int result = (env != NULL ? atoi(env) : 301);
+ int result = (env != nullptr ? atoi(env) : 301);
if (result <= 0) {
result = 301;
}
#include <stdio.h>
#include <stdlib.h>
+
#include <sstream>
-#include "leveldb/env.h"
-#include "leveldb/slice.h"
-#include "util/random.h"
+
+#include "leveldb/status.h"
namespace leveldb {
namespace test {
//
// Returns 0 if all tests pass.
// Dies or returns a non-zero value if some test fails.
-extern int RunAllTests();
+int RunAllTests();
// Return the directory to use for temporary storage.
-extern std::string TmpDir();
+std::string TmpDir();
// Return a randomization seed for this run. Typically returns the
// same number on repeated invocations of this binary, but automated
// runs may be able to vary the seed.
-extern int RandomSeed();
+int RandomSeed();
// An instance of Tester is allocated to hold temporary state during
// the execution of an assertion.
return *this;
}
-#define BINARY_OP(name,op) \
+#define BINARY_OP(name, op) \
template <class X, class Y> \
Tester& name(const X& x, const Y& y) { \
- if (! (x op y)) { \
+ if (!(x op y)) { \
ss_ << " failed: " << x << (" " #op " ") << y; \
ok_ = false; \
} \
#define ASSERT_LE(a,b) ::leveldb::test::Tester(__FILE__, __LINE__).IsLe((a),(b))
#define ASSERT_LT(a,b) ::leveldb::test::Tester(__FILE__, __LINE__).IsLt((a),(b))
-#define TCONCAT(a,b) TCONCAT1(a,b)
-#define TCONCAT1(a,b) a##b
-
-#define TEST(base,name) \
-class TCONCAT(_Test_,name) : public base { \
- public: \
- void _Run(); \
- static void _RunIt() { \
- TCONCAT(_Test_,name) t; \
- t._Run(); \
- } \
-}; \
-bool TCONCAT(_Test_ignored_,name) = \
- ::leveldb::test::RegisterTest(#base, #name, &TCONCAT(_Test_,name)::_RunIt); \
-void TCONCAT(_Test_,name)::_Run()
+#define TCONCAT(a, b) TCONCAT1(a, b)
+#define TCONCAT1(a, b) a##b
+
+#define TEST(base, name) \
+class TCONCAT(_Test_, name) : public base { \
+ public: \
+ void _Run(); \
+ static void _RunIt() { \
+ TCONCAT(_Test_, name) t; \
+ t._Run(); \
+ } \
+}; \
+bool TCONCAT(_Test_ignored_, name) = \
+ ::leveldb::test::RegisterTest(#base, #name, &TCONCAT(_Test_, name)::_RunIt); \
+void TCONCAT(_Test_, name)::_Run()
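+
+// For illustration (not part of the original header): TEST(Logging, Foo)
+// expands to a class _Test_Foo derived from Logging, a registrar boolean
+// _Test_ignored_Foo initialized via RegisterTest("Logging", "Foo",
+// &_Test_Foo::_RunIt), and the test body becomes the definition of
+// _Test_Foo::_Run().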
// Register the specified test. Typically not used directly, but
// invoked via the macro expansion of TEST.
-extern bool RegisterTest(const char* base, const char* name, void (*func)());
-
+bool RegisterTest(const char* base, const char* name, void (*func)());
} // namespace test
} // namespace leveldb
}
-extern Slice CompressibleString(Random* rnd, double compressed_fraction,
- size_t len, std::string* dst) {
+Slice CompressibleString(Random* rnd, double compressed_fraction,
+ size_t len, std::string* dst) {
int raw = static_cast<int>(len * compressed_fraction);
if (raw < 1) raw = 1;
std::string raw_data;
#ifndef STORAGE_LEVELDB_UTIL_TESTUTIL_H_
#define STORAGE_LEVELDB_UTIL_TESTUTIL_H_
+#include "helpers/memenv/memenv.h"
#include "leveldb/env.h"
#include "leveldb/slice.h"
#include "util/random.h"
// Store in *dst a random string of length "len" and return a Slice that
// references the generated data.
-extern Slice RandomString(Random* rnd, int len, std::string* dst);
+Slice RandomString(Random* rnd, int len, std::string* dst);
// Return a random key with the specified length that may contain interesting
// characters (e.g. \x00, \xff, etc.).
-extern std::string RandomKey(Random* rnd, int len);
+std::string RandomKey(Random* rnd, int len);
// Store in *dst a string of length "len" that will compress to
// "N*compressed_fraction" bytes and return a Slice that references
// the generated data.
-extern Slice CompressibleString(Random* rnd, double compressed_fraction,
- size_t len, std::string* dst);
+Slice CompressibleString(Random* rnd, double compressed_fraction,
+ size_t len, std::string* dst);
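+
+// Illustrative sketch (not part of this header): generating roughly
+// half-compressible test data.
+//
+//   Random rnd(301);
+//   std::string storage;
+//   Slice data = CompressibleString(&rnd, 0.5, 4096, &storage);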
// A wrapper that allows injection of errors.
class ErrorEnv : public EnvWrapper {
bool writable_file_error_;
int num_writable_file_errors_;
- ErrorEnv() : EnvWrapper(Env::Default()),
+ ErrorEnv() : EnvWrapper(NewMemEnv(Env::Default())),
writable_file_error_(false),
num_writable_file_errors_(0) { }
+ ~ErrorEnv() override {
+ delete target();
+ }
- virtual Status NewWritableFile(const std::string& fname,
- WritableFile** result) {
+ Status NewWritableFile(const std::string& fname,
+ WritableFile** result) override {
if (writable_file_error_) {
++num_writable_file_errors_;
- *result = NULL;
+ *result = nullptr;
return Status::IOError(fname, "fake error");
}
return target()->NewWritableFile(fname, result);
}
- virtual Status NewAppendableFile(const std::string& fname,
- WritableFile** result) {
+ Status NewAppendableFile(const std::string& fname,
+ WritableFile** result) override {
if (writable_file_error_) {
++num_writable_file_errors_;
- *result = NULL;
+ *result = nullptr;
return Status::IOError(fname, "fake error");
}
return target()->NewAppendableFile(fname, result);
--- /dev/null
+// Copyright (c) 2018 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+//
+// Logger implementation for the Windows platform.
+
+#ifndef STORAGE_LEVELDB_UTIL_WINDOWS_LOGGER_H_
+#define STORAGE_LEVELDB_UTIL_WINDOWS_LOGGER_H_
+
+#include <windows.h>
+
+#include <cassert>
+#include <cstdarg>
+#include <cstdio>
+#include <ctime>
+#include <sstream>
+#include <thread>
+
+#include "leveldb/env.h"
+
+namespace leveldb {
+
+class WindowsLogger final : public Logger {
+ public:
+ // Creates a logger that writes to the given file.
+ //
+  // The WindowsLogger instance takes ownership of the file handle.
+ explicit WindowsLogger(std::FILE* fp) : fp_(fp) {
+ assert(fp != nullptr);
+ }
+
+ ~WindowsLogger() override {
+ std::fclose(fp_);
+ }
+
+ void Logv(const char* format, va_list arguments) override {
+ // Record the time as close to the Logv() call as possible.
+ SYSTEMTIME now_components;
+ ::GetLocalTime(&now_components);
+
+ // Record the thread ID.
+ constexpr const int kMaxThreadIdSize = 32;
+ std::ostringstream thread_stream;
+ thread_stream << std::this_thread::get_id();
+ std::string thread_id = thread_stream.str();
+ if (thread_id.size() > kMaxThreadIdSize) {
+ thread_id.resize(kMaxThreadIdSize);
+ }
+
+ // We first attempt to print into a stack-allocated buffer. If this attempt
+ // fails, we make a second attempt with a dynamically allocated buffer.
+ constexpr const int kStackBufferSize = 512;
+ char stack_buffer[kStackBufferSize];
+ static_assert(sizeof(stack_buffer) == static_cast<size_t>(kStackBufferSize),
+ "sizeof(char) is expected to be 1 in C++");
+
+ int dynamic_buffer_size = 0; // Computed in the first iteration.
+ for (int iteration = 0; iteration < 2; ++iteration) {
+ const int buffer_size =
+ (iteration == 0) ? kStackBufferSize : dynamic_buffer_size;
+ char* const buffer =
+ (iteration == 0) ? stack_buffer : new char[dynamic_buffer_size];
+
+ // Print the header into the buffer.
+      int buffer_offset = std::snprintf(
+ buffer, buffer_size,
+ "%04d/%02d/%02d-%02d:%02d:%02d.%06d %s ",
+ now_components.wYear,
+ now_components.wMonth,
+ now_components.wDay,
+ now_components.wHour,
+ now_components.wMinute,
+ now_components.wSecond,
+ static_cast<int>(now_components.wMilliseconds * 1000),
+ thread_id.c_str());
+
+ // The header can be at most 28 characters (10 date + 15 time +
+ // 3 delimiters) plus the thread ID, which should fit comfortably into the
+ // static buffer.
+ assert(buffer_offset <= 28 + kMaxThreadIdSize);
+ static_assert(28 + kMaxThreadIdSize < kStackBufferSize,
+ "stack-allocated buffer may not fit the message header");
+ assert(buffer_offset < buffer_size);
+
+ // Print the message into the buffer.
+ std::va_list arguments_copy;
+ va_copy(arguments_copy, arguments);
+ buffer_offset += std::vsnprintf(buffer + buffer_offset,
+ buffer_size - buffer_offset, format,
+ arguments_copy);
+ va_end(arguments_copy);
+
+ // The code below may append a newline at the end of the buffer, which
+ // requires an extra character.
+ if (buffer_offset >= buffer_size - 1) {
+ // The message did not fit into the buffer.
+ if (iteration == 0) {
+ // Re-run the loop and use a dynamically-allocated buffer. The buffer
+ // will be large enough for the log message, an extra newline and a
+ // null terminator.
+ dynamic_buffer_size = buffer_offset + 2;
+ continue;
+ }
+
+ // The dynamically-allocated buffer was incorrectly sized. This should
+ // not happen, assuming a correct implementation of (v)snprintf. Fail
+ // in tests, recover by truncating the log message in production.
+ assert(false);
+ buffer_offset = buffer_size - 1;
+ }
+
+ // Add a newline if necessary.
+ if (buffer[buffer_offset - 1] != '\n') {
+ buffer[buffer_offset] = '\n';
+ ++buffer_offset;
+ }
+
+ assert(buffer_offset <= buffer_size);
+ std::fwrite(buffer, 1, buffer_offset, fp_);
+ std::fflush(fp_);
+
+ if (iteration != 0) {
+ delete[] buffer;
+ }
+ break;
+ }
+ }
+
+ private:
+ std::FILE* const fp_;
+};
+
+} // namespace leveldb
+
+#endif // STORAGE_LEVELDB_UTIL_WINDOWS_LOGGER_H_