--- /dev/null
+# Run manually to reformat a file:
+# clang-format -i --style=file <file>
+# Or to reformat every file in the tree:
+# find . -iname '*.cc' -o -iname '*.h' -o -iname '*.h.in' | xargs clang-format -i --style=file
+BasedOnStyle: Google
+DerivePointerAlignment: false
+
+# Public headers are in a different location in the internal Google repository.
+# Order them so that when imported to the authoritative repository they will be
+# in correct alphabetical order.
+IncludeCategories:
+  - Regex: '^(<|"(db|helpers)/)'
+    Priority: 1
+  - Regex: '^"(leveldb)/'
+    Priority: 2
+  - Regex: '^(<|"(issues|port|table|third_party|util)/)'
+    Priority: 3
+  - Regex: '.*'
+    Priority: 4
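For illustration, a hypothetical include block would be grouped like this under the categories above. Note that `<...>` includes also match the first regex, so system headers sort with the first group; `"foo/other.h"` is a made-up name for the catch-all case:

```cpp
// Hypothetical ordering produced by the categories above.
#include "db/dbformat.h"    // priority 1: matches '^"(db|helpers)/'
#include "leveldb/slice.h"  // priority 2: matches '^"(leveldb)/'
#include "util/coding.h"    // priority 3: issues/port/table/third_party/util
#include "foo/other.h"      // priority 4: catch-all '.*'
```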
-# Build matrix / environment variable are explained on:
+# Build matrix / environment variables are explained on:
# http://about.travis-ci.org/docs/user/build-configuration/
# This file can be validated on: http://lint.travis-ci.org/
-dist: xenial
language: cpp
+dist: xenial
+osx_image: xcode10.2
compiler:
- - gcc
- - clang
+- gcc
+- clang
os:
- - linux
- - osx
+- linux
+- osx
env:
- - BUILD_TYPE=Debug
- - BUILD_TYPE=RelWithDebInfo
-
-matrix:
- exclude:
- # GCC fails on recent Travis OSX images.
- # https://github.com/travis-ci/travis-ci/issues/9640
- - compiler: gcc
- os: osx
+- BUILD_TYPE=Debug
+- BUILD_TYPE=RelWithDebInfo
addons:
apt:
sources:
- - llvm-toolchain-xenial-7
+ - llvm-toolchain-xenial-8
- ubuntu-toolchain-r-test
packages:
- - clang-7
+ - clang-8
- cmake
- gcc-8
- g++-8
- ninja-build
homebrew:
packages:
+ - cmake
- crc32c
+ - gcc@8
- gperftools
- kyotocabinet
- - gcc@7
+ - llvm@8
- ninja
- snappy
- sqlite3
-
-before_install:
-# The Travis VM image for Mac already has a link at /usr/local/include/c++,
-# causing Homebrew's gcc installation to error out. This was reported to
-# Homebrew maintainers at https://github.com/Homebrew/brew/issues/1742 and
-# removing the link emerged as a workaround.
-- if [ "$TRAVIS_OS_NAME" == "osx" ]; then rm -f /usr/local/include/c++ ; fi
+ update: true
install:
-# /usr/bin/gcc is stuck to old versions on both Linux and OSX.
+# The following Homebrew packages aren't linked by default, so their binaries
+# need to be prepended to PATH explicitly.
+- if [ "$TRAVIS_OS_NAME" == "osx" ]; then
+ export PATH="$(brew --prefix llvm)/bin:$PATH";
+ fi
+# /usr/bin/gcc points to an older compiler on both Linux and macOS.
- if [ "$CXX" = "g++" ]; then export CXX="g++-8" CC="gcc-8"; fi
+# /usr/bin/clang points to an older compiler on both Linux and macOS.
+#
+# Homebrew's llvm package doesn't ship a versioned clang++ binary, so the values
+# below don't work on macOS. Fortunately, the path change above makes the
+# default values (clang and clang++) resolve to the correct compiler on macOS.
+- if [ "$TRAVIS_OS_NAME" == "linux" ]; then
+ if [ "$CXX" = "clang++" ]; then export CXX="clang++-8" CC="clang-8"; fi;
+ fi
- echo ${CC}
- echo ${CXX}
- ${CXX} --version
cmake_minimum_required(VERSION 3.9)
# Keep the version below in sync with the one in db.h
-project(leveldb VERSION 1.21.0 LANGUAGES C CXX)
+project(leveldb VERSION 1.22.0 LANGUAGES C CXX)
# This project can use C11, but will gracefully decay down to C89.
set(CMAKE_C_STANDARD 11)
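For reference, the constants that this version must stay in sync with live in include/leveldb/db.h; as of this release they have roughly this shape (verify against the actual header):

```cpp
// include/leveldb/db.h (shape only; check the header itself)
static const int kMajorVersion = 1;
static const int kMinorVersion = 22;
```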
$<BUILD_INTERFACE:${PROJECT_SOURCE_DIR}/include>
$<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>
)
+
+set_target_properties(leveldb
+ PROPERTIES VERSION ${PROJECT_VERSION} SOVERSION ${PROJECT_VERSION_MAJOR})
+
target_compile_definitions(leveldb
PRIVATE
# Used by include/export.h when building shared libraries.
leveldb_test("${PROJECT_SOURCE_DIR}/issues/issue178_test.cc")
leveldb_test("${PROJECT_SOURCE_DIR}/issues/issue200_test.cc")
+ leveldb_test("${PROJECT_SOURCE_DIR}/issues/issue320_test.cc")
leveldb_test("${PROJECT_SOURCE_DIR}/util/env_test.cc")
leveldb_test("${PROJECT_SOURCE_DIR}/util/status_test.cc")
First generate the Visual Studio 2017 project/solution files:
-```bash
-mkdir -p build
+```cmd
+mkdir build
cd build
cmake -G "Visual Studio 15" ..
```
The default will build for x86. For 64-bit run:
-```bash
+```cmd
cmake -G "Visual Studio 15 Win64" ..
```
To compile the Windows solution from the command-line:
-```bash
+```cmd
devenv /build Debug leveldb.sln
```
3. **Tests**: All changes must be accompanied by a new (or changed) test, or
a sufficient explanation as to why a new (or changed) test is not required.
+4. **Consistent Style**: This project conforms to the
+ [Google C++ Style Guide](https://google.github.io/styleguide/cppguide.html).
+ To ensure your changes are properly formatted, please run:
+
+ ```
+ clang-format -i --style=file <file>
+ ```
+
## Submitting a Pull Request
Before any pull request is accepted, the author must first sign a
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
-#include "leveldb/db.h"
#include "db/db_impl.h"
#include "leveldb/cache.h"
+#include "leveldb/db.h"
#include "util/testharness.h"
#include "util/testutil.h"
class AutoCompactTest {
public:
- std::string dbname_;
- Cache* tiny_cache_;
- Options options_;
- DB* db_;
-
AutoCompactTest() {
dbname_ = test::TmpDir() + "/autocompact_test";
tiny_cache_ = NewLRUCache(100);
}
void DoReads(int n);
+
+ private:
+ std::string dbname_;
+ Cache* tiny_cache_;
+ Options options_;
+ DB* db_;
};
static const int kValueSize = 200 * 1024;
ASSERT_LT(read, 100) << "Taking too long to compact";
Iterator* iter = db_->NewIterator(ReadOptions());
for (iter->SeekToFirst();
- iter->Valid() && iter->key().ToString() < limit_key;
- iter->Next()) {
+ iter->Valid() && iter->key().ToString() < limit_key; iter->Next()) {
// Drop data
}
delete iter;
// Wait a little bit to allow any triggered compactions to complete.
Env::Default()->SleepForMicroseconds(1000000);
uint64_t size = Size(Key(0), Key(n));
- fprintf(stderr, "iter %3d => %7.3f MB [other %7.3f MB]\n",
- read+1, size/1048576.0, Size(Key(n), Key(kCount))/1048576.0);
- if (size <= initial_size/10) {
+ fprintf(stderr, "iter %3d => %7.3f MB [other %7.3f MB]\n", read + 1,
+ size / 1048576.0, Size(Key(n), Key(kCount)) / 1048576.0);
+ if (size <= initial_size / 10) {
break;
}
}
// is pretty much unchanged.
const int64_t final_other_size = Size(Key(n), Key(kCount));
ASSERT_LE(final_other_size, initial_other_size + 1048576);
- ASSERT_GE(final_other_size, initial_other_size/5 - 1048576);
+ ASSERT_GE(final_other_size, initial_other_size / 5 - 1048576);
}
-TEST(AutoCompactTest, ReadAll) {
- DoReads(kCount);
-}
+TEST(AutoCompactTest, ReadAll) { DoReads(kCount); }
-TEST(AutoCompactTest, ReadHalf) {
- DoReads(kCount/2);
-}
+TEST(AutoCompactTest, ReadHalf) { DoReads(kCount / 2); }
} // namespace leveldb
-int main(int argc, char** argv) {
- return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
#include "db/builder.h"
-#include "db/filename.h"
#include "db/dbformat.h"
+#include "db/filename.h"
#include "db/table_cache.h"
#include "db/version_edit.h"
#include "leveldb/db.h"
namespace leveldb {
-Status BuildTable(const std::string& dbname,
- Env* env,
- const Options& options,
- TableCache* table_cache,
- Iterator* iter,
- FileMetaData* meta) {
+Status BuildTable(const std::string& dbname, Env* env, const Options& options,
+ TableCache* table_cache, Iterator* iter, FileMetaData* meta) {
Status s;
meta->file_size = 0;
iter->SeekToFirst();
if (s.ok()) {
// Verify that the table is usable
- Iterator* it = table_cache->NewIterator(ReadOptions(),
- meta->number,
+ Iterator* it = table_cache->NewIterator(ReadOptions(), meta->number,
meta->file_size);
s = it->status();
delete it;
// *meta will be filled with metadata about the generated table.
// If no data is present in *iter, meta->file_size will be set to
// zero, and no Table file will be produced.
-Status BuildTable(const std::string& dbname,
- Env* env,
- const Options& options,
- TableCache* table_cache,
- Iterator* iter,
- FileMetaData* meta);
+Status BuildTable(const std::string& dbname, Env* env, const Options& options,
+ TableCache* table_cache, Iterator* iter, FileMetaData* meta);
} // namespace leveldb
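As a hedged usage sketch of this contract (hypothetical caller; `versions`, `edit`, and `level` are stand-ins, not names from this diff):

```cpp
// Sketch: honoring BuildTable's "file_size == 0 means no file" contract.
FileMetaData meta;
meta.number = versions->NewFileNumber();  // assumed file-number allocator
Status s = BuildTable(dbname, env, options, table_cache, iter, &meta);
if (s.ok() && meta.file_size > 0) {
  // A table file was produced; record it so it becomes visible.
  edit->AddFile(level, meta.number, meta.file_size, meta.smallest,
                meta.largest);
}  // Otherwise: empty input or error, so there is no file to record.
```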
#include "leveldb/c.h"
#include <stdlib.h>
+
#include "leveldb/cache.h"
#include "leveldb/comparator.h"
#include "leveldb/db.h"
extern "C" {
-struct leveldb_t { DB* rep; };
-struct leveldb_iterator_t { Iterator* rep; };
-struct leveldb_writebatch_t { WriteBatch rep; };
-struct leveldb_snapshot_t { const Snapshot* rep; };
-struct leveldb_readoptions_t { ReadOptions rep; };
-struct leveldb_writeoptions_t { WriteOptions rep; };
-struct leveldb_options_t { Options rep; };
-struct leveldb_cache_t { Cache* rep; };
-struct leveldb_seqfile_t { SequentialFile* rep; };
-struct leveldb_randomfile_t { RandomAccessFile* rep; };
-struct leveldb_writablefile_t { WritableFile* rep; };
-struct leveldb_logger_t { Logger* rep; };
-struct leveldb_filelock_t { FileLock* rep; };
+struct leveldb_t {
+ DB* rep;
+};
+struct leveldb_iterator_t {
+ Iterator* rep;
+};
+struct leveldb_writebatch_t {
+ WriteBatch rep;
+};
+struct leveldb_snapshot_t {
+ const Snapshot* rep;
+};
+struct leveldb_readoptions_t {
+ ReadOptions rep;
+};
+struct leveldb_writeoptions_t {
+ WriteOptions rep;
+};
+struct leveldb_options_t {
+ Options rep;
+};
+struct leveldb_cache_t {
+ Cache* rep;
+};
+struct leveldb_seqfile_t {
+ SequentialFile* rep;
+};
+struct leveldb_randomfile_t {
+ RandomAccessFile* rep;
+};
+struct leveldb_writablefile_t {
+ WritableFile* rep;
+};
+struct leveldb_logger_t {
+ Logger* rep;
+};
+struct leveldb_filelock_t {
+ FileLock* rep;
+};
struct leveldb_comparator_t : public Comparator {
- void* state_;
- void (*destructor_)(void*);
- int (*compare_)(
- void*,
- const char* a, size_t alen,
- const char* b, size_t blen);
- const char* (*name_)(void*);
-
- virtual ~leveldb_comparator_t() {
- (*destructor_)(state_);
- }
+ virtual ~leveldb_comparator_t() { (*destructor_)(state_); }
virtual int Compare(const Slice& a, const Slice& b) const {
return (*compare_)(state_, a.data(), a.size(), b.data(), b.size());
}
- virtual const char* Name() const {
- return (*name_)(state_);
- }
+ virtual const char* Name() const { return (*name_)(state_); }
// No-ops since the C binding does not support key shortening methods.
- virtual void FindShortestSeparator(std::string*, const Slice&) const { }
- virtual void FindShortSuccessor(std::string* key) const { }
-};
+ virtual void FindShortestSeparator(std::string*, const Slice&) const {}
+ virtual void FindShortSuccessor(std::string* key) const {}
-struct leveldb_filterpolicy_t : public FilterPolicy {
void* state_;
void (*destructor_)(void*);
+ int (*compare_)(void*, const char* a, size_t alen, const char* b,
+ size_t blen);
const char* (*name_)(void*);
- char* (*create_)(
- void*,
- const char* const* key_array, const size_t* key_length_array,
- int num_keys,
- size_t* filter_length);
- unsigned char (*key_match_)(
- void*,
- const char* key, size_t length,
- const char* filter, size_t filter_length);
-
- virtual ~leveldb_filterpolicy_t() {
- (*destructor_)(state_);
- }
+};
- virtual const char* Name() const {
- return (*name_)(state_);
- }
+struct leveldb_filterpolicy_t : public FilterPolicy {
+ virtual ~leveldb_filterpolicy_t() { (*destructor_)(state_); }
+
+ virtual const char* Name() const { return (*name_)(state_); }
virtual void CreateFilter(const Slice* keys, int n, std::string* dst) const {
std::vector<const char*> key_pointers(n);
}
virtual bool KeyMayMatch(const Slice& key, const Slice& filter) const {
- return (*key_match_)(state_, key.data(), key.size(),
- filter.data(), filter.size());
+ return (*key_match_)(state_, key.data(), key.size(), filter.data(),
+ filter.size());
}
+
+ void* state_;
+ void (*destructor_)(void*);
+ const char* (*name_)(void*);
+ char* (*create_)(void*, const char* const* key_array,
+ const size_t* key_length_array, int num_keys,
+ size_t* filter_length);
+ unsigned char (*key_match_)(void*, const char* key, size_t length,
+ const char* filter, size_t filter_length);
};
struct leveldb_env_t {
return result;
}
-leveldb_t* leveldb_open(
- const leveldb_options_t* options,
- const char* name,
- char** errptr) {
+leveldb_t* leveldb_open(const leveldb_options_t* options, const char* name,
+ char** errptr) {
DB* db;
if (SaveError(errptr, DB::Open(options->rep, std::string(name), &db))) {
return nullptr;
delete db;
}
-void leveldb_put(
- leveldb_t* db,
- const leveldb_writeoptions_t* options,
- const char* key, size_t keylen,
- const char* val, size_t vallen,
- char** errptr) {
+void leveldb_put(leveldb_t* db, const leveldb_writeoptions_t* options,
+ const char* key, size_t keylen, const char* val, size_t vallen,
+ char** errptr) {
SaveError(errptr,
db->rep->Put(options->rep, Slice(key, keylen), Slice(val, vallen)));
}
-void leveldb_delete(
- leveldb_t* db,
- const leveldb_writeoptions_t* options,
- const char* key, size_t keylen,
- char** errptr) {
+void leveldb_delete(leveldb_t* db, const leveldb_writeoptions_t* options,
+ const char* key, size_t keylen, char** errptr) {
SaveError(errptr, db->rep->Delete(options->rep, Slice(key, keylen)));
}
-
-void leveldb_write(
- leveldb_t* db,
- const leveldb_writeoptions_t* options,
- leveldb_writebatch_t* batch,
- char** errptr) {
+void leveldb_write(leveldb_t* db, const leveldb_writeoptions_t* options,
+ leveldb_writebatch_t* batch, char** errptr) {
SaveError(errptr, db->rep->Write(options->rep, &batch->rep));
}
-char* leveldb_get(
- leveldb_t* db,
- const leveldb_readoptions_t* options,
- const char* key, size_t keylen,
- size_t* vallen,
- char** errptr) {
+char* leveldb_get(leveldb_t* db, const leveldb_readoptions_t* options,
+ const char* key, size_t keylen, size_t* vallen,
+ char** errptr) {
char* result = nullptr;
std::string tmp;
Status s = db->rep->Get(options->rep, Slice(key, keylen), &tmp);
}
leveldb_iterator_t* leveldb_create_iterator(
- leveldb_t* db,
- const leveldb_readoptions_t* options) {
+ leveldb_t* db, const leveldb_readoptions_t* options) {
leveldb_iterator_t* result = new leveldb_iterator_t;
result->rep = db->rep->NewIterator(options->rep);
return result;
}
-const leveldb_snapshot_t* leveldb_create_snapshot(
- leveldb_t* db) {
+const leveldb_snapshot_t* leveldb_create_snapshot(leveldb_t* db) {
leveldb_snapshot_t* result = new leveldb_snapshot_t;
result->rep = db->rep->GetSnapshot();
return result;
}
-void leveldb_release_snapshot(
- leveldb_t* db,
- const leveldb_snapshot_t* snapshot) {
+void leveldb_release_snapshot(leveldb_t* db,
+ const leveldb_snapshot_t* snapshot) {
db->rep->ReleaseSnapshot(snapshot->rep);
delete snapshot;
}
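A minimal usage sketch for the snapshot functions above, assuming an already-open `leveldb_t* db`:

```cpp
// Pin a consistent view, read through it, then release it.
const leveldb_snapshot_t* snap = leveldb_create_snapshot(db);
leveldb_readoptions_t* ropts = leveldb_readoptions_create();
leveldb_readoptions_set_snapshot(ropts, snap);
// ... leveldb_get()/iterator calls through ropts see the pinned state ...
leveldb_readoptions_set_snapshot(ropts, nullptr);
leveldb_readoptions_destroy(ropts);
leveldb_release_snapshot(db, snap);  // also deletes the C wrapper
```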
-char* leveldb_property_value(
- leveldb_t* db,
- const char* propname) {
+char* leveldb_property_value(leveldb_t* db, const char* propname) {
std::string tmp;
if (db->rep->GetProperty(Slice(propname), &tmp)) {
// We use strdup() since we expect human readable output.
}
}
-void leveldb_approximate_sizes(
- leveldb_t* db,
- int num_ranges,
- const char* const* range_start_key, const size_t* range_start_key_len,
- const char* const* range_limit_key, const size_t* range_limit_key_len,
- uint64_t* sizes) {
+void leveldb_approximate_sizes(leveldb_t* db, int num_ranges,
+ const char* const* range_start_key,
+ const size_t* range_start_key_len,
+ const char* const* range_limit_key,
+ const size_t* range_limit_key_len,
+ uint64_t* sizes) {
Range* ranges = new Range[num_ranges];
for (int i = 0; i < num_ranges; i++) {
ranges[i].start = Slice(range_start_key[i], range_start_key_len[i]);
delete[] ranges;
}
-void leveldb_compact_range(
- leveldb_t* db,
- const char* start_key, size_t start_key_len,
- const char* limit_key, size_t limit_key_len) {
+void leveldb_compact_range(leveldb_t* db, const char* start_key,
+ size_t start_key_len, const char* limit_key,
+ size_t limit_key_len) {
Slice a, b;
db->rep->CompactRange(
// Pass null Slice if corresponding "const char*" is null
(limit_key ? (b = Slice(limit_key, limit_key_len), &b) : nullptr));
}
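Because a null `const char*` maps to a null Slice, passing null for both keys compacts the entire key space; a quick sketch, assuming an open `leveldb_t* db`:

```cpp
// Hypothetical calls.
leveldb_compact_range(db, nullptr, 0, nullptr, 0);  // whole database
leveldb_compact_range(db, "a", 1, "m", 1);          // only keys in ["a", "m"]
```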
-void leveldb_destroy_db(
- const leveldb_options_t* options,
- const char* name,
- char** errptr) {
+void leveldb_destroy_db(const leveldb_options_t* options, const char* name,
+ char** errptr) {
SaveError(errptr, DestroyDB(name, options->rep));
}
-void leveldb_repair_db(
- const leveldb_options_t* options,
- const char* name,
- char** errptr) {
+void leveldb_repair_db(const leveldb_options_t* options, const char* name,
+ char** errptr) {
SaveError(errptr, RepairDB(name, options->rep));
}
iter->rep->Seek(Slice(k, klen));
}
-void leveldb_iter_next(leveldb_iterator_t* iter) {
- iter->rep->Next();
-}
+void leveldb_iter_next(leveldb_iterator_t* iter) { iter->rep->Next(); }
-void leveldb_iter_prev(leveldb_iterator_t* iter) {
- iter->rep->Prev();
-}
+void leveldb_iter_prev(leveldb_iterator_t* iter) { iter->rep->Prev(); }
const char* leveldb_iter_key(const leveldb_iterator_t* iter, size_t* klen) {
Slice s = iter->rep->key();
return new leveldb_writebatch_t;
}
-void leveldb_writebatch_destroy(leveldb_writebatch_t* b) {
- delete b;
-}
+void leveldb_writebatch_destroy(leveldb_writebatch_t* b) { delete b; }
-void leveldb_writebatch_clear(leveldb_writebatch_t* b) {
- b->rep.Clear();
-}
+void leveldb_writebatch_clear(leveldb_writebatch_t* b) { b->rep.Clear(); }
-void leveldb_writebatch_put(
- leveldb_writebatch_t* b,
- const char* key, size_t klen,
- const char* val, size_t vlen) {
+void leveldb_writebatch_put(leveldb_writebatch_t* b, const char* key,
+ size_t klen, const char* val, size_t vlen) {
b->rep.Put(Slice(key, klen), Slice(val, vlen));
}
-void leveldb_writebatch_delete(
- leveldb_writebatch_t* b,
- const char* key, size_t klen) {
+void leveldb_writebatch_delete(leveldb_writebatch_t* b, const char* key,
+ size_t klen) {
b->rep.Delete(Slice(key, klen));
}
-void leveldb_writebatch_iterate(
- const leveldb_writebatch_t* b,
- void* state,
- void (*put)(void*, const char* k, size_t klen, const char* v, size_t vlen),
- void (*deleted)(void*, const char* k, size_t klen)) {
+void leveldb_writebatch_iterate(const leveldb_writebatch_t* b, void* state,
+ void (*put)(void*, const char* k, size_t klen,
+ const char* v, size_t vlen),
+ void (*deleted)(void*, const char* k,
+ size_t klen)) {
class H : public WriteBatch::Handler {
public:
void* state_;
b->rep.Iterate(&handler);
}
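The callback signatures match the iterate declaration above; a hypothetical pair that just logs each operation:

```cpp
#include <stddef.h>
#include <stdio.h>

// Hypothetical callbacks for leveldb_writebatch_iterate().
static void HandlePut(void* state, const char* k, size_t klen, const char* v,
                      size_t vlen) {
  fprintf(stderr, "put %.*s -> %.*s\n", (int)klen, k, (int)vlen, v);
}
static void HandleDeleted(void* state, const char* k, size_t klen) {
  fprintf(stderr, "deleted %.*s\n", (int)klen, k);
}
// Usage: leveldb_writebatch_iterate(batch, /*state=*/nullptr,
//                                   HandlePut, HandleDeleted);
```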
-void leveldb_writebatch_append(leveldb_writebatch_t *destination,
- const leveldb_writebatch_t *source) {
+void leveldb_writebatch_append(leveldb_writebatch_t* destination,
+ const leveldb_writebatch_t* source) {
destination->rep.Append(source->rep);
}
-leveldb_options_t* leveldb_options_create() {
- return new leveldb_options_t;
-}
+leveldb_options_t* leveldb_options_create() { return new leveldb_options_t; }
-void leveldb_options_destroy(leveldb_options_t* options) {
- delete options;
-}
+void leveldb_options_destroy(leveldb_options_t* options) { delete options; }
-void leveldb_options_set_comparator(
- leveldb_options_t* opt,
- leveldb_comparator_t* cmp) {
+void leveldb_options_set_comparator(leveldb_options_t* opt,
+ leveldb_comparator_t* cmp) {
opt->rep.comparator = cmp;
}
-void leveldb_options_set_filter_policy(
- leveldb_options_t* opt,
- leveldb_filterpolicy_t* policy) {
+void leveldb_options_set_filter_policy(leveldb_options_t* opt,
+ leveldb_filterpolicy_t* policy) {
opt->rep.filter_policy = policy;
}
-void leveldb_options_set_create_if_missing(
- leveldb_options_t* opt, unsigned char v) {
+void leveldb_options_set_create_if_missing(leveldb_options_t* opt,
+ unsigned char v) {
opt->rep.create_if_missing = v;
}
-void leveldb_options_set_error_if_exists(
- leveldb_options_t* opt, unsigned char v) {
+void leveldb_options_set_error_if_exists(leveldb_options_t* opt,
+ unsigned char v) {
opt->rep.error_if_exists = v;
}
-void leveldb_options_set_paranoid_checks(
- leveldb_options_t* opt, unsigned char v) {
+void leveldb_options_set_paranoid_checks(leveldb_options_t* opt,
+ unsigned char v) {
opt->rep.paranoid_checks = v;
}
}
leveldb_comparator_t* leveldb_comparator_create(
- void* state,
- void (*destructor)(void*),
- int (*compare)(
- void*,
- const char* a, size_t alen,
- const char* b, size_t blen),
+ void* state, void (*destructor)(void*),
+ int (*compare)(void*, const char* a, size_t alen, const char* b,
+ size_t blen),
const char* (*name)(void*)) {
leveldb_comparator_t* result = new leveldb_comparator_t;
result->state_ = state;
return result;
}
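A hypothetical set of callbacks for leveldb_comparator_create(), implementing plain byte-wise ordering; note a custom comparator's name must stay stable across database opens:

```cpp
#include <string.h>

// Byte-wise comparison, same ordering as the built-in BytewiseComparator.
static int CompareBytes(void* state, const char* a, size_t alen, const char* b,
                        size_t blen) {
  size_t n = alen < blen ? alen : blen;
  int r = memcmp(a, b, n);
  if (r == 0 && alen != blen) r = (alen < blen) ? -1 : +1;
  return r;
}
static const char* ComparatorName(void* state) { return "hypothetical.bytes"; }
static void ComparatorDestroy(void* state) {}
// Usage: leveldb_comparator_create(nullptr, ComparatorDestroy, CompareBytes,
//                                  ComparatorName);
```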
-void leveldb_comparator_destroy(leveldb_comparator_t* cmp) {
- delete cmp;
-}
+void leveldb_comparator_destroy(leveldb_comparator_t* cmp) { delete cmp; }
leveldb_filterpolicy_t* leveldb_filterpolicy_create(
- void* state,
- void (*destructor)(void*),
- char* (*create_filter)(
- void*,
- const char* const* key_array, const size_t* key_length_array,
- int num_keys,
- size_t* filter_length),
- unsigned char (*key_may_match)(
- void*,
- const char* key, size_t length,
- const char* filter, size_t filter_length),
+ void* state, void (*destructor)(void*),
+ char* (*create_filter)(void*, const char* const* key_array,
+ const size_t* key_length_array, int num_keys,
+ size_t* filter_length),
+ unsigned char (*key_may_match)(void*, const char* key, size_t length,
+ const char* filter, size_t filter_length),
const char* (*name)(void*)) {
leveldb_filterpolicy_t* result = new leveldb_filterpolicy_t;
result->state_ = state;
// they delegate to a NewBloomFilterPolicy() instead of user
// supplied C functions.
struct Wrapper : public leveldb_filterpolicy_t {
- const FilterPolicy* rep_;
+ static void DoNothing(void*) {}
+
~Wrapper() { delete rep_; }
const char* Name() const { return rep_->Name(); }
void CreateFilter(const Slice* keys, int n, std::string* dst) const {
bool KeyMayMatch(const Slice& key, const Slice& filter) const {
return rep_->KeyMayMatch(key, filter);
}
- static void DoNothing(void*) { }
+
+ const FilterPolicy* rep_;
};
Wrapper* wrapper = new Wrapper;
wrapper->rep_ = NewBloomFilterPolicy(bits_per_key);
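A short usage sketch for the bloom wrapper built here; the policy must outlive the database that uses it:

```cpp
// Hypothetical setup, using ~10 bits per key.
leveldb_options_t* opts = leveldb_options_create();
leveldb_filterpolicy_t* bloom = leveldb_filterpolicy_create_bloom(10);
leveldb_options_set_filter_policy(opts, bloom);
// ... open the DB with opts; destroy bloom only after the DB is closed ...
```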
return new leveldb_readoptions_t;
}
-void leveldb_readoptions_destroy(leveldb_readoptions_t* opt) {
- delete opt;
-}
+void leveldb_readoptions_destroy(leveldb_readoptions_t* opt) { delete opt; }
-void leveldb_readoptions_set_verify_checksums(
- leveldb_readoptions_t* opt,
- unsigned char v) {
+void leveldb_readoptions_set_verify_checksums(leveldb_readoptions_t* opt,
+ unsigned char v) {
opt->rep.verify_checksums = v;
}
-void leveldb_readoptions_set_fill_cache(
- leveldb_readoptions_t* opt, unsigned char v) {
+void leveldb_readoptions_set_fill_cache(leveldb_readoptions_t* opt,
+ unsigned char v) {
opt->rep.fill_cache = v;
}
-void leveldb_readoptions_set_snapshot(
- leveldb_readoptions_t* opt,
- const leveldb_snapshot_t* snap) {
+void leveldb_readoptions_set_snapshot(leveldb_readoptions_t* opt,
+ const leveldb_snapshot_t* snap) {
opt->rep.snapshot = (snap ? snap->rep : nullptr);
}
return new leveldb_writeoptions_t;
}
-void leveldb_writeoptions_destroy(leveldb_writeoptions_t* opt) {
- delete opt;
-}
+void leveldb_writeoptions_destroy(leveldb_writeoptions_t* opt) { delete opt; }
-void leveldb_writeoptions_set_sync(
- leveldb_writeoptions_t* opt, unsigned char v) {
+void leveldb_writeoptions_set_sync(leveldb_writeoptions_t* opt,
+ unsigned char v) {
opt->rep.sync = v;
}
return buffer;
}
-void leveldb_free(void* ptr) {
- free(ptr);
-}
+void leveldb_free(void* ptr) { free(ptr); }
-int leveldb_major_version() {
- return kMajorVersion;
-}
+int leveldb_major_version() { return kMajorVersion; }
-int leveldb_minor_version() {
- return kMinorVersion;
-}
+int leveldb_minor_version() { return kMinorVersion; }
} // end extern "C"
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
-#include "leveldb/db.h"
-
#include <sys/types.h>
-#include "leveldb/cache.h"
-#include "leveldb/table.h"
-#include "leveldb/write_batch.h"
+
#include "db/db_impl.h"
#include "db/filename.h"
#include "db/log_format.h"
#include "db/version_set.h"
+#include "leveldb/cache.h"
+#include "leveldb/db.h"
+#include "leveldb/table.h"
+#include "leveldb/write_batch.h"
#include "util/logging.h"
#include "util/testharness.h"
#include "util/testutil.h"
class CorruptionTest {
public:
- test::ErrorEnv env_;
- std::string dbname_;
- Cache* tiny_cache_;
- Options options_;
- DB* db_;
-
- CorruptionTest() {
- tiny_cache_ = NewLRUCache(100);
+ CorruptionTest()
+ : db_(nullptr),
+ dbname_("/memenv/corruption_test"),
+ tiny_cache_(NewLRUCache(100)) {
options_.env = &env_;
options_.block_cache = tiny_cache_;
- dbname_ = "/memenv/corruption_test";
DestroyDB(dbname_, options_);
- db_ = nullptr;
options_.create_if_missing = true;
Reopen();
options_.create_if_missing = false;
}
~CorruptionTest() {
- delete db_;
- delete tiny_cache_;
+ delete db_;
+ delete tiny_cache_;
}
Status TryReopen() {
return DB::Open(options_, dbname_, &db_);
}
- void Reopen() {
- ASSERT_OK(TryReopen());
- }
+ void Reopen() { ASSERT_OK(TryReopen()); }
void RepairDB() {
delete db_;
std::string key_space, value_space;
WriteBatch batch;
for (int i = 0; i < n; i++) {
- //if ((i % 100) == 0) fprintf(stderr, "@ %d of %d\n", i, n);
+ // if ((i % 100) == 0) fprintf(stderr, "@ %d of %d\n", i, n);
Slice key = Key(i, &key_space);
batch.Clear();
batch.Put(key, Value(i, &value_space));
// Ignore boundary keys.
continue;
}
- if (!ConsumeDecimalNumber(&in, &key) ||
- !in.empty() ||
+ if (!ConsumeDecimalNumber(&in, &key) || !in.empty() ||
key < next_expected) {
bad_keys++;
continue;
std::string fname;
int picked_number = -1;
for (size_t i = 0; i < filenames.size(); i++) {
- if (ParseFileName(filenames[i], &number, &type) &&
- type == filetype &&
+ if (ParseFileName(filenames[i], &number, &type) && type == filetype &&
int(number) > picked_number) { // Pick latest file
fname = dbname_ + "/" + filenames[i];
picked_number = number;
Random r(k);
return test::RandomString(&r, kValueSize, storage);
}
+
+ test::ErrorEnv env_;
+ Options options_;
+ DB* db_;
+
+ private:
+ std::string dbname_;
+ Cache* tiny_cache_;
};
TEST(CorruptionTest, Recovery) {
Build(100);
Check(100, 100);
- Corrupt(kLogFile, 19, 1); // WriteBatch tag for first record
+ Corrupt(kLogFile, 19, 1); // WriteBatch tag for first record
Corrupt(kLogFile, log::kBlockSize + 1000, 1); // Somewhere in second block
Reopen();
} // namespace leveldb
-int main(int argc, char** argv) {
- return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
-#include <sys/types.h>
#include <stdio.h>
#include <stdlib.h>
+#include <sys/types.h>
+
#include "leveldb/cache.h"
#include "leveldb/db.h"
#include "leveldb/env.h"
"fill100K,"
"crc32c,"
"snappycomp,"
- "snappyuncomp,"
- ;
+ "snappyuncomp,";
// Number of key/values to place in database
static int FLAGS_num = 1000000;
start++;
}
size_t limit = s.size();
- while (limit > start && isspace(s[limit-1])) {
+ while (limit > start && isspace(s[limit - 1])) {
limit--;
}
return Slice(s.data() + start, limit - start);
seconds_ = (finish_ - start_) * 1e-6;
}
- void AddMessage(Slice msg) {
- AppendWithSpace(&message_, msg);
- }
+ void AddMessage(Slice msg) { AppendWithSpace(&message_, msg); }
void FinishedSingleOp() {
if (FLAGS_histogram) {
done_++;
if (done_ >= next_report_) {
- if (next_report_ < 1000) next_report_ += 100;
- else if (next_report_ < 5000) next_report_ += 500;
- else if (next_report_ < 10000) next_report_ += 1000;
- else if (next_report_ < 50000) next_report_ += 5000;
- else if (next_report_ < 100000) next_report_ += 10000;
- else if (next_report_ < 500000) next_report_ += 50000;
- else next_report_ += 100000;
+ if (next_report_ < 1000)
+ next_report_ += 100;
+ else if (next_report_ < 5000)
+ next_report_ += 500;
+ else if (next_report_ < 10000)
+ next_report_ += 1000;
+ else if (next_report_ < 50000)
+ next_report_ += 5000;
+ else if (next_report_ < 100000)
+ next_report_ += 10000;
+ else if (next_report_ < 500000)
+ next_report_ += 50000;
+ else
+ next_report_ += 100000;
fprintf(stderr, "... finished %d ops%30s\r", done_, "");
fflush(stderr);
}
}
- void AddBytes(int64_t n) {
- bytes_ += n;
- }
+ void AddBytes(int64_t n) { bytes_ += n; }
void Report(const Slice& name) {
// Pretend at least one op was done in case we are running a benchmark
}
AppendWithSpace(&extra, message_);
- fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n",
- name.ToString().c_str(),
- seconds_ * 1e6 / done_,
- (extra.empty() ? "" : " "),
- extra.c_str());
+ fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n", name.ToString().c_str(),
+ seconds_ * 1e6 / done_, (extra.empty() ? "" : " "), extra.c_str());
if (FLAGS_histogram) {
fprintf(stdout, "Microseconds per op:\n%s\n", hist_.ToString().c_str());
}
bool start GUARDED_BY(mu);
SharedState(int total)
- : cv(&mu), total(total), num_initialized(0), num_done(0), start(false) { }
+ : cv(&mu), total(total), num_initialized(0), num_done(0), start(false) {}
};
// Per-thread state for concurrent executions of the same benchmark.
struct ThreadState {
- int tid; // 0..n-1 when running in n threads
- Random rand; // Has different seeds for different threads
+ int tid; // 0..n-1 when running in n threads
+ Random rand; // Has different seeds for different threads
Stats stats;
SharedState* shared;
- ThreadState(int index)
- : tid(index),
- rand(1000 + index) {
- }
+ ThreadState(int index) : tid(index), rand(1000 + index), shared(nullptr) {}
};
} // namespace
static_cast<int>(FLAGS_value_size * FLAGS_compression_ratio + 0.5));
fprintf(stdout, "Entries: %d\n", num_);
fprintf(stdout, "RawSize: %.1f MB (estimated)\n",
- ((static_cast<int64_t>(kKeySize + FLAGS_value_size) * num_)
- / 1048576.0));
+ ((static_cast<int64_t>(kKeySize + FLAGS_value_size) * num_) /
+ 1048576.0));
fprintf(stdout, "FileSize: %.1f MB (estimated)\n",
- (((kKeySize + FLAGS_value_size * FLAGS_compression_ratio) * num_)
- / 1048576.0));
+ (((kKeySize + FLAGS_value_size * FLAGS_compression_ratio) * num_) /
+ 1048576.0));
PrintWarnings();
fprintf(stdout, "------------------------------------------------\n");
}
void PrintWarnings() {
#if defined(__GNUC__) && !defined(__OPTIMIZE__)
- fprintf(stdout,
- "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n"
- );
+ fprintf(
+ stdout,
+ "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n");
#endif
#ifndef NDEBUG
fprintf(stdout,
}
void PrintEnvironment() {
- fprintf(stderr, "LevelDB: version %d.%d\n",
- kMajorVersion, kMinorVersion);
+ fprintf(stderr, "LevelDB: version %d.%d\n", kMajorVersion,
+ kMinorVersion);
#if defined(__linux)
time_t now = time(nullptr);
public:
Benchmark()
- : cache_(FLAGS_cache_size >= 0 ? NewLRUCache(FLAGS_cache_size) : nullptr),
- filter_policy_(FLAGS_bloom_bits >= 0
- ? NewBloomFilterPolicy(FLAGS_bloom_bits)
- : nullptr),
- db_(nullptr),
- num_(FLAGS_num),
- value_size_(FLAGS_value_size),
- entries_per_batch_(1),
- reads_(FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads),
- heap_counter_(0) {
+ : cache_(FLAGS_cache_size >= 0 ? NewLRUCache(FLAGS_cache_size) : nullptr),
+ filter_policy_(FLAGS_bloom_bits >= 0
+ ? NewBloomFilterPolicy(FLAGS_bloom_bits)
+ : nullptr),
+ db_(nullptr),
+ num_(FLAGS_num),
+ value_size_(FLAGS_value_size),
+ entries_per_batch_(1),
+ reads_(FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads),
+ heap_counter_(0) {
std::vector<std::string> files;
g_env->GetChildren(FLAGS_db, &files);
for (size_t i = 0; i < files.size(); i++) {
} else if (name == Slice("sstables")) {
PrintStats("leveldb.sstables");
} else {
- if (name != Slice()) { // No error message for empty name
+ if (!name.empty()) { // No error message for empty name
fprintf(stderr, "unknown benchmark '%s'\n", name.ToString().c_str());
}
}
int64_t bytes = 0;
char* uncompressed = new char[input.size()];
while (ok && bytes < 1024 * 1048576) { // Compress 1G
- ok = port::Snappy_Uncompress(compressed.data(), compressed.size(),
- uncompressed);
+ ok = port::Snappy_Uncompress(compressed.data(), compressed.size(),
+ uncompressed);
bytes += input.size();
thread->stats.FinishedSingleOp();
}
}
}
- void WriteSeq(ThreadState* thread) {
- DoWrite(thread, true);
- }
+ void WriteSeq(ThreadState* thread) { DoWrite(thread, true); }
- void WriteRandom(ThreadState* thread) {
- DoWrite(thread, false);
- }
+ void WriteRandom(ThreadState* thread) { DoWrite(thread, false); }
void DoWrite(ThreadState* thread, bool seq) {
if (num_ != FLAGS_num) {
for (int i = 0; i < num_; i += entries_per_batch_) {
batch.Clear();
for (int j = 0; j < entries_per_batch_; j++) {
- const int k = seq ? i+j : (thread->rand.Next() % FLAGS_num);
+ const int k = seq ? i + j : (thread->rand.Next() % FLAGS_num);
char key[100];
snprintf(key, sizeof(key), "%016d", k);
batch.Put(key, gen.Generate(value_size_));
for (int i = 0; i < num_; i += entries_per_batch_) {
batch.Clear();
for (int j = 0; j < entries_per_batch_; j++) {
- const int k = seq ? i+j : (thread->rand.Next() % FLAGS_num);
+ const int k = seq ? i + j : (thread->rand.Next() % FLAGS_num);
char key[100];
snprintf(key, sizeof(key), "%016d", k);
batch.Delete(key);
}
}
- void DeleteSeq(ThreadState* thread) {
- DoDelete(thread, true);
- }
+ void DeleteSeq(ThreadState* thread) { DoDelete(thread, true); }
- void DeleteRandom(ThreadState* thread) {
- DoDelete(thread, false);
- }
+ void DeleteRandom(ThreadState* thread) { DoDelete(thread, false); }
void ReadWhileWriting(ThreadState* thread) {
if (thread->tid > 0) {
}
}
- void Compact(ThreadState* thread) {
- db_->CompactRange(nullptr, nullptr);
- }
+ void Compact(ThreadState* thread) { db_->CompactRange(nullptr, nullptr); }
void PrintStats(const char* key) {
std::string stats;
// Choose a location for the test database if none given with --db=<path>
if (FLAGS_db == nullptr) {
- leveldb::g_env->GetTestDirectory(&default_db_path);
- default_db_path += "/dbbench";
- FLAGS_db = default_db_path.c_str();
+ leveldb::g_env->GetTestDirectory(&default_db_path);
+ default_db_path += "/dbbench";
+ FLAGS_db = default_db_path.c_str();
}
leveldb::Benchmark benchmark;
// Information kept for every waiting writer
struct DBImpl::Writer {
+ explicit Writer(port::Mutex* mu)
+ : batch(nullptr), sync(false), done(false), cv(mu) {}
+
Status status;
WriteBatch* batch;
bool sync;
bool done;
port::CondVar cv;
-
- explicit Writer(port::Mutex* mu) : cv(mu) { }
};
struct DBImpl::CompactionState {
+ // Files produced by compaction
+ struct Output {
+ uint64_t number;
+ uint64_t file_size;
+ InternalKey smallest, largest;
+ };
+
+ Output* current_output() { return &outputs[outputs.size() - 1]; }
+
+ explicit CompactionState(Compaction* c)
+ : compaction(c),
+ smallest_snapshot(0),
+ outfile(nullptr),
+ builder(nullptr),
+ total_bytes(0) {}
+
Compaction* const compaction;
// Sequence numbers < smallest_snapshot are not significant since we
// will never have to service a snapshot below smallest_snapshot.
// Therefore if we have seen a sequence number S <= smallest_snapshot,
// we can drop all entries for the same key with sequence numbers < S.
SequenceNumber smallest_snapshot;
- // Files produced by compaction
- struct Output {
- uint64_t number;
- uint64_t file_size;
- InternalKey smallest, largest;
- };
std::vector<Output> outputs;
// State kept for output being generated
TableBuilder* builder;
uint64_t total_bytes;
-
- Output* current_output() { return &outputs[outputs.size()-1]; }
-
- explicit CompactionState(Compaction* c)
- : compaction(c),
- outfile(nullptr),
- builder(nullptr),
- total_bytes(0) {
- }
};
// Fix user-supplied options to be reasonable
Options result = src;
result.comparator = icmp;
result.filter_policy = (src.filter_policy != nullptr) ? ipolicy : nullptr;
- ClipToRange(&result.max_open_files, 64 + kNumNonTableCacheFiles, 50000);
- ClipToRange(&result.write_buffer_size, 64<<10, 1<<30);
- ClipToRange(&result.max_file_size, 1<<20, 1<<30);
- ClipToRange(&result.block_size, 1<<10, 4<<20);
+ ClipToRange(&result.max_open_files, 64 + kNumNonTableCacheFiles, 50000);
+ ClipToRange(&result.write_buffer_size, 64 << 10, 1 << 30);
+ ClipToRange(&result.max_file_size, 1 << 20, 1 << 30);
+ ClipToRange(&result.block_size, 1 << 10, 4 << 20);
if (result.info_log == nullptr) {
// Open a log file in the same directory as the db
src.env->CreateDir(dbname); // In case it does not exist
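For context, ClipToRange is the small clamping helper defined earlier in db_impl.cc; a sketch consistent with the calls above:

```cpp
// Clamp *ptr into [minvalue, maxvalue].
template <class T, class V>
static void ClipToRange(T* ptr, V minvalue, V maxvalue) {
  if (static_cast<V>(*ptr) > maxvalue) *ptr = maxvalue;
  if (static_cast<V>(*ptr) < minvalue) *ptr = minvalue;
}
```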
if (type == kTableFile) {
table_cache_->Evict(number);
}
- Log(options_.info_log, "Delete type=%d #%lld\n",
- static_cast<int>(type),
+ Log(options_.info_log, "Delete type=%d #%lld\n", static_cast<int>(type),
static_cast<unsigned long long>(number));
env_->DeleteFile(dbname_ + "/" + filenames[i]);
}
}
}
-Status DBImpl::Recover(VersionEdit* edit, bool *save_manifest) {
+Status DBImpl::Recover(VersionEdit* edit, bool* save_manifest) {
mutex_.AssertHeld();
// Ignore error from CreateDir since the creation of the DB is
}
} else {
if (options_.error_if_exists) {
- return Status::InvalidArgument(
- dbname_, "exists (error_if_exists is true)");
+ return Status::InvalidArgument(dbname_,
+ "exists (error_if_exists is true)");
}
}
Status* status; // null if options_.paranoid_checks==false
virtual void Corruption(size_t bytes, const Status& s) {
Log(info_log, "%s%s: dropping %d bytes; %s",
- (this->status == nullptr ? "(ignoring error) " : ""),
- fname, static_cast<int>(bytes), s.ToString().c_str());
+ (this->status == nullptr ? "(ignoring error) " : ""), fname,
+ static_cast<int>(bytes), s.ToString().c_str());
if (this->status != nullptr && this->status->ok()) *this->status = s;
}
};
// paranoid_checks==false so that corruptions cause entire commits
// to be skipped instead of propagating bad information (like overly
// large sequence numbers).
- log::Reader reader(file, &reporter, true/*checksum*/,
- 0/*initial_offset*/);
+ log::Reader reader(file, &reporter, true /*checksum*/, 0 /*initial_offset*/);
Log(options_.info_log, "Recovering log #%llu",
- (unsigned long long) log_number);
+ (unsigned long long)log_number);
// Read all the records and add to a memtable
std::string scratch;
WriteBatch batch;
int compactions = 0;
MemTable* mem = nullptr;
- while (reader.ReadRecord(&record, &scratch) &&
- status.ok()) {
+ while (reader.ReadRecord(&record, &scratch) && status.ok()) {
if (record.size() < 12) {
- reporter.Corruption(
- record.size(), Status::Corruption("log record too small"));
+ reporter.Corruption(record.size(),
+ Status::Corruption("log record too small"));
continue;
}
WriteBatchInternal::SetContents(&batch, record);
if (!status.ok()) {
break;
}
- const SequenceNumber last_seq =
- WriteBatchInternal::Sequence(&batch) +
- WriteBatchInternal::Count(&batch) - 1;
+ const SequenceNumber last_seq = WriteBatchInternal::Sequence(&batch) +
+ WriteBatchInternal::Count(&batch) - 1;
if (last_seq > *max_sequence) {
*max_sequence = last_seq;
}
pending_outputs_.insert(meta.number);
Iterator* iter = mem->NewIterator();
Log(options_.info_log, "Level-0 table #%llu: started",
- (unsigned long long) meta.number);
+ (unsigned long long)meta.number);
Status s;
{
}
Log(options_.info_log, "Level-0 table #%llu: %lld bytes %s",
- (unsigned long long) meta.number,
- (unsigned long long) meta.file_size,
+ (unsigned long long)meta.number, (unsigned long long)meta.file_size,
s.ToString().c_str());
delete iter;
pending_outputs_.erase(meta.number);
-
// Note that if file_size is zero, the file has been deleted and
// should not be added to the manifest.
int level = 0;
if (base != nullptr) {
level = base->PickLevelForMemTableOutput(min_user_key, max_user_key);
}
- edit->AddFile(level, meta.number, meta.file_size,
- meta.smallest, meta.largest);
+ edit->AddFile(level, meta.number, meta.file_size, meta.smallest,
+ meta.largest);
}
CompactionStats stats;
// DB is being deleted; no more background compactions
} else if (!bg_error_.ok()) {
// Already got an error; no more changes
- } else if (imm_ == nullptr &&
- manual_compaction_ == nullptr &&
+ } else if (imm_ == nullptr && manual_compaction_ == nullptr &&
!versions_->NeedsCompaction()) {
// No work to be done
} else {
}
Log(options_.info_log,
"Manual compaction at level-%d from %s .. %s; will stop at %s\n",
- m->level,
- (m->begin ? m->begin->DebugString().c_str() : "(begin)"),
+ m->level, (m->begin ? m->begin->DebugString().c_str() : "(begin)"),
(m->end ? m->end->DebugString().c_str() : "(end)"),
(m->done ? "(end)" : manual_end.DebugString().c_str()));
} else {
assert(c->num_input_files(0) == 1);
FileMetaData* f = c->input(0, 0);
c->edit()->DeleteFile(c->level(), f->number);
- c->edit()->AddFile(c->level() + 1, f->number, f->file_size,
- f->smallest, f->largest);
+ c->edit()->AddFile(c->level() + 1, f->number, f->file_size, f->smallest,
+ f->largest);
status = versions_->LogAndApply(c->edit(), &mutex_);
if (!status.ok()) {
RecordBackgroundError(status);
}
VersionSet::LevelSummaryStorage tmp;
Log(options_.info_log, "Moved #%lld to level-%d %lld bytes %s: %s\n",
- static_cast<unsigned long long>(f->number),
- c->level() + 1,
+ static_cast<unsigned long long>(f->number), c->level() + 1,
static_cast<unsigned long long>(f->file_size),
- status.ToString().c_str(),
- versions_->LevelSummary(&tmp));
+ status.ToString().c_str(), versions_->LevelSummary(&tmp));
} else {
CompactionState* compact = new CompactionState(c);
status = DoCompactionWork(compact);
} else if (shutting_down_.load(std::memory_order_acquire)) {
// Ignore compaction errors found during shutting down
} else {
- Log(options_.info_log,
- "Compaction error: %s", status.ToString().c_str());
+ Log(options_.info_log, "Compaction error: %s", status.ToString().c_str());
}
if (is_manual) {
if (s.ok() && current_entries > 0) {
// Verify that the table is usable
- Iterator* iter = table_cache_->NewIterator(ReadOptions(),
- output_number,
- current_bytes);
+ Iterator* iter =
+ table_cache_->NewIterator(ReadOptions(), output_number, current_bytes);
s = iter->status();
delete iter;
if (s.ok()) {
- Log(options_.info_log,
- "Generated table #%llu@%d: %lld keys, %lld bytes",
- (unsigned long long) output_number,
- compact->compaction->level(),
- (unsigned long long) current_entries,
- (unsigned long long) current_bytes);
+ Log(options_.info_log, "Generated table #%llu@%d: %lld keys, %lld bytes",
+ (unsigned long long)output_number, compact->compaction->level(),
+ (unsigned long long)current_entries,
+ (unsigned long long)current_bytes);
}
}
return s;
}
-
Status DBImpl::InstallCompactionResults(CompactionState* compact) {
mutex_.AssertHeld();
- Log(options_.info_log, "Compacted %d@%d + %d@%d files => %lld bytes",
- compact->compaction->num_input_files(0),
- compact->compaction->level(),
- compact->compaction->num_input_files(1),
- compact->compaction->level() + 1,
+ Log(options_.info_log, "Compacted %d@%d + %d@%d files => %lld bytes",
+ compact->compaction->num_input_files(0), compact->compaction->level(),
+ compact->compaction->num_input_files(1), compact->compaction->level() + 1,
static_cast<long long>(compact->total_bytes));
// Add compaction outputs
const int level = compact->compaction->level();
for (size_t i = 0; i < compact->outputs.size(); i++) {
const CompactionState::Output& out = compact->outputs[i];
- compact->compaction->edit()->AddFile(
- level + 1,
- out.number, out.file_size, out.smallest, out.largest);
+ compact->compaction->edit()->AddFile(level + 1, out.number, out.file_size,
+ out.smallest, out.largest);
}
return versions_->LogAndApply(compact->compaction->edit(), &mutex_);
}
const uint64_t start_micros = env_->NowMicros();
int64_t imm_micros = 0; // Micros spent doing imm_ compactions
- Log(options_.info_log, "Compacting %d@%d + %d@%d files",
- compact->compaction->num_input_files(0),
- compact->compaction->level(),
+ Log(options_.info_log, "Compacting %d@%d + %d@%d files",
+ compact->compaction->num_input_files(0), compact->compaction->level(),
compact->compaction->num_input_files(1),
compact->compaction->level() + 1);
std::string current_user_key;
bool has_current_user_key = false;
SequenceNumber last_sequence_for_key = kMaxSequenceNumber;
- for (; input->Valid() && !shutting_down_.load(std::memory_order_acquire); ) {
+ for (; input->Valid() && !shutting_down_.load(std::memory_order_acquire);) {
// Prioritize immutable compaction work
if (has_imm_.load(std::memory_order_relaxed)) {
const uint64_t imm_start = env_->NowMicros();
last_sequence_for_key = kMaxSequenceNumber;
} else {
if (!has_current_user_key ||
- user_comparator()->Compare(ikey.user_key,
- Slice(current_user_key)) != 0) {
+ user_comparator()->Compare(ikey.user_key, Slice(current_user_key)) !=
+ 0) {
// First occurrence of this user key
current_user_key.assign(ikey.user_key.data(), ikey.user_key.size());
has_current_user_key = true;
if (last_sequence_for_key <= compact->smallest_snapshot) {
// Hidden by a newer entry for same user key
- drop = true; // (A)
+ drop = true; // (A)
} else if (ikey.type == kTypeDeletion &&
ikey.sequence <= compact->smallest_snapshot &&
compact->compaction->IsBaseLevelForKey(ikey.user_key)) {
RecordBackgroundError(status);
}
VersionSet::LevelSummaryStorage tmp;
- Log(options_.info_log,
- "compacted to: %s", versions_->LevelSummary(&tmp));
+ Log(options_.info_log, "compacted to: %s", versions_->LevelSummary(&tmp));
return status;
}
MemTable* const imm GUARDED_BY(mu);
IterState(port::Mutex* mutex, MemTable* mem, MemTable* imm, Version* version)
- : mu(mutex), version(version), mem(mem), imm(imm) { }
+ : mu(mutex), version(version), mem(mem), imm(imm) {}
};
static void CleanupIteratorState(void* arg1, void* arg2) {
return versions_->MaxNextLevelOverlappingBytes();
}
-Status DBImpl::Get(const ReadOptions& options,
- const Slice& key,
+Status DBImpl::Get(const ReadOptions& options, const Slice& key,
std::string* value) {
Status s;
MutexLock l(&mutex_);
SequenceNumber latest_snapshot;
uint32_t seed;
Iterator* iter = NewInternalIterator(options, &latest_snapshot, &seed);
- return NewDBIterator(
- this, user_comparator(), iter,
- (options.snapshot != nullptr
- ? static_cast<const SnapshotImpl*>(options.snapshot)->sequence_number()
- : latest_snapshot),
- seed);
+ return NewDBIterator(this, user_comparator(), iter,
+ (options.snapshot != nullptr
+ ? static_cast<const SnapshotImpl*>(options.snapshot)
+ ->sequence_number()
+ : latest_snapshot),
+ seed);
}
void DBImpl::RecordReadSample(Slice key) {
return DB::Delete(options, key);
}
-Status DBImpl::Write(const WriteOptions& options, WriteBatch* my_batch) {
+Status DBImpl::Write(const WriteOptions& options, WriteBatch* updates) {
Writer w(&mutex_);
- w.batch = my_batch;
+ w.batch = updates;
w.sync = options.sync;
w.done = false;
}
// May temporarily unlock and wait.
- Status status = MakeRoomForWrite(my_batch == nullptr);
+ Status status = MakeRoomForWrite(updates == nullptr);
uint64_t last_sequence = versions_->LastSequence();
Writer* last_writer = &w;
- if (status.ok() && my_batch != nullptr) { // nullptr batch is for compactions
+ if (status.ok() && updates != nullptr) { // nullptr batch is for compactions
- WriteBatch* updates = BuildBatchGroup(&last_writer);
- WriteBatchInternal::SetSequence(updates, last_sequence + 1);
- last_sequence += WriteBatchInternal::Count(updates);
+ WriteBatch* write_batch = BuildBatchGroup(&last_writer);
+ WriteBatchInternal::SetSequence(write_batch, last_sequence + 1);
+ last_sequence += WriteBatchInternal::Count(write_batch);
// original write is small, limit the growth so we do not slow
// down the small write too much.
size_t max_size = 1 << 20;
- if (size <= (128<<10)) {
- max_size = size + (128<<10);
+ if (size <= (128 << 10)) {
+ max_size = size + (128 << 10);
}
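A quick worked example of this cap, using the constants above:

```cpp
// size = 64 KB  -> max_size = 64 KB + 128 KB = 192 KB (grouping stays small)
// size = 256 KB -> over the 128 KB threshold, so max_size stays at 1 MB
```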
*last_writer = first;
// Yield previous error
s = bg_error_;
break;
- } else if (
- allow_delay &&
- versions_->NumLevelFiles(0) >= config::kL0_SlowdownWritesTrigger) {
+ } else if (allow_delay && versions_->NumLevelFiles(0) >=
+ config::kL0_SlowdownWritesTrigger) {
// We are getting close to hitting a hard limit on the number of
// L0 files. Rather than delaying a single write by several
// seconds when we hit the hard limit, start delaying each
has_imm_.store(true, std::memory_order_release);
mem_ = new MemTable(internal_comparator_);
mem_->Ref();
- force = false; // Do not force another compaction if have room
+ force = false;  // Do not force another compaction if there is room
MaybeScheduleCompaction();
}
}
snprintf(buf, sizeof(buf),
" Compactions\n"
"Level Files Size(MB) Time(sec) Read(MB) Write(MB)\n"
- "--------------------------------------------------\n"
- );
+ "--------------------------------------------------\n");
value->append(buf);
for (int level = 0; level < config::kNumLevels; level++) {
int files = versions_->NumLevelFiles(level);
if (stats_[level].micros > 0 || files > 0) {
- snprintf(
- buf, sizeof(buf),
- "%3d %8d %8.0f %9.0f %8.0f %9.0f\n",
- level,
- files,
- versions_->NumLevelBytes(level) / 1048576.0,
- stats_[level].micros / 1e6,
- stats_[level].bytes_read / 1048576.0,
- stats_[level].bytes_written / 1048576.0);
+ snprintf(buf, sizeof(buf), "%3d %8d %8.0f %9.0f %8.0f %9.0f\n", level,
+ files, versions_->NumLevelBytes(level) / 1048576.0,
+ stats_[level].micros / 1e6,
+ stats_[level].bytes_read / 1048576.0,
+ stats_[level].bytes_written / 1048576.0);
value->append(buf);
}
}
return false;
}
-void DBImpl::GetApproximateSizes(
- const Range* range, int n,
- uint64_t* sizes) {
+void DBImpl::GetApproximateSizes(const Range* range, int n, uint64_t* sizes) {
// TODO(opt): better implementation
Version* v;
{
return Write(opt, &batch);
}
-DB::~DB() { }
+DB::~DB() {}
-Status DB::Open(const Options& options, const std::string& dbname,
- DB** dbptr) {
+Status DB::Open(const Options& options, const std::string& dbname, DB** dbptr) {
*dbptr = nullptr;
DBImpl* impl = new DBImpl(options, dbname);
return s;
}
-Snapshot::~Snapshot() {
-}
+Snapshot::~Snapshot() {}
Status DestroyDB(const std::string& dbname, const Options& options) {
Env* env = options.env;
class DBImpl : public DB {
public:
DBImpl(const Options& options, const std::string& dbname);
+
+ DBImpl(const DBImpl&) = delete;
+ DBImpl& operator=(const DBImpl&) = delete;
+
virtual ~DBImpl();
// Implementations of the DB interface
virtual Status Put(const WriteOptions&, const Slice& key, const Slice& value);
virtual Status Delete(const WriteOptions&, const Slice& key);
virtual Status Write(const WriteOptions& options, WriteBatch* updates);
- virtual Status Get(const ReadOptions& options,
- const Slice& key,
+ virtual Status Get(const ReadOptions& options, const Slice& key,
std::string* value);
virtual Iterator* NewIterator(const ReadOptions&);
virtual const Snapshot* GetSnapshot();
struct CompactionState;
struct Writer;
+ // Information for a manual compaction
+ struct ManualCompaction {
+ int level;
+ bool done;
+ const InternalKey* begin; // null means beginning of key range
+ const InternalKey* end; // null means end of key range
+ InternalKey tmp_storage; // Used to keep track of compaction progress
+ };
+
+ // Per level compaction stats. stats_[level] stores the stats for
+ // compactions that produced data for the specified "level".
+ struct CompactionStats {
+ CompactionStats() : micros(0), bytes_read(0), bytes_written(0) {}
+
+ void Add(const CompactionStats& c) {
+ this->micros += c.micros;
+ this->bytes_read += c.bytes_read;
+ this->bytes_written += c.bytes_written;
+ }
+
+ int64_t micros;
+ int64_t bytes_read;
+ int64_t bytes_written;
+ };
+
Iterator* NewInternalIterator(const ReadOptions&,
SequenceNumber* latest_snapshot,
uint32_t* seed);
Status InstallCompactionResults(CompactionState* compact)
EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ const Comparator* user_comparator() const {
+ return internal_comparator_.user_comparator();
+ }
+
// Constant after construction
Env* const env_;
const InternalKeyComparator internal_comparator_;
// Has a background compaction been scheduled or is running?
bool background_compaction_scheduled_ GUARDED_BY(mutex_);
- // Information for a manual compaction
- struct ManualCompaction {
- int level;
- bool done;
- const InternalKey* begin; // null means beginning of key range
- const InternalKey* end; // null means end of key range
- InternalKey tmp_storage; // Used to keep track of compaction progress
- };
ManualCompaction* manual_compaction_ GUARDED_BY(mutex_);
VersionSet* const versions_;
// Have we encountered a background error in paranoid mode?
Status bg_error_ GUARDED_BY(mutex_);
- // Per level compaction stats. stats_[level] stores the stats for
- // compactions that produced data for the specified "level".
- struct CompactionStats {
- int64_t micros;
- int64_t bytes_read;
- int64_t bytes_written;
-
- CompactionStats() : micros(0), bytes_read(0), bytes_written(0) { }
-
- void Add(const CompactionStats& c) {
- this->micros += c.micros;
- this->bytes_read += c.bytes_read;
- this->bytes_written += c.bytes_written;
- }
- };
CompactionStats stats_[config::kNumLevels] GUARDED_BY(mutex_);
-
- // No copying allowed
- DBImpl(const DBImpl&);
- void operator=(const DBImpl&);
-
- const Comparator* user_comparator() const {
- return internal_comparator_.user_comparator();
- }
};
// Sanitize db options. The caller should delete result.info_log if
#include "db/db_iter.h"
-#include "db/filename.h"
#include "db/db_impl.h"
#include "db/dbformat.h"
+#include "db/filename.h"
#include "leveldb/env.h"
#include "leveldb/iterator.h"
#include "port/port.h"
// combines multiple entries for the same userkey found in the DB
// representation into a single entry while accounting for sequence
// numbers, deletion markers, overwrites, etc.
-class DBIter: public Iterator {
+class DBIter : public Iterator {
public:
// Which direction is the iterator currently moving?
// (1) When moving forward, the internal iterator is positioned at
// the exact entry that yields this->key(), this->value()
// (2) When moving backwards, the internal iterator is positioned
// just before all entries whose user key == this->key().
- enum Direction {
- kForward,
- kReverse
- };
+ enum Direction { kForward, kReverse };
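A hypothetical layout illustrating the invariant described above (user key @ sequence number, in internal-key order):

```cpp
// Internal entries: a@5  a@3  b@7  b@2
// kForward, key() == "b": iter_ sits exactly on b@7.
// kReverse, key() == "b": iter_ sits on a@3, just before all "b" entries.
```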
DBIter(DBImpl* db, const Comparator* cmp, Iterator* iter, SequenceNumber s,
uint32_t seed)
direction_(kForward),
valid_(false),
rnd_(seed),
- bytes_until_read_sampling_(RandomCompactionPeriod()) {
- }
- virtual ~DBIter() {
- delete iter_;
- }
+ bytes_until_read_sampling_(RandomCompactionPeriod()) {}
+
+ DBIter(const DBIter&) = delete;
+ DBIter& operator=(const DBIter&) = delete;
+
+ virtual ~DBIter() { delete iter_; }
virtual bool Valid() const { return valid_; }
virtual Slice key() const {
assert(valid_);
// Picks the number of bytes that can be read until a compaction is scheduled.
size_t RandomCompactionPeriod() {
- return rnd_.Uniform(2*config::kReadBytesPeriod);
+ return rnd_.Uniform(2 * config::kReadBytesPeriod);
}
DBImpl* db_;
const Comparator* const user_comparator_;
Iterator* const iter_;
SequenceNumber const sequence_;
-
Status status_;
- std::string saved_key_; // == current key when direction_==kReverse
- std::string saved_value_; // == current raw value when direction_==kReverse
+ std::string saved_key_; // == current key when direction_==kReverse
+ std::string saved_value_; // == current raw value when direction_==kReverse
Direction direction_;
bool valid_;
-
Random rnd_;
size_t bytes_until_read_sampling_;
-
- // No copying allowed
- DBIter(const DBIter&);
- void operator=(const DBIter&);
};
inline bool DBIter::ParseKey(ParsedInternalKey* ikey) {
ClearSavedValue();
return;
}
- if (user_comparator_->Compare(ExtractUserKey(iter_->key()),
- saved_key_) < 0) {
+ if (user_comparator_->Compare(ExtractUserKey(iter_->key()), saved_key_) <
+ 0) {
break;
}
}
direction_ = kForward;
ClearSavedValue();
saved_key_.clear();
- AppendInternalKey(
- &saved_key_, ParsedInternalKey(target, sequence_, kValueTypeForSeek));
+ AppendInternalKey(&saved_key_,
+ ParsedInternalKey(target, sequence_, kValueTypeForSeek));
iter_->Seek(saved_key_);
if (iter_->Valid()) {
FindNextUserEntry(false, &saved_key_ /* temporary storage */);
} // anonymous namespace
-Iterator* NewDBIterator(
- DBImpl* db,
- const Comparator* user_key_comparator,
- Iterator* internal_iter,
- SequenceNumber sequence,
- uint32_t seed) {
+Iterator* NewDBIterator(DBImpl* db, const Comparator* user_key_comparator,
+ Iterator* internal_iter, SequenceNumber sequence,
+ uint32_t seed) {
return new DBIter(db, user_key_comparator, internal_iter, sequence, seed);
}
#define STORAGE_LEVELDB_DB_DB_ITER_H_
#include <stdint.h>
-#include "leveldb/db.h"
+
#include "db/dbformat.h"
+#include "leveldb/db.h"
namespace leveldb {
// Return a new iterator that converts internal keys (yielded by
// "*internal_iter") that were live at the specified "sequence" number
// into appropriate user keys.
-Iterator* NewDBIterator(DBImpl* db,
- const Comparator* user_key_comparator,
- Iterator* internal_iter,
- SequenceNumber sequence,
+Iterator* NewDBIterator(DBImpl* db, const Comparator* user_key_comparator,
+ Iterator* internal_iter, SequenceNumber sequence,
uint32_t seed);
} // namespace leveldb
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
+#include "leveldb/db.h"
+
#include <atomic>
#include <string>
-#include "leveldb/db.h"
-#include "leveldb/filter_policy.h"
#include "db/db_impl.h"
#include "db/filename.h"
#include "db/version_set.h"
#include "db/write_batch_internal.h"
#include "leveldb/cache.h"
#include "leveldb/env.h"
+#include "leveldb/filter_policy.h"
#include "leveldb/table.h"
#include "port/port.h"
#include "port/thread_annotations.h"
}
static std::string RandomKey(Random* rnd) {
- int len = (rnd->OneIn(3)
- ? 1 // Short sometimes to encourage collisions
- : (rnd->OneIn(100) ? rnd->Skewed(10) : rnd->Uniform(10)));
+ int len =
+ (rnd->OneIn(3) ? 1 // Short sometimes to encourage collisions
+ : (rnd->OneIn(100) ? rnd->Skewed(10) : rnd->Uniform(10)));
return test::RandomKey(rnd, len);
}
namespace {
class AtomicCounter {
- private:
- port::Mutex mu_;
- int count_ GUARDED_BY(mu_);
public:
- AtomicCounter() : count_(0) { }
- void Increment() {
- IncrementBy(1);
- }
+ AtomicCounter() : count_(0) {}
+ void Increment() { IncrementBy(1); }
void IncrementBy(int count) LOCKS_EXCLUDED(mu_) {
MutexLock l(&mu_);
count_ += count;
MutexLock l(&mu_);
count_ = 0;
}
+
+ private:
+ port::Mutex mu_;
+ int count_ GUARDED_BY(mu_);
};
void DelayMilliseconds(int millis) {
bool count_random_reads_;
AtomicCounter random_read_counter_;
- explicit SpecialEnv(Env* base) : EnvWrapper(base),
- delay_data_sync_(false),
- data_sync_error_(false),
- no_space_(false),
- non_writable_(false),
- manifest_sync_error_(false),
- manifest_write_error_(false),
- count_random_reads_(false) {
- }
+ explicit SpecialEnv(Env* base)
+ : EnvWrapper(base),
+ delay_data_sync_(false),
+ data_sync_error_(false),
+ no_space_(false),
+ non_writable_(false),
+ manifest_sync_error_(false),
+ manifest_write_error_(false),
+ count_random_reads_(false) {}
Status NewWritableFile(const std::string& f, WritableFile** r) {
class DataFile : public WritableFile {
WritableFile* const base_;
public:
- DataFile(SpecialEnv* env, WritableFile* base)
- : env_(env),
- base_(base) {
- }
+ DataFile(SpecialEnv* env, WritableFile* base) : env_(env), base_(base) {}
~DataFile() { delete base_; }
Status Append(const Slice& data) {
if (env_->no_space_.load(std::memory_order_acquire)) {
private:
SpecialEnv* env_;
WritableFile* base_;
+
public:
- ManifestFile(SpecialEnv* env, WritableFile* b) : env_(env), base_(b) { }
+ ManifestFile(SpecialEnv* env, WritableFile* b) : env_(env), base_(b) {}
~ManifestFile() { delete base_; }
Status Append(const Slice& data) {
if (env_->manifest_write_error_.load(std::memory_order_acquire)) {
private:
RandomAccessFile* target_;
AtomicCounter* counter_;
+
public:
CountingFile(RandomAccessFile* target, AtomicCounter* counter)
- : target_(target), counter_(counter) {
- }
+ : target_(target), counter_(counter) {}
virtual ~CountingFile() { delete target_; }
virtual Status Read(uint64_t offset, size_t n, Slice* result,
char* scratch) const {
};
class DBTest {
- private:
- const FilterPolicy* filter_policy_;
-
- // Sequence of option configurations to try
- enum OptionConfig {
- kDefault,
- kReuse,
- kFilter,
- kUncompressed,
- kEnd
- };
- int option_config_;
-
public:
std::string dbname_;
SpecialEnv* env_;
Options last_options_;
- DBTest() : option_config_(kDefault),
- env_(new SpecialEnv(Env::Default())) {
+ DBTest() : env_(new SpecialEnv(Env::Default())), option_config_(kDefault) {
filter_policy_ = NewBloomFilterPolicy(10);
dbname_ = test::TmpDir() + "/db_test";
DestroyDB(dbname_, Options());
return options;
}
- DBImpl* dbfull() {
- return reinterpret_cast<DBImpl*>(db_);
- }
+ DBImpl* dbfull() { return reinterpret_cast<DBImpl*>(db_); }
- void Reopen(Options* options = nullptr) {
- ASSERT_OK(TryReopen(options));
- }
+ void Reopen(Options* options = nullptr) { ASSERT_OK(TryReopen(options)); }
void Close() {
delete db_;
return db_->Put(WriteOptions(), k, v);
}
- Status Delete(const std::string& k) {
- return db_->Delete(WriteOptions(), k);
- }
+ Status Delete(const std::string& k) { return db_->Delete(WriteOptions(), k); }
std::string Get(const std::string& k, const Snapshot* snapshot = nullptr) {
ReadOptions options;
int NumTableFilesAtLevel(int level) {
std::string property;
- ASSERT_TRUE(
- db_->GetProperty("leveldb.num-files-at-level" + NumberToString(level),
- &property));
+ ASSERT_TRUE(db_->GetProperty(
+ "leveldb.num-files-at-level" + NumberToString(level), &property));
return std::stoi(property);
}
void DumpFileCounts(const char* label) {
fprintf(stderr, "---\n%s:\n", label);
- fprintf(stderr, "maxoverlap: %lld\n",
- static_cast<long long>(
- dbfull()->TEST_MaxNextLevelOverlappingBytes()));
+ fprintf(
+ stderr, "maxoverlap: %lld\n",
+ static_cast<long long>(dbfull()->TEST_MaxNextLevelOverlappingBytes()));
for (int level = 0; level < config::kNumLevels; level++) {
int num = NumTableFilesAtLevel(level);
if (num > 0) {
}
return files_renamed;
}
+
+ private:
+ // Sequence of option configurations to try
+ enum OptionConfig { kDefault, kReuse, kFilter, kUncompressed, kEnd };
+
+ const FilterPolicy* filter_policy_;
+ int option_config_;
};
TEST(DBTest, Empty) {
// Block sync calls.
env_->delay_data_sync_.store(true, std::memory_order_release);
- Put("k1", std::string(100000, 'x')); // Fill memtable.
- Put("k2", std::string(100000, 'y')); // Trigger compaction.
+ Put("k1", std::string(100000, 'x')); // Fill memtable.
+ Put("k2", std::string(100000, 'y')); // Trigger compaction.
ASSERT_EQ("v1", Get("foo"));
// Release sync calls.
env_->delay_data_sync_.store(false, std::memory_order_release);
ASSERT_TRUE(db_->GetProperty("leveldb.approximate-memory-usage", &val));
int mem_usage = std::stoi(val);
ASSERT_GT(mem_usage, 0);
- ASSERT_LT(mem_usage, 5*1024*1024);
+ ASSERT_LT(mem_usage, 5 * 1024 * 1024);
} while (ChangeOptions());
}
// Step 1: First place sstables in levels 0 and 2
int compaction_count = 0;
- while (NumTableFilesAtLevel(0) == 0 ||
- NumTableFilesAtLevel(2) == 0) {
+ while (NumTableFilesAtLevel(0) == 0 || NumTableFilesAtLevel(2) == 0) {
ASSERT_LE(compaction_count, 100) << "could not fill levels 0 and 2";
compaction_count++;
Put("a", "begin");
ASSERT_EQ(IterStatus(iter), "b->vb");
// Make sure iter stays at snapshot
- ASSERT_OK(Put("a", "va2"));
+ ASSERT_OK(Put("a", "va2"));
ASSERT_OK(Put("a2", "va3"));
- ASSERT_OK(Put("b", "vb2"));
- ASSERT_OK(Put("c", "vc2"));
+ ASSERT_OK(Put("b", "vb2"));
+ ASSERT_OK(Put("c", "vc2"));
ASSERT_OK(Delete("b"));
iter->SeekToFirst();
ASSERT_EQ(IterStatus(iter), "a->va");
TEST(DBTest, CompactionsGenerateMultipleFiles) {
Options options = CurrentOptions();
- options.write_buffer_size = 100000000; // Large write buffer
+ options.write_buffer_size = 100000000; // Large write buffer
Reopen(&options);
Random rnd(301);
dbfull()->TEST_CompactRange(0, nullptr, nullptr);
// Make sparse update
- Put("A", "va2");
+ Put("A", "va2");
Put("B100", "bvalue2");
- Put("C", "vc2");
+ Put("C", "vc2");
dbfull()->TEST_CompactMemTable();
// Compactions should not cause us to create a situation where
// a file overlaps too much data at the next level.
- ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20*1048576);
+ ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20 * 1048576);
dbfull()->TEST_CompactRange(0, nullptr, nullptr);
- ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20*1048576);
+ ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20 * 1048576);
dbfull()->TEST_CompactRange(1, nullptr, nullptr);
- ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20*1048576);
+ ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20 * 1048576);
}
static bool Between(uint64_t val, uint64_t low, uint64_t high) {
bool result = (val >= low) && (val <= high);
if (!result) {
fprintf(stderr, "Value %llu is not in range [%llu, %llu]\n",
- (unsigned long long)(val),
- (unsigned long long)(low),
+ (unsigned long long)(val), (unsigned long long)(low),
(unsigned long long)(high));
}
return result;
TEST(DBTest, ApproximateSizes) {
do {
Options options = CurrentOptions();
- options.write_buffer_size = 100000000; // Large write buffer
+ options.write_buffer_size = 100000000; // Large write buffer
options.compression = kNoCompression;
DestroyAndReopen();
for (int compact_start = 0; compact_start < N; compact_start += 10) {
for (int i = 0; i < N; i += 10) {
- ASSERT_TRUE(Between(Size("", Key(i)), S1*i, S2*i));
- ASSERT_TRUE(Between(Size("", Key(i)+".suffix"), S1*(i+1), S2*(i+1)));
- ASSERT_TRUE(Between(Size(Key(i), Key(i+10)), S1*10, S2*10));
+ ASSERT_TRUE(Between(Size("", Key(i)), S1 * i, S2 * i));
+ ASSERT_TRUE(Between(Size("", Key(i) + ".suffix"), S1 * (i + 1),
+ S2 * (i + 1)));
+ ASSERT_TRUE(Between(Size(Key(i), Key(i + 10)), S1 * 10, S2 * 10));
}
- ASSERT_TRUE(Between(Size("", Key(50)), S1*50, S2*50));
- ASSERT_TRUE(Between(Size("", Key(50)+".suffix"), S1*50, S2*50));
+ ASSERT_TRUE(Between(Size("", Key(50)), S1 * 50, S2 * 50));
+ ASSERT_TRUE(Between(Size("", Key(50) + ".suffix"), S1 * 50, S2 * 50));
std::string cstart_str = Key(compact_start);
std::string cend_str = Key(compact_start + 9);
Put("pastfoo", "v");
const Snapshot* snapshot = db_->GetSnapshot();
Put("foo", "tiny");
- Put("pastfoo2", "v2"); // Advance sequence number one more
+ Put("pastfoo2", "v2"); // Advance sequence number one more
ASSERT_OK(dbfull()->TEST_CompactMemTable());
ASSERT_GT(NumTableFilesAtLevel(0), 0);
Put("foo", "v1");
ASSERT_OK(dbfull()->TEST_CompactMemTable());
const int last = config::kMaxMemCompactLevel;
- ASSERT_EQ(NumTableFilesAtLevel(last), 1); // foo => v1 is now in last level
+ ASSERT_EQ(NumTableFilesAtLevel(last), 1); // foo => v1 is now in last level
// Place a table at level last-1 to prevent merging with preceding mutation
Put("a", "begin");
Put("z", "end");
dbfull()->TEST_CompactMemTable();
ASSERT_EQ(NumTableFilesAtLevel(last), 1);
- ASSERT_EQ(NumTableFilesAtLevel(last-1), 1);
+ ASSERT_EQ(NumTableFilesAtLevel(last - 1), 1);
Delete("foo");
Put("foo", "v2");
ASSERT_OK(dbfull()->TEST_CompactMemTable()); // Moves to level last-2
ASSERT_EQ(AllEntriesFor("foo"), "[ v2, DEL, v1 ]");
Slice z("z");
- dbfull()->TEST_CompactRange(last-2, nullptr, &z);
+ dbfull()->TEST_CompactRange(last - 2, nullptr, &z);
// DEL eliminated, but v1 remains because we aren't compacting that level
// (DEL can be eliminated because v2 hides v1).
ASSERT_EQ(AllEntriesFor("foo"), "[ v2, v1 ]");
- dbfull()->TEST_CompactRange(last-1, nullptr, nullptr);
+ dbfull()->TEST_CompactRange(last - 1, nullptr, nullptr);
// Merging last-1 w/ last, so we are the base level for "foo", so
// DEL is removed. (as is v1).
ASSERT_EQ(AllEntriesFor("foo"), "[ v2 ]");
Put("foo", "v1");
ASSERT_OK(dbfull()->TEST_CompactMemTable());
const int last = config::kMaxMemCompactLevel;
- ASSERT_EQ(NumTableFilesAtLevel(last), 1); // foo => v1 is now in last level
+ ASSERT_EQ(NumTableFilesAtLevel(last), 1); // foo => v1 is now in last level
// Place a table at level last-1 to prevent merging with preceding mutation
Put("a", "begin");
Put("z", "end");
dbfull()->TEST_CompactMemTable();
ASSERT_EQ(NumTableFilesAtLevel(last), 1);
- ASSERT_EQ(NumTableFilesAtLevel(last-1), 1);
+ ASSERT_EQ(NumTableFilesAtLevel(last - 1), 1);
Delete("foo");
ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]");
ASSERT_OK(dbfull()->TEST_CompactMemTable()); // Moves to level last-2
ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]");
- dbfull()->TEST_CompactRange(last-2, nullptr, nullptr);
+ dbfull()->TEST_CompactRange(last - 2, nullptr, nullptr);
// DEL kept: "last" file overlaps
ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]");
- dbfull()->TEST_CompactRange(last-1, nullptr, nullptr);
+ dbfull()->TEST_CompactRange(last - 1, nullptr, nullptr);
// Merging last-1 w/ last, so we are the base level for "foo", so
// DEL is removed. (as is v1).
ASSERT_EQ(AllEntriesFor("foo"), "[ ]");
do {
ASSERT_EQ(config::kMaxMemCompactLevel, 2) << "Fix test to match config";
- // Fill levels 1 and 2 to disable the pushing of new memtables to levels > 0.
+ // Fill levels 1 and 2 to disable the pushing of new memtables to levels >
+ // 0.
ASSERT_OK(Put("100", "v100"));
ASSERT_OK(Put("999", "v999"));
dbfull()->TEST_CompactMemTable();
return ToNumber(a) - ToNumber(b);
}
virtual void FindShortestSeparator(std::string* s, const Slice& l) const {
- ToNumber(*s); // Check format
- ToNumber(l); // Check format
+ ToNumber(*s); // Check format
+ ToNumber(l); // Check format
}
virtual void FindShortSuccessor(std::string* key) const {
- ToNumber(*key); // Check format
+ ToNumber(*key); // Check format
}
+
private:
static int ToNumber(const Slice& x) {
// Check that there are no extra characters.
- ASSERT_TRUE(x.size() >= 2 && x[0] == '[' && x[x.size()-1] == ']')
+ ASSERT_TRUE(x.size() >= 2 && x[0] == '[' && x[x.size() - 1] == ']')
<< EscapeString(x);
int val;
char ignored;
Options new_options = CurrentOptions();
new_options.create_if_missing = true;
new_options.comparator = &cmp;
- new_options.filter_policy = nullptr; // Cannot use bloom filters
+ new_options.filter_policy = nullptr; // Cannot use bloom filters
new_options.write_buffer_size = 1000; // Compact more often
DestroyAndReopen(&new_options);
ASSERT_OK(Put("[10]", "ten"));
for (int run = 0; run < 2; run++) {
for (int i = 0; i < 1000; i++) {
char buf[100];
- snprintf(buf, sizeof(buf), "[%d]", i*10);
+ snprintf(buf, sizeof(buf), "[%d]", i * 10);
ASSERT_OK(Put(buf, buf));
}
Compact("[0]", "[1000000]");
// Force out-of-space errors.
env_->no_space_.store(true, std::memory_order_release);
for (int i = 0; i < 10; i++) {
- for (int level = 0; level < config::kNumLevels-1; level++) {
+ for (int level = 0; level < config::kNumLevels - 1; level++) {
dbfull()->TEST_CompactRange(level, nullptr, nullptr);
}
}
// We iterate twice. In the second iteration, everything is the
// same except the log record never makes it to the MANIFEST file.
for (int iter = 0; iter < 2; iter++) {
- std::atomic<bool>* error_type = (iter == 0)
- ? &env_->manifest_sync_error_
- : &env_->manifest_write_error_;
+ std::atomic<bool>* error_type = (iter == 0) ? &env_->manifest_sync_error_
+ : &env_->manifest_write_error_;
// Insert foo=>bar mapping
Options options = CurrentOptions();
dbfull()->TEST_CompactMemTable();
ASSERT_EQ("bar", Get("foo"));
const int last = config::kMaxMemCompactLevel;
- ASSERT_EQ(NumTableFilesAtLevel(last), 1); // foo=>bar is now in last level
+ ASSERT_EQ(NumTableFilesAtLevel(last), 1); // foo=>bar is now in last level
// Merging compaction (will fail)
error_type->store(true, std::memory_order_release);
options.paranoid_checks = true;
Status s = TryReopen(&options);
ASSERT_TRUE(!s.ok());
- ASSERT_TRUE(s.ToString().find("issing") != std::string::npos)
- << s.ToString();
+ ASSERT_TRUE(s.ToString().find("issing") != std::string::npos) << s.ToString();
}
TEST(DBTest, StillReadSST) {
int reads = env_->random_read_counter_.Read();
fprintf(stderr, "%d present => %d reads\n", N, reads);
ASSERT_GE(reads, N);
- ASSERT_LE(reads, N + 2*N/100);
+ ASSERT_LE(reads, N + 2 * N / 100);
// Lookup present keys. Should rarely read from either sstable.
env_->random_read_counter_.Reset();
}
reads = env_->random_read_counter_.Read();
fprintf(stderr, "%d missing => %d reads\n", N, reads);
- ASSERT_LE(reads, 3*N/100);
+ ASSERT_LE(reads, 3 * N / 100);
env_->delay_data_sync_.store(false, std::memory_order_release);
Close();
if (rnd.OneIn(2)) {
// Write values of the form <key, my id, counter>.
      // We add some padding to force compactions.
- snprintf(valbuf, sizeof(valbuf), "%d.%d.%-1000d",
- key, id, static_cast<int>(counter));
+ snprintf(valbuf, sizeof(valbuf), "%d.%d.%-1000d", key, id,
+ static_cast<int>(counter));
ASSERT_OK(db->Put(WriteOptions(), Slice(keybuf), Slice(valbuf)));
} else {
// Read a value and verify that it matches the pattern written above.
typedef std::map<std::string, std::string> KVMap;
}
-class ModelDB: public DB {
+class ModelDB : public DB {
public:
class ModelSnapshot : public Snapshot {
public:
KVMap map_;
};
- explicit ModelDB(const Options& options): options_(options) { }
- ~ModelDB() { }
+ explicit ModelDB(const Options& options) : options_(options) {}
+ ~ModelDB() {}
virtual Status Put(const WriteOptions& o, const Slice& k, const Slice& v) {
return DB::Put(o, k, v);
}
virtual Status Delete(const WriteOptions& o, const Slice& key) {
return DB::Delete(o, key);
}
- virtual Status Get(const ReadOptions& options,
- const Slice& key, std::string* value) {
- assert(false); // Not implemented
+ virtual Status Get(const ReadOptions& options, const Slice& key,
+ std::string* value) {
+ assert(false); // Not implemented
return Status::NotFound(key);
}
virtual Iterator* NewIterator(const ReadOptions& options) {
virtual void Put(const Slice& key, const Slice& value) {
(*map_)[key.ToString()] = value.ToString();
}
- virtual void Delete(const Slice& key) {
- map_->erase(key.ToString());
- }
+ virtual void Delete(const Slice& key) { map_->erase(key.ToString()); }
};
Handler handler;
handler.map_ = &map_;
sizes[i] = 0;
}
}
- virtual void CompactRange(const Slice* start, const Slice* end) {
- }
+ virtual void CompactRange(const Slice* start, const Slice* end) {}
private:
- class ModelIter: public Iterator {
+ class ModelIter : public Iterator {
public:
ModelIter(const KVMap* map, bool owned)
- : map_(map), owned_(owned), iter_(map_->end()) {
- }
+ : map_(map), owned_(owned), iter_(map_->end()) {}
~ModelIter() {
if (owned_) delete map_;
}
KVMap map_;
};
-static bool CompareIterators(int step,
- DB* model,
- DB* db,
+static bool CompareIterators(int step, DB* model, DB* db,
const Snapshot* model_snap,
const Snapshot* db_snap) {
ReadOptions options;
bool ok = true;
int count = 0;
for (miter->SeekToFirst(), dbiter->SeekToFirst();
- ok && miter->Valid() && dbiter->Valid();
- miter->Next(), dbiter->Next()) {
+ ok && miter->Valid() && dbiter->Valid(); miter->Next(), dbiter->Next()) {
count++;
if (miter->key().compare(dbiter->key()) != 0) {
- fprintf(stderr, "step %d: Key mismatch: '%s' vs. '%s'\n",
- step,
+ fprintf(stderr, "step %d: Key mismatch: '%s' vs. '%s'\n", step,
EscapeString(miter->key()).c_str(),
EscapeString(dbiter->key()).c_str());
ok = false;
if (miter->value().compare(dbiter->value()) != 0) {
fprintf(stderr, "step %d: Value mismatch for key '%s': '%s' vs. '%s'\n",
- step,
- EscapeString(miter->key()).c_str(),
+ step, EscapeString(miter->key()).c_str(),
EscapeString(miter->value()).c_str(),
-              EscapeString(miter->value()).c_str());
+              EscapeString(dbiter->value()).c_str());
ok = false;
}
// TODO(sanjay): Test Get() works
int p = rnd.Uniform(100);
- if (p < 45) { // Put
+ if (p < 45) { // Put
k = RandomKey(&rnd);
- v = RandomString(&rnd,
- rnd.OneIn(20)
- ? 100 + rnd.Uniform(100)
- : rnd.Uniform(8));
+ v = RandomString(
+ &rnd, rnd.OneIn(20) ? 100 + rnd.Uniform(100) : rnd.Uniform(8));
ASSERT_OK(model.Put(WriteOptions(), k, v));
ASSERT_OK(db_->Put(WriteOptions(), k, v));
- } else if (p < 90) { // Delete
+ } else if (p < 90) { // Delete
k = RandomKey(&rnd);
ASSERT_OK(model.Delete(WriteOptions(), k));
ASSERT_OK(db_->Delete(WriteOptions(), k));
-
- } else { // Multi-element batch
+ } else { // Multi-element batch
WriteBatch b;
const int num = rnd.Uniform(8);
for (int i = 0; i < num; i++) {
VersionEdit vbase;
uint64_t fnum = 1;
for (int i = 0; i < num_base_files; i++) {
- InternalKey start(MakeKey(2*fnum), 1, kTypeValue);
- InternalKey limit(MakeKey(2*fnum+1), 1, kTypeDeletion);
+ InternalKey start(MakeKey(2 * fnum), 1, kTypeValue);
+ InternalKey limit(MakeKey(2 * fnum + 1), 1, kTypeDeletion);
vbase.AddFile(2, fnum++, 1 /* file size */, start, limit);
}
ASSERT_OK(vset.LogAndApply(&vbase, &mu));
for (int i = 0; i < iters; i++) {
VersionEdit vedit;
vedit.DeleteFile(2, fnum);
- InternalKey start(MakeKey(2*fnum), 1, kTypeValue);
- InternalKey limit(MakeKey(2*fnum+1), 1, kTypeDeletion);
+ InternalKey start(MakeKey(2 * fnum), 1, kTypeValue);
+ InternalKey limit(MakeKey(2 * fnum + 1), 1, kTypeDeletion);
vedit.AddFile(2, fnum++, 1 /* file size */, start, limit);
vset.LogAndApply(&vedit, &mu);
}
char buf[16];
snprintf(buf, sizeof(buf), "%d", num_base_files);
fprintf(stderr,
- "BM_LogAndApply/%-6s %8d iters : %9u us (%7.0f us / iter)\n",
- buf, iters, us, ((float)us) / iters);
+ "BM_LogAndApply/%-6s %8d iters : %9u us (%7.0f us / iter)\n", buf,
+ iters, us, ((float)us) / iters);
}
} // namespace leveldb
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
-#include <stdio.h>
#include "db/dbformat.h"
+
+#include <stdio.h>
+
#include "port/port.h"
#include "util/coding.h"
std::string ParsedInternalKey::DebugString() const {
char buf[50];
- snprintf(buf, sizeof(buf), "' @ %llu : %d",
- (unsigned long long) sequence,
+ snprintf(buf, sizeof(buf), "' @ %llu : %d", (unsigned long long)sequence,
int(type));
std::string result = "'";
result += EscapeString(user_key.ToString());
return r;
}
-void InternalKeyComparator::FindShortestSeparator(
- std::string* start,
- const Slice& limit) const {
+void InternalKeyComparator::FindShortestSeparator(std::string* start,
+ const Slice& limit) const {
// Attempt to shorten the user portion of the key
Slice user_start = ExtractUserKey(*start);
Slice user_limit = ExtractUserKey(limit);
user_comparator_->Compare(user_start, tmp) < 0) {
// User key has become shorter physically, but larger logically.
// Tack on the earliest possible number to the shortened user key.
- PutFixed64(&tmp, PackSequenceAndType(kMaxSequenceNumber,kValueTypeForSeek));
+ PutFixed64(&tmp,
+ PackSequenceAndType(kMaxSequenceNumber, kValueTypeForSeek));
assert(this->Compare(*start, tmp) < 0);
assert(this->Compare(tmp, limit) < 0);
start->swap(tmp);
user_comparator_->Compare(user_key, tmp) < 0) {
// User key has become shorter physically, but larger logically.
// Tack on the earliest possible number to the shortened user key.
- PutFixed64(&tmp, PackSequenceAndType(kMaxSequenceNumber,kValueTypeForSeek));
+ PutFixed64(&tmp,
+ PackSequenceAndType(kMaxSequenceNumber, kValueTypeForSeek));
assert(this->Compare(*key, tmp) < 0);
key->swap(tmp);
}
}
-const char* InternalFilterPolicy::Name() const {
- return user_policy_->Name();
-}
+const char* InternalFilterPolicy::Name() const { return user_policy_->Name(); }
void InternalFilterPolicy::CreateFilter(const Slice* keys, int n,
std::string* dst) const {
#define STORAGE_LEVELDB_DB_DBFORMAT_H_
#include <stdio.h>
+
#include "leveldb/comparator.h"
#include "leveldb/db.h"
#include "leveldb/filter_policy.h"
// Value types encoded as the last component of internal keys.
// DO NOT CHANGE THESE ENUM VALUES: they are embedded in the on-disk
// data structures.
-enum ValueType {
- kTypeDeletion = 0x0,
- kTypeValue = 0x1
-};
+enum ValueType { kTypeDeletion = 0x0, kTypeValue = 0x1 };
// kValueTypeForSeek defines the ValueType that should be passed when
// constructing a ParsedInternalKey object for seeking to a particular
// sequence number (since we sort sequence numbers in decreasing order
// We leave eight bits empty at the bottom so a type and sequence#
// can be packed together into 64-bits.
-static const SequenceNumber kMaxSequenceNumber =
- ((0x1ull << 56) - 1);
+static const SequenceNumber kMaxSequenceNumber = ((0x1ull << 56) - 1);
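// A short sketch of the packing described by the comment above (dbformat.cc
// implements it with a file-local helper): the sequence number occupies the
// high 56 bits of a 64-bit tag and the ValueType the low 8 bits. The
// "Sketch" names are illustrative, not leveldb's.
#include <cassert>
#include <cstdint>

inline uint64_t PackSequenceAndTypeSketch(uint64_t seq, unsigned char type) {
  assert(seq <= ((0x1ull << 56) - 1));  // kMaxSequenceNumber
  return (seq << 8) | type;
}
// Usage: a tag packed from (100, kTypeValue) unpacks losslessly:
//   assert(PackSequenceAndTypeSketch(100, 0x1) >> 8 == 100);
//   assert((PackSequenceAndTypeSketch(100, 0x1) & 0xff) == 0x1);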
struct ParsedInternalKey {
Slice user_key;
SequenceNumber sequence;
ValueType type;
- ParsedInternalKey() { } // Intentionally left uninitialized (for speed)
+ ParsedInternalKey() {} // Intentionally left uninitialized (for speed)
ParsedInternalKey(const Slice& u, const SequenceNumber& seq, ValueType t)
- : user_key(u), sequence(seq), type(t) { }
+ : user_key(u), sequence(seq), type(t) {}
std::string DebugString() const;
};
class InternalKeyComparator : public Comparator {
private:
const Comparator* user_comparator_;
+
public:
- explicit InternalKeyComparator(const Comparator* c) : user_comparator_(c) { }
+ explicit InternalKeyComparator(const Comparator* c) : user_comparator_(c) {}
virtual const char* Name() const;
virtual int Compare(const Slice& a, const Slice& b) const;
- virtual void FindShortestSeparator(
- std::string* start,
- const Slice& limit) const;
+ virtual void FindShortestSeparator(std::string* start,
+ const Slice& limit) const;
virtual void FindShortSuccessor(std::string* key) const;
const Comparator* user_comparator() const { return user_comparator_; }
class InternalFilterPolicy : public FilterPolicy {
private:
const FilterPolicy* const user_policy_;
+
public:
- explicit InternalFilterPolicy(const FilterPolicy* p) : user_policy_(p) { }
+ explicit InternalFilterPolicy(const FilterPolicy* p) : user_policy_(p) {}
virtual const char* Name() const;
virtual void CreateFilter(const Slice* keys, int n, std::string* dst) const;
virtual bool KeyMayMatch(const Slice& key, const Slice& filter) const;
class InternalKey {
private:
std::string rep_;
+
public:
- InternalKey() { } // Leave rep_ as empty to indicate it is invalid
+ InternalKey() {} // Leave rep_ as empty to indicate it is invalid
InternalKey(const Slice& user_key, SequenceNumber s, ValueType t) {
AppendInternalKey(&rep_, ParsedInternalKey(user_key, s, t));
}
std::string DebugString() const;
};
-inline int InternalKeyComparator::Compare(
- const InternalKey& a, const InternalKey& b) const {
+inline int InternalKeyComparator::Compare(const InternalKey& a,
+ const InternalKey& b) const {
return Compare(a.Encode(), b.Encode());
}
// the specified sequence number.
LookupKey(const Slice& user_key, SequenceNumber sequence);
+ LookupKey(const LookupKey&) = delete;
+ LookupKey& operator=(const LookupKey&) = delete;
+
~LookupKey();
// Return a key suitable for lookup in a MemTable.
const char* start_;
const char* kstart_;
const char* end_;
- char space_[200]; // Avoid allocation for short keys
-
- // No copying allowed
- LookupKey(const LookupKey&);
- void operator=(const LookupKey&);
+ char space_[200]; // Avoid allocation for short keys
};
inline LookupKey::~LookupKey() {
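// A sketch of the small-buffer pattern LookupKey uses, assuming only what
// the members above show: keys that fit in the inline space_ array avoid a
// heap allocation, and the destructor frees memory only in the heap case.
#include <cstddef>

class SmallBufferSketch {
 public:
  explicit SmallBufferSketch(size_t n) {
    start_ = (n <= sizeof(space_)) ? space_ : new char[n];
  }
  ~SmallBufferSketch() {
    if (start_ != space_) delete[] start_;
  }

  SmallBufferSketch(const SmallBufferSketch&) = delete;
  SmallBufferSketch& operator=(const SmallBufferSketch&) = delete;

 private:
  char* start_;
  char space_[200];  // avoid allocation for short keys
};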
namespace leveldb {
-static std::string IKey(const std::string& user_key,
- uint64_t seq,
+static std::string IKey(const std::string& user_key, uint64_t seq,
ValueType vt) {
std::string encoded;
AppendInternalKey(&encoded, ParsedInternalKey(user_key, seq, vt));
return result;
}
-static void TestKey(const std::string& key,
- uint64_t seq,
- ValueType vt) {
+static void TestKey(const std::string& key, uint64_t seq, ValueType vt) {
std::string encoded = IKey(key, seq, vt);
Slice in(encoded);
ASSERT_TRUE(!ParseInternalKey(Slice("bar"), &decoded));
}
-class FormatTest { };
+class FormatTest {};
TEST(FormatTest, InternalKey_EncodeDecode) {
- const char* keys[] = { "", "k", "hello", "longggggggggggggggggggggg" };
- const uint64_t seq[] = {
- 1, 2, 3,
- (1ull << 8) - 1, 1ull << 8, (1ull << 8) + 1,
- (1ull << 16) - 1, 1ull << 16, (1ull << 16) + 1,
- (1ull << 32) - 1, 1ull << 32, (1ull << 32) + 1
- };
+ const char* keys[] = {"", "k", "hello", "longggggggggggggggggggggg"};
+ const uint64_t seq[] = {1,
+ 2,
+ 3,
+ (1ull << 8) - 1,
+ 1ull << 8,
+ (1ull << 8) + 1,
+ (1ull << 16) - 1,
+ 1ull << 16,
+ (1ull << 16) + 1,
+ (1ull << 32) - 1,
+ 1ull << 32,
+ (1ull << 32) + 1};
for (int k = 0; k < sizeof(keys) / sizeof(keys[0]); k++) {
for (int s = 0; s < sizeof(seq) / sizeof(seq[0]); s++) {
TestKey(keys[k], seq[s], kTypeValue);
TEST(FormatTest, InternalKeyShortSeparator) {
// When user keys are same
ASSERT_EQ(IKey("foo", 100, kTypeValue),
- Shorten(IKey("foo", 100, kTypeValue),
- IKey("foo", 99, kTypeValue)));
- ASSERT_EQ(IKey("foo", 100, kTypeValue),
- Shorten(IKey("foo", 100, kTypeValue),
- IKey("foo", 101, kTypeValue)));
- ASSERT_EQ(IKey("foo", 100, kTypeValue),
- Shorten(IKey("foo", 100, kTypeValue),
- IKey("foo", 100, kTypeValue)));
- ASSERT_EQ(IKey("foo", 100, kTypeValue),
- Shorten(IKey("foo", 100, kTypeValue),
- IKey("foo", 100, kTypeDeletion)));
+ Shorten(IKey("foo", 100, kTypeValue), IKey("foo", 99, kTypeValue)));
+ ASSERT_EQ(
+ IKey("foo", 100, kTypeValue),
+ Shorten(IKey("foo", 100, kTypeValue), IKey("foo", 101, kTypeValue)));
+ ASSERT_EQ(
+ IKey("foo", 100, kTypeValue),
+ Shorten(IKey("foo", 100, kTypeValue), IKey("foo", 100, kTypeValue)));
+ ASSERT_EQ(
+ IKey("foo", 100, kTypeValue),
+ Shorten(IKey("foo", 100, kTypeValue), IKey("foo", 100, kTypeDeletion)));
// When user keys are misordered
ASSERT_EQ(IKey("foo", 100, kTypeValue),
- Shorten(IKey("foo", 100, kTypeValue),
- IKey("bar", 99, kTypeValue)));
+ Shorten(IKey("foo", 100, kTypeValue), IKey("bar", 99, kTypeValue)));
// When user keys are different, but correctly ordered
- ASSERT_EQ(IKey("g", kMaxSequenceNumber, kValueTypeForSeek),
- Shorten(IKey("foo", 100, kTypeValue),
- IKey("hello", 200, kTypeValue)));
+ ASSERT_EQ(
+ IKey("g", kMaxSequenceNumber, kValueTypeForSeek),
+ Shorten(IKey("foo", 100, kTypeValue), IKey("hello", 200, kTypeValue)));
// When start user key is prefix of limit user key
- ASSERT_EQ(IKey("foo", 100, kTypeValue),
- Shorten(IKey("foo", 100, kTypeValue),
- IKey("foobar", 200, kTypeValue)));
+ ASSERT_EQ(
+ IKey("foo", 100, kTypeValue),
+ Shorten(IKey("foo", 100, kTypeValue), IKey("foobar", 200, kTypeValue)));
// When limit user key is prefix of start user key
- ASSERT_EQ(IKey("foobar", 100, kTypeValue),
- Shorten(IKey("foobar", 100, kTypeValue),
- IKey("foo", 200, kTypeValue)));
+ ASSERT_EQ(
+ IKey("foobar", 100, kTypeValue),
+ Shorten(IKey("foobar", 100, kTypeValue), IKey("foo", 200, kTypeValue)));
}
TEST(FormatTest, InternalKeyShortestSuccessor) {
} // namespace leveldb
-int main(int argc, char** argv) {
- return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
// Notified when log reader encounters corruption.
class CorruptionReporter : public log::Reader::Reporter {
public:
- WritableFile* dst_;
virtual void Corruption(size_t bytes, const Status& status) {
std::string r = "corruption: ";
AppendNumberTo(&r, bytes);
r.push_back('\n');
dst_->Append(r);
}
+
+ WritableFile* dst_;
};
// Print contents of a log file. (*func)() is called on every record.
// Called on every item found in a WriteBatch.
class WriteBatchItemPrinter : public WriteBatch::Handler {
public:
- WritableFile* dst_;
virtual void Put(const Slice& key, const Slice& value) {
std::string r = " put '";
AppendEscapedStringTo(&r, key);
r += "'\n";
dst_->Append(r);
}
-};
+ WritableFile* dst_;
+};
// Called on every log record (each one of which is a WriteBatch)
// found in a kLogFile.
return Status::InvalidArgument(fname + ": unknown file type");
}
switch (ftype) {
- case kLogFile: return DumpLog(env, fname, dst);
- case kDescriptorFile: return DumpDescriptor(env, fname, dst);
- case kTableFile: return DumpTable(env, fname, dst);
+ case kLogFile:
+ return DumpLog(env, fname, dst);
+ case kDescriptorFile:
+ return DumpDescriptor(env, fname, dst);
+ case kTableFile:
+ return DumpTable(env, fname, dst);
default:
break;
}
#include <map>
#include <set>
-#include "leveldb/db.h"
#include "db/db_impl.h"
#include "db/filename.h"
#include "db/log_format.h"
#include "db/version_set.h"
#include "leveldb/cache.h"
+#include "leveldb/db.h"
#include "leveldb/env.h"
#include "leveldb/table.h"
#include "leveldb/write_batch.h"
SequentialFile* orig_file;
Status s = env->NewSequentialFile(filename, &orig_file);
- if (!s.ok())
- return s;
+ if (!s.ok()) return s;
char* scratch = new char[length];
leveldb::Slice result;
: filename_(filename),
pos_(-1),
pos_at_last_sync_(-1),
- pos_at_last_flush_(-1) { }
+ pos_at_last_flush_(-1) {}
FileState() : pos_(-1), pos_at_last_sync_(-1), pos_at_last_flush_(-1) {}
// is written to or sync'ed.
class TestWritableFile : public WritableFile {
public:
- TestWritableFile(const FileState& state,
- WritableFile* f,
+ TestWritableFile(const FileState& state, WritableFile* f,
FaultInjectionTestEnv* env);
virtual ~TestWritableFile();
virtual Status Append(const Slice& data);
public:
FaultInjectionTestEnv()
: EnvWrapper(Env::Default()), filesystem_active_(true) {}
- virtual ~FaultInjectionTestEnv() { }
+ virtual ~FaultInjectionTestEnv() {}
virtual Status NewWritableFile(const std::string& fname,
WritableFile** result);
virtual Status NewAppendableFile(const std::string& fname,
bool filesystem_active_ GUARDED_BY(mutex_); // Record flushes, syncs, writes
};
-TestWritableFile::TestWritableFile(const FileState& state,
- WritableFile* f,
+TestWritableFile::TestWritableFile(const FileState& state, WritableFile* f,
FaultInjectionTestEnv* env)
- : state_(state),
- target_(f),
- writable_file_opened_(true),
- env_(env) {
+ : state_(state), target_(f), writable_file_opened_(true), env_(env) {
assert(f != nullptr);
}
delete env_;
}
- void ReuseLogs(bool reuse) {
- options_.reuse_logs = reuse;
- }
+ void ReuseLogs(bool reuse) { options_.reuse_logs = reuse; }
void Build(int start_idx, int num_vals) {
std::string key_space, value_space;
}
void PartialCompactTestReopenWithFault(ResetMethod reset_method,
- int num_pre_sync,
- int num_post_sync) {
+ int num_pre_sync, int num_post_sync) {
env_->SetFilesystemActive(false);
CloseDB();
ResetDBState(reset_method);
ASSERT_OK(OpenDB());
ASSERT_OK(Verify(0, num_pre_sync, FaultInjectionTest::VAL_EXPECT_NO_ERROR));
- ASSERT_OK(Verify(num_pre_sync, num_post_sync, FaultInjectionTest::VAL_EXPECT_ERROR));
+ ASSERT_OK(Verify(num_pre_sync, num_post_sync,
+ FaultInjectionTest::VAL_EXPECT_ERROR));
}
- void NoWriteTestPreFault() {
- }
+ void NoWriteTestPreFault() {}
void NoWriteTestReopenWithFault(ResetMethod reset_method) {
CloseDB();
int num_post_sync = rnd.Uniform(kMaxNumValues);
PartialCompactTestPreFault(num_pre_sync, num_post_sync);
- PartialCompactTestReopenWithFault(RESET_DROP_UNSYNCED_DATA,
- num_pre_sync,
+ PartialCompactTestReopenWithFault(RESET_DROP_UNSYNCED_DATA, num_pre_sync,
num_post_sync);
NoWriteTestPreFault();
// No new files created so we expect all values since no files will be
// dropped.
PartialCompactTestReopenWithFault(RESET_DELETE_UNSYNCED_FILES,
- num_pre_sync + num_post_sync,
- 0);
+ num_pre_sync + num_post_sync, 0);
NoWriteTestPreFault();
NoWriteTestReopenWithFault(RESET_DELETE_UNSYNCED_FILES);
} // namespace leveldb
-int main(int argc, char** argv) {
- return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
+#include "db/filename.h"
+
#include <ctype.h>
#include <stdio.h>
-#include "db/filename.h"
+
#include "db/dbformat.h"
#include "leveldb/env.h"
#include "util/logging.h"
const char* suffix) {
char buf[100];
snprintf(buf, sizeof(buf), "/%06llu.%s",
- static_cast<unsigned long long>(number),
- suffix);
+ static_cast<unsigned long long>(number), suffix);
return dbname + buf;
}
return dbname + "/CURRENT";
}
-std::string LockFileName(const std::string& dbname) {
- return dbname + "/LOCK";
-}
+std::string LockFileName(const std::string& dbname) { return dbname + "/LOCK"; }
std::string TempFileName(const std::string& dbname, uint64_t number) {
assert(number > 0);
return dbname + "/LOG.old";
}
-
// Owned filenames have the form:
// dbname/CURRENT
// dbname/LOCK
// dbname/LOG.old
// dbname/MANIFEST-[0-9]+
// dbname/[0-9]+.(log|sst|ldb)
-bool ParseFileName(const std::string& filename,
- uint64_t* number,
+bool ParseFileName(const std::string& filename, uint64_t* number,
FileType* type) {
Slice rest(filename);
if (rest == "CURRENT") {
#define STORAGE_LEVELDB_DB_FILENAME_H_
#include <stdint.h>
+
#include <string>
+
#include "leveldb/slice.h"
#include "leveldb/status.h"
#include "port/port.h"
// If filename is a leveldb file, store the type of the file in *type.
// The number encoded in the filename is stored in *number. If the
// filename was successfully parsed, returns true. Else return false.
-bool ParseFileName(const std::string& filename,
- uint64_t* number,
+bool ParseFileName(const std::string& filename, uint64_t* number,
FileType* type);
// Make the CURRENT file point to the descriptor file with the
namespace leveldb {
-class FileNameTest { };
+class FileNameTest {};
TEST(FileNameTest, Parse) {
Slice db;
uint64_t number;
FileType type;
} cases[] = {
- { "100.log", 100, kLogFile },
- { "0.log", 0, kLogFile },
- { "0.sst", 0, kTableFile },
- { "0.ldb", 0, kTableFile },
- { "CURRENT", 0, kCurrentFile },
- { "LOCK", 0, kDBLockFile },
- { "MANIFEST-2", 2, kDescriptorFile },
- { "MANIFEST-7", 7, kDescriptorFile },
- { "LOG", 0, kInfoLogFile },
- { "LOG.old", 0, kInfoLogFile },
- { "18446744073709551615.log", 18446744073709551615ull, kLogFile },
+ {"100.log", 100, kLogFile},
+ {"0.log", 0, kLogFile},
+ {"0.sst", 0, kTableFile},
+ {"0.ldb", 0, kTableFile},
+ {"CURRENT", 0, kCurrentFile},
+ {"LOCK", 0, kDBLockFile},
+ {"MANIFEST-2", 2, kDescriptorFile},
+ {"MANIFEST-7", 7, kDescriptorFile},
+ {"LOG", 0, kInfoLogFile},
+ {"LOG.old", 0, kInfoLogFile},
+ {"18446744073709551615.log", 18446744073709551615ull, kLogFile},
};
for (int i = 0; i < sizeof(cases) / sizeof(cases[0]); i++) {
std::string f = cases[i].fname;
}
// Errors
- static const char* errors[] = {
- "",
- "foo",
- "foo-dx-100.log",
- ".log",
- "",
- "manifest",
- "CURREN",
- "CURRENTX",
- "MANIFES",
- "MANIFEST",
- "MANIFEST-",
- "XMANIFEST-3",
- "MANIFEST-3x",
- "LOC",
- "LOCKx",
- "LO",
- "LOGx",
- "18446744073709551616.log",
- "184467440737095516150.log",
- "100",
- "100.",
- "100.lop"
- };
+ static const char* errors[] = {"",
+ "foo",
+ "foo-dx-100.log",
+ ".log",
+ "",
+ "manifest",
+ "CURREN",
+ "CURRENTX",
+ "MANIFES",
+ "MANIFEST",
+ "MANIFEST-",
+ "XMANIFEST-3",
+ "MANIFEST-3x",
+ "LOC",
+ "LOCKx",
+ "LO",
+ "LOGx",
+ "18446744073709551616.log",
+ "184467440737095516150.log",
+ "100",
+ "100.",
+ "100.lop"};
for (int i = 0; i < sizeof(errors) / sizeof(errors[0]); i++) {
std::string f = errors[i];
ASSERT_TRUE(!ParseFileName(f, &number, &type)) << f;
} // namespace leveldb
-int main(int argc, char** argv) {
- return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include <stdio.h>
+
#include "leveldb/dumpfile.h"
#include "leveldb/env.h"
#include "leveldb/status.h"
} // namespace leveldb
static void Usage() {
- fprintf(
- stderr,
- "Usage: leveldbutil command...\n"
- " dump files... -- dump contents of specified files\n"
- );
+ fprintf(stderr,
+ "Usage: leveldbutil command...\n"
+ " dump files... -- dump contents of specified files\n");
}
int main(int argc, char** argv) {
} else {
std::string command = argv[1];
if (command == "dump") {
- ok = leveldb::HandleDumpCommand(env, argv+2, argc-2);
+ ok = leveldb::HandleDumpCommand(env, argv + 2, argc - 2);
} else {
Usage();
ok = false;
#include "db/log_reader.h"
#include <stdio.h>
+
#include "leveldb/env.h"
#include "util/coding.h"
#include "util/crc32c.h"
namespace leveldb {
namespace log {
-Reader::Reporter::~Reporter() {
-}
+Reader::Reporter::~Reporter() {}
Reader::Reader(SequentialFile* file, Reporter* reporter, bool checksum,
uint64_t initial_offset)
last_record_offset_(0),
end_of_buffer_offset_(0),
initial_offset_(initial_offset),
- resyncing_(initial_offset > 0) {
-}
+ resyncing_(initial_offset > 0) {}
-Reader::~Reader() {
- delete[] backing_store_;
-}
+Reader::~Reader() { delete[] backing_store_; }
bool Reader::SkipToInitialBlock() {
const size_t offset_in_block = initial_offset_ % kBlockSize;
return false;
}
-uint64_t Reader::LastRecordOffset() {
- return last_record_offset_;
-}
+uint64_t Reader::LastRecordOffset() { return last_record_offset_; }
void Reader::ReportCorruption(uint64_t bytes, const char* reason) {
ReportDrop(bytes, Status::Corruption(reason));
Reader(SequentialFile* file, Reporter* reporter, bool checksum,
uint64_t initial_offset);
+ Reader(const Reader&) = delete;
+ Reader& operator=(const Reader&) = delete;
+
~Reader();
// Read the next record into *record. Returns true if read
uint64_t LastRecordOffset();
private:
- SequentialFile* const file_;
- Reporter* const reporter_;
- bool const checksum_;
- char* const backing_store_;
- Slice buffer_;
- bool eof_; // Last Read() indicated EOF by returning < kBlockSize
-
- // Offset of the last record returned by ReadRecord.
- uint64_t last_record_offset_;
- // Offset of the first location past the end of buffer_.
- uint64_t end_of_buffer_offset_;
-
- // Offset at which to start looking for the first record to return
- uint64_t const initial_offset_;
-
- // True if we are resynchronizing after a seek (initial_offset_ > 0). In
- // particular, a run of kMiddleType and kLastType records can be silently
- // skipped in this mode
- bool resyncing_;
-
// Extend record types with the following special values
enum {
kEof = kMaxRecordType + 1,
void ReportCorruption(uint64_t bytes, const char* reason);
void ReportDrop(uint64_t bytes, const Status& reason);
- // No copying allowed
- Reader(const Reader&);
- void operator=(const Reader&);
+ SequentialFile* const file_;
+ Reporter* const reporter_;
+ bool const checksum_;
+ char* const backing_store_;
+ Slice buffer_;
+ bool eof_; // Last Read() indicated EOF by returning < kBlockSize
+
+ // Offset of the last record returned by ReadRecord.
+ uint64_t last_record_offset_;
+ // Offset of the first location past the end of buffer_.
+ uint64_t end_of_buffer_offset_;
+
+ // Offset at which to start looking for the first record to return
+ uint64_t const initial_offset_;
+
+ // True if we are resynchronizing after a seek (initial_offset_ > 0). In
+ // particular, a run of kMiddleType and kLastType records can be silently
+ // skipped in this mode
+ bool resyncing_;
};
} // namespace log
}
class LogTest {
- private:
- class StringDest : public WritableFile {
- public:
- std::string contents_;
-
- virtual Status Close() { return Status::OK(); }
- virtual Status Flush() { return Status::OK(); }
- virtual Status Sync() { return Status::OK(); }
- virtual Status Append(const Slice& slice) {
- contents_.append(slice.data(), slice.size());
- return Status::OK();
- }
- };
-
- class StringSource : public SequentialFile {
- public:
- Slice contents_;
- bool force_error_;
- bool returned_partial_;
- StringSource() : force_error_(false), returned_partial_(false) { }
-
- virtual Status Read(size_t n, Slice* result, char* scratch) {
- ASSERT_TRUE(!returned_partial_) << "must not Read() after eof/error";
-
- if (force_error_) {
- force_error_ = false;
- returned_partial_ = true;
- return Status::Corruption("read error");
- }
-
- if (contents_.size() < n) {
- n = contents_.size();
- returned_partial_ = true;
- }
- *result = Slice(contents_.data(), n);
- contents_.remove_prefix(n);
- return Status::OK();
- }
-
- virtual Status Skip(uint64_t n) {
- if (n > contents_.size()) {
- contents_.clear();
- return Status::NotFound("in-memory file skipped past end");
- }
-
- contents_.remove_prefix(n);
-
- return Status::OK();
- }
- };
-
- class ReportCollector : public Reader::Reporter {
- public:
- size_t dropped_bytes_;
- std::string message_;
-
- ReportCollector() : dropped_bytes_(0) { }
- virtual void Corruption(size_t bytes, const Status& status) {
- dropped_bytes_ += bytes;
- message_.append(status.ToString());
- }
- };
-
- StringDest dest_;
- StringSource source_;
- ReportCollector report_;
- bool reading_;
- Writer* writer_;
- Reader* reader_;
-
- // Record metadata for testing initial offset functionality
- static size_t initial_offset_record_sizes_[];
- static uint64_t initial_offset_last_record_offsets_[];
- static int num_initial_offset_records_;
-
public:
- LogTest() : reading_(false),
- writer_(new Writer(&dest_)),
- reader_(new Reader(&source_, &report_, true/*checksum*/,
- 0/*initial_offset*/)) {
- }
+ LogTest()
+ : reading_(false),
+ writer_(new Writer(&dest_)),
+ reader_(new Reader(&source_, &report_, true /*checksum*/,
+ 0 /*initial_offset*/)) {}
~LogTest() {
delete writer_;
writer_->AddRecord(Slice(msg));
}
- size_t WrittenBytes() const {
- return dest_.contents_.size();
- }
+ size_t WrittenBytes() const { return dest_.contents_.size(); }
std::string Read() {
if (!reading_) {
void FixChecksum(int header_offset, int len) {
// Compute crc of type/len/data
- uint32_t crc = crc32c::Value(&dest_.contents_[header_offset+6], 1 + len);
+ uint32_t crc = crc32c::Value(&dest_.contents_[header_offset + 6], 1 + len);
crc = crc32c::Mask(crc);
EncodeFixed32(&dest_.contents_[header_offset], crc);
}
- void ForceError() {
- source_.force_error_ = true;
- }
+ void ForceError() { source_.force_error_ = true; }
- size_t DroppedBytes() const {
- return report_.dropped_bytes_;
- }
+ size_t DroppedBytes() const { return report_.dropped_bytes_; }
- std::string ReportMessage() const {
- return report_.message_;
- }
+ std::string ReportMessage() const { return report_.message_; }
// Returns OK iff recorded error message contains "msg"
std::string MatchError(const std::string& msg) const {
void StartReadingAt(uint64_t initial_offset) {
delete reader_;
- reader_ = new Reader(&source_, &report_, true/*checksum*/, initial_offset);
+ reader_ = new Reader(&source_, &report_, true /*checksum*/, initial_offset);
}
void CheckOffsetPastEndReturnsNoRecords(uint64_t offset_past_end) {
WriteInitialOffsetLog();
reading_ = true;
source_.contents_ = Slice(dest_.contents_);
- Reader* offset_reader = new Reader(&source_, &report_, true/*checksum*/,
+ Reader* offset_reader = new Reader(&source_, &report_, true /*checksum*/,
WrittenBytes() + offset_past_end);
Slice record;
std::string scratch;
WriteInitialOffsetLog();
reading_ = true;
source_.contents_ = Slice(dest_.contents_);
- Reader* offset_reader = new Reader(&source_, &report_, true/*checksum*/,
- initial_offset);
+ Reader* offset_reader =
+ new Reader(&source_, &report_, true /*checksum*/, initial_offset);
// Read all records from expected_record_offset through the last one.
ASSERT_LT(expected_record_offset, num_initial_offset_records_);
}
delete offset_reader;
}
+
+ private:
+ class StringDest : public WritableFile {
+ public:
+ virtual Status Close() { return Status::OK(); }
+ virtual Status Flush() { return Status::OK(); }
+ virtual Status Sync() { return Status::OK(); }
+ virtual Status Append(const Slice& slice) {
+ contents_.append(slice.data(), slice.size());
+ return Status::OK();
+ }
+
+ std::string contents_;
+ };
+
+ class StringSource : public SequentialFile {
+ public:
+ StringSource() : force_error_(false), returned_partial_(false) {}
+
+ virtual Status Read(size_t n, Slice* result, char* scratch) {
+ ASSERT_TRUE(!returned_partial_) << "must not Read() after eof/error";
+
+ if (force_error_) {
+ force_error_ = false;
+ returned_partial_ = true;
+ return Status::Corruption("read error");
+ }
+
+ if (contents_.size() < n) {
+ n = contents_.size();
+ returned_partial_ = true;
+ }
+ *result = Slice(contents_.data(), n);
+ contents_.remove_prefix(n);
+ return Status::OK();
+ }
+
+ virtual Status Skip(uint64_t n) {
+ if (n > contents_.size()) {
+ contents_.clear();
+ return Status::NotFound("in-memory file skipped past end");
+ }
+
+ contents_.remove_prefix(n);
+
+ return Status::OK();
+ }
+
+ Slice contents_;
+ bool force_error_;
+ bool returned_partial_;
+ };
+
+ class ReportCollector : public Reader::Reporter {
+ public:
+ ReportCollector() : dropped_bytes_(0) {}
+ virtual void Corruption(size_t bytes, const Status& status) {
+ dropped_bytes_ += bytes;
+ message_.append(status.ToString());
+ }
+
+ size_t dropped_bytes_;
+ std::string message_;
+ };
+
+ // Record metadata for testing initial offset functionality
+ static size_t initial_offset_record_sizes_[];
+ static uint64_t initial_offset_last_record_offsets_[];
+ static int num_initial_offset_records_;
+
+ StringDest dest_;
+ StringSource source_;
+ ReportCollector report_;
+ bool reading_;
+ Writer* writer_;
+ Reader* reader_;
};
-size_t LogTest::initial_offset_record_sizes_[] =
- {10000, // Two sizable records in first block
- 10000,
- 2 * log::kBlockSize - 1000, // Span three blocks
- 1,
- 13716, // Consume all but two bytes of block 3.
- log::kBlockSize - kHeaderSize, // Consume the entirety of block 4.
- };
-
-uint64_t LogTest::initial_offset_last_record_offsets_[] =
- {0,
- kHeaderSize + 10000,
- 2 * (kHeaderSize + 10000),
- 2 * (kHeaderSize + 10000) +
- (2 * log::kBlockSize - 1000) + 3 * kHeaderSize,
- 2 * (kHeaderSize + 10000) +
- (2 * log::kBlockSize - 1000) + 3 * kHeaderSize
- + kHeaderSize + 1,
- 3 * log::kBlockSize,
- };
+size_t LogTest::initial_offset_record_sizes_[] = {
+ 10000, // Two sizable records in first block
+ 10000,
+ 2 * log::kBlockSize - 1000, // Span three blocks
+ 1,
+ 13716, // Consume all but two bytes of block 3.
+ log::kBlockSize - kHeaderSize, // Consume the entirety of block 4.
+};
+
+uint64_t LogTest::initial_offset_last_record_offsets_[] = {
+ 0,
+ kHeaderSize + 10000,
+ 2 * (kHeaderSize + 10000),
+ 2 * (kHeaderSize + 10000) + (2 * log::kBlockSize - 1000) + 3 * kHeaderSize,
+ 2 * (kHeaderSize + 10000) + (2 * log::kBlockSize - 1000) + 3 * kHeaderSize +
+ kHeaderSize + 1,
+ 3 * log::kBlockSize,
+};
// LogTest::initial_offset_last_record_offsets_ must be defined before this.
int LogTest::num_initial_offset_records_ =
- sizeof(LogTest::initial_offset_last_record_offsets_)/sizeof(uint64_t);
+ sizeof(LogTest::initial_offset_last_record_offsets_) / sizeof(uint64_t);
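// A worked check of the offsets above, under the values in db/log_format.h
// (kBlockSize == 32768, kHeaderSize == 7): a record whose payload spills
// across k blocks pays k headers, so the record following the three-block
// spanner starts at 2*(7+10000) + (2*32768 - 1000) + 3*7 == 84571.
static_assert(2 * (7 + 10000) + (2 * 32768 - 1000) + 3 * 7 == 84571,
              "offset of the fourth test record");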
-TEST(LogTest, Empty) {
- ASSERT_EQ("EOF", Read());
-}
+TEST(LogTest, Empty) { ASSERT_EQ("EOF", Read()); }
TEST(LogTest, ReadWrite) {
Write("foo");
TEST(LogTest, MarginalTrailer) {
// Make a trailer that is exactly the same length as an empty record.
- const int n = kBlockSize - 2*kHeaderSize;
+ const int n = kBlockSize - 2 * kHeaderSize;
Write(BigString("foo", n));
ASSERT_EQ(kBlockSize - kHeaderSize, WrittenBytes());
Write("");
TEST(LogTest, MarginalTrailer2) {
// Make a trailer that is exactly the same length as an empty record.
- const int n = kBlockSize - 2*kHeaderSize;
+ const int n = kBlockSize - 2 * kHeaderSize;
Write(BigString("foo", n));
ASSERT_EQ(kBlockSize - kHeaderSize, WrittenBytes());
Write("bar");
}
TEST(LogTest, ShortTrailer) {
- const int n = kBlockSize - 2*kHeaderSize + 4;
+ const int n = kBlockSize - 2 * kHeaderSize + 4;
Write(BigString("foo", n));
ASSERT_EQ(kBlockSize - kHeaderSize + 4, WrittenBytes());
Write("");
}
TEST(LogTest, AlignedEof) {
- const int n = kBlockSize - 2*kHeaderSize + 4;
+ const int n = kBlockSize - 2 * kHeaderSize + 4;
Write(BigString("foo", n));
ASSERT_EQ(kBlockSize - kHeaderSize + 4, WrittenBytes());
ASSERT_EQ(BigString("foo", n), Read());
TEST(LogTest, TruncatedTrailingRecordIsIgnored) {
Write("foo");
- ShrinkSize(4); // Drop all payload as well as a header byte
+ ShrinkSize(4); // Drop all payload as well as a header byte
ASSERT_EQ("EOF", Read());
// Truncated last record is ignored, not treated as an error.
ASSERT_EQ(0, DroppedBytes());
  // If initial_offset points to a record after first(R1) but before first(R2),
// incomplete fragment errors are not actual errors, and must be suppressed
// until a new first or full record is encountered.
- Write(BigString("foo", 3*kBlockSize));
+ Write(BigString("foo", 3 * kBlockSize));
Write("correct");
StartReadingAt(kBlockSize);
Write("correct");
// Wipe the middle block
- for (int offset = kBlockSize; offset < 2*kBlockSize; offset++) {
+ for (int offset = kBlockSize; offset < 2 * kBlockSize; offset++) {
SetByte(offset, 'x');
}
ASSERT_EQ("correct", Read());
ASSERT_EQ("EOF", Read());
const size_t dropped = DroppedBytes();
- ASSERT_LE(dropped, 2*kBlockSize + 100);
- ASSERT_GE(dropped, 2*kBlockSize);
+ ASSERT_LE(dropped, 2 * kBlockSize + 100);
+ ASSERT_GE(dropped, 2 * kBlockSize);
}
-TEST(LogTest, ReadStart) {
- CheckInitialOffsetRecord(0, 0);
-}
+TEST(LogTest, ReadStart) { CheckInitialOffsetRecord(0, 0); }
-TEST(LogTest, ReadSecondOneOff) {
- CheckInitialOffsetRecord(1, 1);
-}
+TEST(LogTest, ReadSecondOneOff) { CheckInitialOffsetRecord(1, 1); }
-TEST(LogTest, ReadSecondTenThousand) {
- CheckInitialOffsetRecord(10000, 1);
-}
+TEST(LogTest, ReadSecondTenThousand) { CheckInitialOffsetRecord(10000, 1); }
-TEST(LogTest, ReadSecondStart) {
- CheckInitialOffsetRecord(10007, 1);
-}
+TEST(LogTest, ReadSecondStart) { CheckInitialOffsetRecord(10007, 1); }
-TEST(LogTest, ReadThirdOneOff) {
- CheckInitialOffsetRecord(10008, 2);
-}
+TEST(LogTest, ReadThirdOneOff) { CheckInitialOffsetRecord(10008, 2); }
-TEST(LogTest, ReadThirdStart) {
- CheckInitialOffsetRecord(20014, 2);
-}
+TEST(LogTest, ReadThirdStart) { CheckInitialOffsetRecord(20014, 2); }
-TEST(LogTest, ReadFourthOneOff) {
- CheckInitialOffsetRecord(20015, 3);
-}
+TEST(LogTest, ReadFourthOneOff) { CheckInitialOffsetRecord(20015, 3); }
TEST(LogTest, ReadFourthFirstBlockTrailer) {
CheckInitialOffsetRecord(log::kBlockSize - 4, 3);
CheckInitialOffsetRecord(3 * log::kBlockSize - 3, 5);
}
-TEST(LogTest, ReadEnd) {
- CheckOffsetPastEndReturnsNoRecords(0);
-}
+TEST(LogTest, ReadEnd) { CheckOffsetPastEndReturnsNoRecords(0); }
-TEST(LogTest, ReadPastEnd) {
- CheckOffsetPastEndReturnsNoRecords(5);
-}
+TEST(LogTest, ReadPastEnd) { CheckOffsetPastEndReturnsNoRecords(5); }
} // namespace log
} // namespace leveldb
-int main(int argc, char** argv) {
- return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
#include "db/log_writer.h"
#include <stdint.h>
+
#include "leveldb/env.h"
#include "util/coding.h"
#include "util/crc32c.h"
}
}
-Writer::Writer(WritableFile* dest)
- : dest_(dest),
- block_offset_(0) {
+Writer::Writer(WritableFile* dest) : dest_(dest), block_offset_(0) {
InitTypeCrc(type_crc_);
}
InitTypeCrc(type_crc_);
}
-Writer::~Writer() {
-}
+Writer::~Writer() {}
Status Writer::AddRecord(const Slice& slice) {
const char* ptr = slice.data();
// Switch to a new block
if (leftover > 0) {
// Fill the trailer (literal below relies on kHeaderSize being 7)
- assert(kHeaderSize == 7);
+ static_assert(kHeaderSize == 7, "");
dest_->Append(Slice("\x00\x00\x00\x00\x00\x00", leftover));
}
block_offset_ = 0;
return s;
}
-Status Writer::EmitPhysicalRecord(RecordType t, const char* ptr, size_t n) {
- assert(n <= 0xffff); // Must fit in two bytes
- assert(block_offset_ + kHeaderSize + n <= kBlockSize);
+Status Writer::EmitPhysicalRecord(RecordType t, const char* ptr,
+ size_t length) {
+ assert(length <= 0xffff); // Must fit in two bytes
+ assert(block_offset_ + kHeaderSize + length <= kBlockSize);
// Format the header
char buf[kHeaderSize];
- buf[4] = static_cast<char>(n & 0xff);
- buf[5] = static_cast<char>(n >> 8);
+ buf[4] = static_cast<char>(length & 0xff);
+ buf[5] = static_cast<char>(length >> 8);
buf[6] = static_cast<char>(t);
// Compute the crc of the record type and the payload.
- uint32_t crc = crc32c::Extend(type_crc_[t], ptr, n);
- crc = crc32c::Mask(crc); // Adjust for storage
+ uint32_t crc = crc32c::Extend(type_crc_[t], ptr, length);
+ crc = crc32c::Mask(crc); // Adjust for storage
EncodeFixed32(buf, crc);
// Write the header and the payload
Status s = dest_->Append(Slice(buf, kHeaderSize));
if (s.ok()) {
- s = dest_->Append(Slice(ptr, n));
+ s = dest_->Append(Slice(ptr, length));
if (s.ok()) {
s = dest_->Flush();
}
}
- block_offset_ += kHeaderSize + n;
+ block_offset_ += kHeaderSize + length;
return s;
}
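// A sketch of the 7-byte header EmitPhysicalRecord writes, following the
// layout visible above (and documented in doc/log_format.md): masked crc32c
// (4 bytes), payload length (2 bytes, little-endian), type (1 byte). The
// masking matches util/crc32c.h; the decoder is a sketch that assumes a
// little-endian host.
#include <cstdint>
#include <cstring>

struct RecordHeaderSketch {
  uint32_t masked_crc;  // crc32c of type byte + payload, masked for storage
  uint16_t length;      // payload length; must fit in two bytes
  uint8_t type;         // kFullType, kFirstType, kMiddleType or kLastType
};

inline uint32_t MaskCrcSketch(uint32_t crc) {
  // Rotate right by 15 bits and add a constant so that storing the crc of
  // data that itself contains crcs stays well-distributed.
  return ((crc >> 15) | (crc << 17)) + 0xa282ead8u;
}

inline RecordHeaderSketch ParseHeaderSketch(const char* buf) {
  RecordHeaderSketch h;
  std::memcpy(&h.masked_crc, buf, 4);  // little-endian assumption
  h.length = static_cast<uint16_t>(static_cast<uint8_t>(buf[4]) |
                                   (static_cast<uint8_t>(buf[5]) << 8));
  h.type = static_cast<uint8_t>(buf[6]);
  return h;
}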
#define STORAGE_LEVELDB_DB_LOG_WRITER_H_
#include <stdint.h>
+
#include "db/log_format.h"
#include "leveldb/slice.h"
#include "leveldb/status.h"
// "*dest" must remain live while this Writer is in use.
Writer(WritableFile* dest, uint64_t dest_length);
+ Writer(const Writer&) = delete;
+ Writer& operator=(const Writer&) = delete;
+
~Writer();
Status AddRecord(const Slice& slice);
private:
+ Status EmitPhysicalRecord(RecordType type, const char* ptr, size_t length);
+
WritableFile* dest_;
- int block_offset_; // Current offset in block
+ int block_offset_; // Current offset in block
// crc32c values for all supported record types. These are
// pre-computed to reduce the overhead of computing the crc of the
// record type stored in the header.
uint32_t type_crc_[kMaxRecordType + 1];
-
- Status EmitPhysicalRecord(RecordType type, const char* ptr, size_t length);
-
- // No copying allowed
- Writer(const Writer&);
- void operator=(const Writer&);
};
} // namespace log
return Slice(p, len);
}
-MemTable::MemTable(const InternalKeyComparator& cmp)
- : comparator_(cmp),
- refs_(0),
- table_(comparator_, &arena_) {
-}
+MemTable::MemTable(const InternalKeyComparator& comparator)
+ : comparator_(comparator), refs_(0), table_(comparator_, &arena_) {}
-MemTable::~MemTable() {
- assert(refs_ == 0);
-}
+MemTable::~MemTable() { assert(refs_ == 0); }
size_t MemTable::ApproximateMemoryUsage() { return arena_.MemoryUsage(); }
-int MemTable::KeyComparator::operator()(const char* aptr, const char* bptr)
- const {
+int MemTable::KeyComparator::operator()(const char* aptr,
+ const char* bptr) const {
// Internal keys are encoded as length-prefixed strings.
Slice a = GetLengthPrefixedSlice(aptr);
Slice b = GetLengthPrefixedSlice(bptr);
return scratch->data();
}
-class MemTableIterator: public Iterator {
+class MemTableIterator : public Iterator {
public:
- explicit MemTableIterator(MemTable::Table* table) : iter_(table) { }
+ explicit MemTableIterator(MemTable::Table* table) : iter_(table) {}
virtual bool Valid() const { return iter_.Valid(); }
virtual void Seek(const Slice& k) { iter_.Seek(EncodeKey(&tmp_, k)); }
private:
MemTable::Table::Iterator iter_;
- std::string tmp_; // For passing to EncodeKey
+ std::string tmp_; // For passing to EncodeKey
// No copying allowed
MemTableIterator(const MemTableIterator&);
void operator=(const MemTableIterator&);
};
-Iterator* MemTable::NewIterator() {
- return new MemTableIterator(&table_);
-}
+Iterator* MemTable::NewIterator() { return new MemTableIterator(&table_); }
-void MemTable::Add(SequenceNumber s, ValueType type,
- const Slice& key,
+void MemTable::Add(SequenceNumber s, ValueType type, const Slice& key,
const Slice& value) {
// Format of an entry is concatenation of:
// key_size : varint32 of internal_key.size()
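+ //  key bytes    : char[internal_key.size()]
+ //  value_size   : varint32 of value.size()
+ //  value bytes  : char[value.size()]
+ // where internal_key is the user key followed by an 8-byte tag,
+ // uint64((sequence << 8) | type); hence internal_key_size = key.size() + 8.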
size_t key_size = key.size();
size_t val_size = value.size();
size_t internal_key_size = key_size + 8;
- const size_t encoded_len =
- VarintLength(internal_key_size) + internal_key_size +
- VarintLength(val_size) + val_size;
+ const size_t encoded_len = VarintLength(internal_key_size) +
+ internal_key_size + VarintLength(val_size) +
+ val_size;
char* buf = arena_.Allocate(encoded_len);
char* p = EncodeVarint32(buf, internal_key_size);
memcpy(p, key.data(), key_size);
// all entries with overly large sequence numbers.
const char* entry = iter.key();
uint32_t key_length;
- const char* key_ptr = GetVarint32Ptr(entry, entry+5, &key_length);
+ const char* key_ptr = GetVarint32Ptr(entry, entry + 5, &key_length);
if (comparator_.comparator.user_comparator()->Compare(
- Slice(key_ptr, key_length - 8),
- key.user_key()) == 0) {
+ Slice(key_ptr, key_length - 8), key.user_key()) == 0) {
// Correct user key
const uint64_t tag = DecodeFixed64(key_ptr + key_length - 8);
switch (static_cast<ValueType>(tag & 0xff)) {
#define STORAGE_LEVELDB_DB_MEMTABLE_H_
#include <string>
-#include "leveldb/db.h"
+
#include "db/dbformat.h"
#include "db/skiplist.h"
+#include "leveldb/db.h"
#include "util/arena.h"
namespace leveldb {
// is zero and the caller must call Ref() at least once.
explicit MemTable(const InternalKeyComparator& comparator);
+ MemTable(const MemTable&) = delete;
+ MemTable& operator=(const MemTable&) = delete;
+
// Increase reference count.
void Ref() { ++refs_; }
// Add an entry into memtable that maps key to value at the
// specified sequence number and with the specified type.
// Typically value will be empty if type==kTypeDeletion.
- void Add(SequenceNumber seq, ValueType type,
- const Slice& key,
+ void Add(SequenceNumber seq, ValueType type, const Slice& key,
const Slice& value);
// If memtable contains a value for key, store it in *value and return true.
bool Get(const LookupKey& key, std::string* value, Status* s);
private:
- ~MemTable(); // Private since only Unref() should be used to delete it
+ friend class MemTableIterator;
+ friend class MemTableBackwardIterator;
struct KeyComparator {
const InternalKeyComparator comparator;
- explicit KeyComparator(const InternalKeyComparator& c) : comparator(c) { }
+ explicit KeyComparator(const InternalKeyComparator& c) : comparator(c) {}
int operator()(const char* a, const char* b) const;
};
- friend class MemTableIterator;
- friend class MemTableBackwardIterator;
typedef SkipList<const char*, KeyComparator> Table;
+ ~MemTable(); // Private since only Unref() should be used to delete it
+
KeyComparator comparator_;
int refs_;
Arena arena_;
Table table_;
-
- // No copying allowed
- MemTable(const MemTable&);
- void operator=(const MemTable&);
};
} // namespace leveldb
std::string current;
ASSERT_OK(ReadFileToString(env_, CurrentFileName(dbname_), &current));
size_t len = current.size();
- if (len > 0 && current[len-1] == '\n') {
+ if (len > 0 && current[len - 1] == '\n') {
current.resize(len - 1);
}
return dbname_ + "/" + current;
}
- std::string LogName(uint64_t number) {
- return LogFileName(dbname_, number);
- }
+ std::string LogName(uint64_t number) { return LogFileName(dbname_, number); }
size_t DeleteLogFiles() {
// Linux allows unlinking open files, but Windows does not.
return logs.size();
}
- void DeleteManifestFile() {
- ASSERT_OK(env_->DeleteFile(ManifestFileName()));
- }
+ void DeleteManifestFile() { ASSERT_OK(env_->DeleteFile(ManifestFileName())); }
- uint64_t FirstLogFile() {
- return GetFiles(kLogFile)[0];
- }
+ uint64_t FirstLogFile() { return GetFiles(kLogFile)[0]; }
std::vector<uint64_t> GetFiles(FileType t) {
std::vector<std::string> filenames;
return result;
}
- int NumLogs() {
- return GetFiles(kLogFile).size();
- }
+ int NumLogs() { return GetFiles(kLogFile).size(); }
- int NumTables() {
- return GetFiles(kTableFile).size();
- }
+ int NumTables() { return GetFiles(kTableFile).size(); }
uint64_t FileSize(const std::string& fname) {
uint64_t result;
return result;
}
- void CompactMemTable() {
- dbfull()->TEST_CompactMemTable();
- }
+ void CompactMemTable() { dbfull()->TEST_CompactMemTable(); }
// Directly construct a log file that sets key to val.
void MakeLogFile(uint64_t lognum, SequenceNumber seq, Slice key, Slice val) {
uint64_t len = FileSize(old_manifest);
WritableFile* file;
ASSERT_OK(env()->NewAppendableFile(old_manifest, &file));
- std::string zeroes(3*1048576 - static_cast<size_t>(len), 0);
+ std::string zeroes(3 * 1048576 - static_cast<size_t>(len), 0);
ASSERT_OK(file->Append(zeroes));
ASSERT_OK(file->Flush());
delete file;
// Force creation of multiple memtables by reducing the write buffer size.
Options opt;
opt.reuse_logs = true;
- opt.write_buffer_size = (kNum*100) / 2;
+ opt.write_buffer_size = (kNum * 100) / 2;
Open(&opt);
ASSERT_LE(2, NumTables());
ASSERT_EQ(1, NumLogs());
// Make a bunch of uncompacted log files.
uint64_t old_log = FirstLogFile();
- MakeLogFile(old_log+1, 1000, "hello", "world");
- MakeLogFile(old_log+2, 1001, "hi", "there");
- MakeLogFile(old_log+3, 1002, "foo", "bar2");
+ MakeLogFile(old_log + 1, 1000, "hello", "world");
+ MakeLogFile(old_log + 2, 1001, "hi", "there");
+ MakeLogFile(old_log + 3, 1002, "foo", "bar2");
// Recover and check that all log files were processed.
Open();
ASSERT_LE(1, NumTables());
ASSERT_EQ(1, NumLogs());
uint64_t new_log = FirstLogFile();
- ASSERT_LE(old_log+3, new_log);
+ ASSERT_LE(old_log + 3, new_log);
ASSERT_EQ("bar2", Get("foo"));
ASSERT_EQ("world", Get("hello"));
ASSERT_EQ("there", Get("hi"));
// Check that introducing an older log file does not cause it to be re-read.
Close();
- MakeLogFile(old_log+1, 2000, "hello", "stale write");
+ MakeLogFile(old_log + 1, 2000, "hello", "stale write");
Open();
ASSERT_LE(1, NumTables());
ASSERT_EQ(1, NumLogs());
} // namespace leveldb
-int main(int argc, char** argv) {
- return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
"recovered %d files; %llu bytes. "
"Some data may have been lost. "
"****",
- dbname_.c_str(),
- static_cast<int>(tables_.size()),
- bytes);
+ dbname_.c_str(), static_cast<int>(tables_.size()), bytes);
}
return status;
}
SequenceNumber max_sequence;
};
- std::string const dbname_;
- Env* const env_;
- InternalKeyComparator const icmp_;
- InternalFilterPolicy const ipolicy_;
- Options const options_;
- bool owns_info_log_;
- bool owns_cache_;
- TableCache* table_cache_;
- VersionEdit edit_;
-
- std::vector<std::string> manifests_;
- std::vector<uint64_t> table_numbers_;
- std::vector<uint64_t> logs_;
- std::vector<TableInfo> tables_;
- uint64_t next_file_number_;
-
Status FindFiles() {
std::vector<std::string> filenames;
Status status = env_->GetChildren(dbname_, &filenames);
Status status = ConvertLogToTable(logs_[i]);
if (!status.ok()) {
Log(options_.info_log, "Log #%llu: ignoring conversion error: %s",
- (unsigned long long) logs_[i],
- status.ToString().c_str());
+ (unsigned long long)logs_[i], status.ToString().c_str());
}
ArchiveFile(logname);
}
virtual void Corruption(size_t bytes, const Status& s) {
// We print error messages for corruption, but continue repairing.
Log(info_log, "Log #%llu: dropping %d bytes; %s",
- (unsigned long long) lognum,
- static_cast<int>(bytes),
+ (unsigned long long)lognum, static_cast<int>(bytes),
s.ToString().c_str());
}
};
// corruptions cause entire commits to be skipped instead of
// propagating bad information (like overly large sequence
// numbers).
- log::Reader reader(lfile, &reporter, false/*do not checksum*/,
- 0/*initial_offset*/);
+ log::Reader reader(lfile, &reporter, false /*do not checksum*/,
+ 0 /*initial_offset*/);
// Read all the records and add to a memtable
std::string scratch;
int counter = 0;
while (reader.ReadRecord(&record, &scratch)) {
if (record.size() < 12) {
- reporter.Corruption(
- record.size(), Status::Corruption("log record too small"));
+ reporter.Corruption(record.size(),
+ Status::Corruption("log record too small"));
continue;
}
WriteBatchInternal::SetContents(&batch, record);
counter += WriteBatchInternal::Count(&batch);
} else {
Log(options_.info_log, "Log #%llu: ignoring %s",
- (unsigned long long) log,
- status.ToString().c_str());
+ (unsigned long long)log, status.ToString().c_str());
status = Status::OK(); // Keep going with rest of file
}
}
}
}
Log(options_.info_log, "Log #%llu: %d ops saved to Table #%llu %s",
- (unsigned long long) log,
- counter,
- (unsigned long long) meta.number,
+ (unsigned long long)log, counter, (unsigned long long)meta.number,
status.ToString().c_str());
return status;
}
ArchiveFile(TableFileName(dbname_, number));
ArchiveFile(SSTTableFileName(dbname_, number));
Log(options_.info_log, "Table #%llu: dropped: %s",
- (unsigned long long) t.meta.number,
- status.ToString().c_str());
+ (unsigned long long)t.meta.number, status.ToString().c_str());
return;
}
Slice key = iter->key();
if (!ParseInternalKey(key, &parsed)) {
Log(options_.info_log, "Table #%llu: unparsable key %s",
- (unsigned long long) t.meta.number,
- EscapeString(key).c_str());
+ (unsigned long long)t.meta.number, EscapeString(key).c_str());
continue;
}
}
delete iter;
Log(options_.info_log, "Table #%llu: %d entries %s",
- (unsigned long long) t.meta.number,
- counter,
- status.ToString().c_str());
+ (unsigned long long)t.meta.number, counter, status.ToString().c_str());
if (status.ok()) {
tables_.push_back(t);
s = env_->RenameFile(copy, orig);
if (s.ok()) {
Log(options_.info_log, "Table #%llu: %d entries repaired",
- (unsigned long long) t.meta.number, counter);
+ (unsigned long long)t.meta.number, counter);
tables_.push_back(t);
}
}
for (size_t i = 0; i < tables_.size(); i++) {
// TODO(opt): separate out into multiple levels
const TableInfo& t = tables_[i];
- edit_.AddFile(0, t.meta.number, t.meta.file_size,
- t.meta.smallest, t.meta.largest);
+ edit_.AddFile(0, t.meta.number, t.meta.file_size, t.meta.smallest,
+ t.meta.largest);
}
- //fprintf(stderr, "NewDescriptor:\n%s\n", edit_.DebugString().c_str());
+ // fprintf(stderr, "NewDescriptor:\n%s\n", edit_.DebugString().c_str());
{
log::Writer log(file);
std::string record;
new_file.append("/");
new_file.append((slash == nullptr) ? fname.c_str() : slash + 1);
Status s = env_->RenameFile(fname, new_file);
- Log(options_.info_log, "Archiving %s: %s\n",
- fname.c_str(), s.ToString().c_str());
+ Log(options_.info_log, "Archiving %s: %s\n", fname.c_str(),
+ s.ToString().c_str());
}
+
+ const std::string dbname_;
+ Env* const env_;
+ InternalKeyComparator const icmp_;
+ InternalFilterPolicy const ipolicy_;
+ const Options options_;
+ bool owns_info_log_;
+ bool owns_cache_;
+ TableCache* table_cache_;
+ VersionEdit edit_;
+
+ std::vector<std::string> manifests_;
+ std::vector<uint64_t> table_numbers_;
+ std::vector<uint64_t> logs_;
+ std::vector<TableInfo> tables_;
+ uint64_t next_file_number_;
};
} // namespace
class Arena;
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
class SkipList {
private:
struct Node;
// must remain allocated for the lifetime of the skiplist object.
explicit SkipList(Comparator cmp, Arena* arena);
+ SkipList(const SkipList&) = delete;
+ SkipList& operator=(const SkipList&) = delete;
+
// Insert key into the list.
// REQUIRES: nothing that compares equal to key is currently in the list.
void Insert(const Key& key);
private:
enum { kMaxHeight = 12 };
- // Immutable after construction
- Comparator const compare_;
- Arena* const arena_; // Arena used for allocations of nodes
-
- Node* const head_;
-
- // Modified only by Insert(). Read racily by readers, but stale
- // values are ok.
- std::atomic<int> max_height_; // Height of the entire list
-
inline int GetMaxHeight() const {
return max_height_.load(std::memory_order_relaxed);
}
- // Read/written only by Insert().
- Random rnd_;
-
Node* NewNode(const Key& key, int height);
int RandomHeight();
bool Equal(const Key& a, const Key& b) const { return (compare_(a, b) == 0); }
// Return head_ if list is empty.
Node* FindLast() const;
- // No copying allowed
- SkipList(const SkipList&);
- void operator=(const SkipList&);
+ // Immutable after construction
+ Comparator const compare_;
+ Arena* const arena_; // Arena used for allocations of nodes
+
+ Node* const head_;
+
+ // Modified only by Insert(). Read racily by readers, but stale
+ // values are ok.
+ std::atomic<int> max_height_; // Height of the entire list
+
+ // Read/written only by Insert().
+ Random rnd_;
};
// Implementation details follow
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
struct SkipList<Key, Comparator>::Node {
- explicit Node(const Key& k) : key(k) { }
+ explicit Node(const Key& k) : key(k) {}
Key const key;
std::atomic<Node*> next_[1];
};
-template<typename Key, class Comparator>
-typename SkipList<Key, Comparator>::Node*
-SkipList<Key, Comparator>::NewNode(const Key& key, int height) {
+template <typename Key, class Comparator>
+typename SkipList<Key, Comparator>::Node* SkipList<Key, Comparator>::NewNode(
+ const Key& key, int height) {
char* const node_memory = arena_->AllocateAligned(
sizeof(Node) + sizeof(std::atomic<Node*>) * (height - 1));
return new (node_memory) Node(key);
}
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
inline SkipList<Key, Comparator>::Iterator::Iterator(const SkipList* list) {
list_ = list;
node_ = nullptr;
}
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
inline bool SkipList<Key, Comparator>::Iterator::Valid() const {
return node_ != nullptr;
}
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
inline const Key& SkipList<Key, Comparator>::Iterator::key() const {
assert(Valid());
return node_->key;
}
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
inline void SkipList<Key, Comparator>::Iterator::Next() {
assert(Valid());
node_ = node_->Next(0);
}
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
inline void SkipList<Key, Comparator>::Iterator::Prev() {
// Instead of using explicit "prev" links, we just search for the
// last node that falls before key.
}
}
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
inline void SkipList<Key, Comparator>::Iterator::Seek(const Key& target) {
node_ = list_->FindGreaterOrEqual(target, nullptr);
}
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
inline void SkipList<Key, Comparator>::Iterator::SeekToFirst() {
node_ = list_->head_->Next(0);
}
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
inline void SkipList<Key, Comparator>::Iterator::SeekToLast() {
node_ = list_->FindLast();
if (node_ == list_->head_) {
}
}
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
int SkipList<Key, Comparator>::RandomHeight() {
// Increase height with probability 1 in kBranching
static const unsigned int kBranching = 4;
return height;
}
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
bool SkipList<Key, Comparator>::KeyIsAfterNode(const Key& key, Node* n) const {
// null n is considered infinite
return (n != nullptr) && (compare_(n->key, key) < 0);
}
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
typename SkipList<Key, Comparator>::Node*
SkipList<Key, Comparator>::FindGreaterOrEqual(const Key& key,
Node** prev) const {
}
}
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
typename SkipList<Key, Comparator>::Node*
SkipList<Key, Comparator>::FindLessThan(const Key& key) const {
Node* x = head_;
}
}
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
typename SkipList<Key, Comparator>::Node* SkipList<Key, Comparator>::FindLast()
const {
Node* x = head_;
}
}
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
SkipList<Key, Comparator>::SkipList(Comparator cmp, Arena* arena)
: compare_(cmp),
arena_(arena),
}
}
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
void SkipList<Key, Comparator>::Insert(const Key& key) {
// TODO(opt): We can use a barrier-free variant of FindGreaterOrEqual()
// here since Insert() is externally synchronized.
}
}
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
bool SkipList<Key, Comparator>::Contains(const Key& key) const {
Node* x = FindGreaterOrEqual(key, nullptr);
if (x != nullptr && Equal(key, x->key)) {
}
};
-class SkipTest { };
+class SkipTest {};
TEST(SkipTest, Empty) {
Arena arena;
// Compare against model iterator
for (std::set<Key>::reverse_iterator model_iter = keys.rbegin();
- model_iter != keys.rend();
- ++model_iter) {
+ model_iter != keys.rend(); ++model_iter) {
ASSERT_TRUE(iter.Valid());
ASSERT_EQ(*model_iter, iter.key());
iter.Prev();
static uint64_t hash(Key key) { return key & 0xff; }
static uint64_t HashNumbers(uint64_t k, uint64_t g) {
- uint64_t data[2] = { k, g };
+ uint64_t data[2] = {k, g};
return Hash(reinterpret_cast<char*>(data), sizeof(data), 0);
}
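+// A note on the key layout built by MakeKey below: the key index occupies
+// bits 63..40, the generation bits 39..8, and HashNumbers(key, generation)
+// the low 8 bits, which is why hash() above simply masks off the low byte.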
static Key MakeKey(uint64_t k, uint64_t g) {
- assert(sizeof(Key) == sizeof(uint64_t));
+ static_assert(sizeof(Key) == sizeof(uint64_t), "");
assert(k <= K); // We sometimes pass K to seek to the end of the skiplist
assert(g <= 0xffffffffu);
return ((k << 40) | (g << 8) | (HashNumbers(k, g) & 0xff));
void Set(int k, int v) {
generation[k].store(v, std::memory_order_release);
}
- int Get(int k) {
- return generation[k].load(std::memory_order_acquire);
- }
+ int Get(int k) { return generation[k].load(std::memory_order_acquire); }
State() {
for (int k = 0; k < K; k++) {
SkipList<Key, Comparator> list_;
public:
- ConcurrentTest() : list_(Comparator(), &arena_) { }
+ ConcurrentTest() : list_(Comparator(), &arena_) {}
// REQUIRES: External synchronization
void WriteStep(Random* rnd) {
// Note that generation 0 is never inserted, so it is ok if
// <*,0,*> is missing.
ASSERT_TRUE((gen(pos) == 0) ||
- (gen(pos) > static_cast<Key>(initial_state.Get(key(pos))))
- ) << "key: " << key(pos)
- << "; gen: " << gen(pos)
- << "; initgen: "
- << initial_state.Get(key(pos));
+ (gen(pos) > static_cast<Key>(initial_state.Get(key(pos)))))
+ << "key: " << key(pos) << "; gen: " << gen(pos)
+ << "; initgen: " << initial_state.Get(key(pos));
// Advance to next key in the valid key space
if (key(pos) < key(current)) {
int seed_;
std::atomic<bool> quit_flag_;
- enum ReaderState {
- STARTING,
- RUNNING,
- DONE
- };
+ enum ReaderState { STARTING, RUNNING, DONE };
explicit TestState(int s)
- : seed_(s),
- quit_flag_(false),
- state_(STARTING),
- state_cv_(&mu_) {}
+ : seed_(s), quit_flag_(false), state_(STARTING), state_cv_(&mu_) {}
void Wait(ReaderState s) LOCKS_EXCLUDED(mu_) {
mu_.Lock();
} // namespace leveldb
-int main(int argc, char** argv) {
- return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
}
bool empty() const { return head_.next_ == &head_; }
- SnapshotImpl* oldest() const { assert(!empty()); return head_.next_; }
- SnapshotImpl* newest() const { assert(!empty()); return head_.prev_; }
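+ // head_ is a sentinel node in a circular doubly-linked list, so the list
+ // is empty when head_ points back at itself, the oldest snapshot sits just
+ // after the sentinel, and the newest sits just before it.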
+ SnapshotImpl* oldest() const {
+ assert(!empty());
+ return head_.next_;
+ }
+ SnapshotImpl* newest() const {
+ assert(!empty());
+ return head_.prev_;
+ }
// Creates a SnapshotImpl and appends it to the end of the list.
SnapshotImpl* New(SequenceNumber sequence_number) {
cache->Release(h);
}
-TableCache::TableCache(const std::string& dbname,
- const Options& options,
+TableCache::TableCache(const std::string& dbname, const Options& options,
int entries)
: env_(options.env),
dbname_(dbname),
options_(options),
- cache_(NewLRUCache(entries)) {
-}
+ cache_(NewLRUCache(entries)) {}
-TableCache::~TableCache() {
- delete cache_;
-}
+TableCache::~TableCache() { delete cache_; }
Status TableCache::FindTable(uint64_t file_number, uint64_t file_size,
Cache::Handle** handle) {
}
Iterator* TableCache::NewIterator(const ReadOptions& options,
- uint64_t file_number,
- uint64_t file_size,
+ uint64_t file_number, uint64_t file_size,
Table** tableptr) {
if (tableptr != nullptr) {
*tableptr = nullptr;
return result;
}
-Status TableCache::Get(const ReadOptions& options,
- uint64_t file_number,
- uint64_t file_size,
- const Slice& k,
- void* arg,
- void (*saver)(void*, const Slice&, const Slice&)) {
+Status TableCache::Get(const ReadOptions& options, uint64_t file_number,
+ uint64_t file_size, const Slice& k, void* arg,
+ void (*handle_result)(void*, const Slice&,
+ const Slice&)) {
Cache::Handle* handle = nullptr;
Status s = FindTable(file_number, file_size, &handle);
if (s.ok()) {
Table* t = reinterpret_cast<TableAndFile*>(cache_->Value(handle))->table;
- s = t->InternalGet(options, k, arg, saver);
+ s = t->InternalGet(options, k, arg, handle_result);
cache_->Release(handle);
}
return s;
#ifndef STORAGE_LEVELDB_DB_TABLE_CACHE_H_
#define STORAGE_LEVELDB_DB_TABLE_CACHE_H_
-#include <string>
#include <stdint.h>
+
+#include <string>
+
#include "db/dbformat.h"
#include "leveldb/cache.h"
#include "leveldb/table.h"
// underlies the returned iterator. The returned "*tableptr" object is owned
// by the cache and should not be deleted, and is valid for as long as the
// returned iterator is live.
- Iterator* NewIterator(const ReadOptions& options,
- uint64_t file_number,
- uint64_t file_size,
- Table** tableptr = nullptr);
+ Iterator* NewIterator(const ReadOptions& options, uint64_t file_number,
+ uint64_t file_size, Table** tableptr = nullptr);
// If a seek to internal key "k" in specified file finds an entry,
// call (*handle_result)(arg, found_key, found_value).
- Status Get(const ReadOptions& options,
- uint64_t file_number,
- uint64_t file_size,
- const Slice& k,
- void* arg,
+ Status Get(const ReadOptions& options, uint64_t file_number,
+ uint64_t file_size, const Slice& k, void* arg,
void (*handle_result)(void*, const Slice&, const Slice&));
// Evict any entry for the specified file number
void Evict(uint64_t file_number);
private:
+ Status FindTable(uint64_t file_number, uint64_t file_size, Cache::Handle**);
+
Env* const env_;
const std::string dbname_;
const Options& options_;
Cache* cache_;
-
- Status FindTable(uint64_t file_number, uint64_t file_size, Cache::Handle**);
};
} // namespace leveldb
// Tag numbers for serialized VersionEdit. These numbers are written to
// disk and should not be changed.
enum Tag {
- kComparator = 1,
- kLogNumber = 2,
- kNextFileNumber = 3,
- kLastSequence = 4,
- kCompactPointer = 5,
- kDeletedFile = 6,
- kNewFile = 7,
+ kComparator = 1,
+ kLogNumber = 2,
+ kNextFileNumber = 3,
+ kLastSequence = 4,
+ kCompactPointer = 5,
+ kDeletedFile = 6,
+ kNewFile = 7,
// 8 was used for large value refs
- kPrevLogNumber = 9
+ kPrevLogNumber = 9
};
void VersionEdit::Clear() {
}
for (DeletedFileSet::const_iterator iter = deleted_files_.begin();
- iter != deleted_files_.end();
- ++iter) {
+ iter != deleted_files_.end(); ++iter) {
PutVarint32(dst, kDeletedFile);
PutVarint32(dst, iter->first); // level
PutVarint64(dst, iter->second); // file number
static bool GetLevel(Slice* input, int* level) {
uint32_t v;
- if (GetVarint32(input, &v) &&
- v < config::kNumLevels) {
+ if (GetVarint32(input, &v) && v < config::kNumLevels) {
*level = v;
return true;
} else {
break;
case kCompactPointer:
- if (GetLevel(&input, &level) &&
- GetInternalKey(&input, &key)) {
+ if (GetLevel(&input, &level) && GetInternalKey(&input, &key)) {
compact_pointers_.push_back(std::make_pair(level, key));
} else {
msg = "compaction pointer";
break;
case kDeletedFile:
- if (GetLevel(&input, &level) &&
- GetVarint64(&input, &number)) {
+ if (GetLevel(&input, &level) && GetVarint64(&input, &number)) {
deleted_files_.insert(std::make_pair(level, number));
} else {
msg = "deleted file";
break;
case kNewFile:
- if (GetLevel(&input, &level) &&
- GetVarint64(&input, &f.number) &&
+ if (GetLevel(&input, &level) && GetVarint64(&input, &f.number) &&
GetVarint64(&input, &f.file_size) &&
GetInternalKey(&input, &f.smallest) &&
GetInternalKey(&input, &f.largest)) {
r.append(compact_pointers_[i].second.DebugString());
}
for (DeletedFileSet::const_iterator iter = deleted_files_.begin();
- iter != deleted_files_.end();
- ++iter) {
+ iter != deleted_files_.end(); ++iter) {
r.append("\n DeleteFile: ");
AppendNumberTo(&r, iter->first);
r.append(" ");
#include <set>
#include <utility>
#include <vector>
+
#include "db/dbformat.h"
namespace leveldb {
class VersionSet;
struct FileMetaData {
+ FileMetaData() : refs(0), allowed_seeks(1 << 30), file_size(0) {}
+
int refs;
- int allowed_seeks; // Seeks allowed until compaction
+ int allowed_seeks; // Seeks allowed until compaction
uint64_t number;
- uint64_t file_size; // File size in bytes
- InternalKey smallest; // Smallest internal key served by table
- InternalKey largest; // Largest internal key served by table
-
- FileMetaData() : refs(0), allowed_seeks(1 << 30), file_size(0) { }
+ uint64_t file_size; // File size in bytes
+ InternalKey smallest; // Smallest internal key served by table
+ InternalKey largest; // Largest internal key served by table
};
class VersionEdit {
public:
VersionEdit() { Clear(); }
- ~VersionEdit() { }
+ ~VersionEdit() {}
void Clear();
// Add the specified file at the specified number.
// REQUIRES: This version has not been saved (see VersionSet::SaveTo)
// REQUIRES: "smallest" and "largest" are smallest and largest keys in file
- void AddFile(int level, uint64_t file,
- uint64_t file_size,
- const InternalKey& smallest,
- const InternalKey& largest) {
+ void AddFile(int level, uint64_t file, uint64_t file_size,
+ const InternalKey& smallest, const InternalKey& largest) {
FileMetaData f;
f.number = file;
f.file_size = file_size;
private:
friend class VersionSet;
- typedef std::set< std::pair<int, uint64_t> > DeletedFileSet;
+ typedef std::set<std::pair<int, uint64_t> > DeletedFileSet;
std::string comparator_;
uint64_t log_number_;
bool has_next_file_number_;
bool has_last_sequence_;
- std::vector< std::pair<int, InternalKey> > compact_pointers_;
+ std::vector<std::pair<int, InternalKey> > compact_pointers_;
DeletedFileSet deleted_files_;
- std::vector< std::pair<int, FileMetaData> > new_files_;
+ std::vector<std::pair<int, FileMetaData> > new_files_;
};
} // namespace leveldb
ASSERT_EQ(encoded, encoded2);
}
-class VersionEditTest { };
+class VersionEditTest {};
TEST(VersionEditTest, EncodeDecode) {
static const uint64_t kBig = 1ull << 50;
} // namespace leveldb
-int main(int argc, char** argv) {
- return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
#include "db/version_set.h"
-#include <algorithm>
#include <stdio.h>
+
+#include <algorithm>
+
#include "db/filename.h"
#include "db/log_reader.h"
#include "db/log_writer.h"
}
int FindFile(const InternalKeyComparator& icmp,
- const std::vector<FileMetaData*>& files,
- const Slice& key) {
+ const std::vector<FileMetaData*>& files, const Slice& key) {
uint32_t left = 0;
uint32_t right = files.size();
while (left < right) {
return right;
}
-static bool AfterFile(const Comparator* ucmp,
- const Slice* user_key, const FileMetaData* f) {
+static bool AfterFile(const Comparator* ucmp, const Slice* user_key,
+ const FileMetaData* f) {
// null user_key occurs before all keys and is therefore never after *f
return (user_key != nullptr &&
ucmp->Compare(*user_key, f->largest.user_key()) > 0);
}
-static bool BeforeFile(const Comparator* ucmp,
- const Slice* user_key, const FileMetaData* f) {
+static bool BeforeFile(const Comparator* ucmp, const Slice* user_key,
+ const FileMetaData* f) {
// null user_key occurs after all keys and is therefore never before *f
return (user_key != nullptr &&
ucmp->Compare(*user_key, f->smallest.user_key()) < 0);
}
-bool SomeFileOverlapsRange(
- const InternalKeyComparator& icmp,
- bool disjoint_sorted_files,
- const std::vector<FileMetaData*>& files,
- const Slice* smallest_user_key,
- const Slice* largest_user_key) {
+bool SomeFileOverlapsRange(const InternalKeyComparator& icmp,
+ bool disjoint_sorted_files,
+ const std::vector<FileMetaData*>& files,
+ const Slice* smallest_user_key,
+ const Slice* largest_user_key) {
const Comparator* ucmp = icmp.user_comparator();
if (!disjoint_sorted_files) {
// Need to check against all files
uint32_t index = 0;
if (smallest_user_key != nullptr) {
// Find the earliest possible internal key for smallest_user_key
- InternalKey small_key(*smallest_user_key, kMaxSequenceNumber,kValueTypeForSeek);
+ InternalKey small_key(*smallest_user_key, kMaxSequenceNumber,
+ kValueTypeForSeek);
index = FindFile(icmp, files, small_key.Encode());
}
public:
LevelFileNumIterator(const InternalKeyComparator& icmp,
const std::vector<FileMetaData*>* flist)
- : icmp_(icmp),
- flist_(flist),
- index_(flist->size()) { // Marks as invalid
- }
- virtual bool Valid() const {
- return index_ < flist_->size();
+ : icmp_(icmp), flist_(flist), index_(flist->size()) { // Marks as invalid
}
+ virtual bool Valid() const { return index_ < flist_->size(); }
virtual void Seek(const Slice& target) {
index_ = FindFile(icmp_, *flist_, target);
}
Slice value() const {
assert(Valid());
EncodeFixed64(value_buf_, (*flist_)[index_]->number);
- EncodeFixed64(value_buf_+8, (*flist_)[index_]->file_size);
+ EncodeFixed64(value_buf_ + 8, (*flist_)[index_]->file_size);
return Slice(value_buf_, sizeof(value_buf_));
}
virtual Status status() const { return Status::OK(); }
+
private:
const InternalKeyComparator icmp_;
const std::vector<FileMetaData*>* const flist_;
mutable char value_buf_[16];
};
-static Iterator* GetFileIterator(void* arg,
- const ReadOptions& options,
+static Iterator* GetFileIterator(void* arg, const ReadOptions& options,
const Slice& file_value) {
TableCache* cache = reinterpret_cast<TableCache*>(arg);
if (file_value.size() != 16) {
return NewErrorIterator(
Status::Corruption("FileReader invoked with unexpected value"));
} else {
- return cache->NewIterator(options,
- DecodeFixed64(file_value.data()),
+ return cache->NewIterator(options, DecodeFixed64(file_value.data()),
DecodeFixed64(file_value.data() + 8));
}
}
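+// A sketch of how the pieces fit together: NewConcatenatingIterator builds a
+// two-level iterator in which LevelFileNumIterator walks a level's sorted
+// file list yielding encoded (file number, file size) pairs, and
+// GetFileIterator decodes each pair and opens the referenced table through
+// the TableCache.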
Iterator* Version::NewConcatenatingIterator(const ReadOptions& options,
int level) const {
return NewTwoLevelIterator(
- new LevelFileNumIterator(vset_->icmp_, &files_[level]),
- &GetFileIterator, vset_->table_cache_, options);
+ new LevelFileNumIterator(vset_->icmp_, &files_[level]), &GetFileIterator,
+ vset_->table_cache_, options);
}
void Version::AddIterators(const ReadOptions& options,
std::vector<Iterator*>* iters) {
// Merge all level zero files together since they may overlap
for (size_t i = 0; i < files_[0].size(); i++) {
- iters->push_back(
- vset_->table_cache_->NewIterator(
- options, files_[0][i]->number, files_[0][i]->file_size));
+ iters->push_back(vset_->table_cache_->NewIterator(
+ options, files_[0][i]->number, files_[0][i]->file_size));
}
// For levels > 0, we can use a concatenating iterator that sequentially
Slice user_key;
std::string* value;
};
-}
+} // namespace
static void SaveValue(void* arg, const Slice& ikey, const Slice& v) {
Saver* s = reinterpret_cast<Saver*>(arg);
ParsedInternalKey parsed_key;
return a->number > b->number;
}
-void Version::ForEachOverlapping(Slice user_key, Slice internal_key,
- void* arg,
+void Version::ForEachOverlapping(Slice user_key, Slice internal_key, void* arg,
bool (*func)(void*, int, FileMetaData*)) {
// TODO(sanjay): Change Version::Get() to use this function.
const Comparator* ucmp = vset_->icmp_.user_comparator();
}
}
-Status Version::Get(const ReadOptions& options,
- const LookupKey& k,
- std::string* value,
- GetStats* stats) {
+Status Version::Get(const ReadOptions& options, const LookupKey& k,
+ std::string* value, GetStats* stats) {
Slice ikey = k.internal_key();
Slice user_key = k.user_key();
const Comparator* ucmp = vset_->icmp_.user_comparator();
saver.ucmp = ucmp;
saver.user_key = user_key;
saver.value = value;
- s = vset_->table_cache_->Get(options, f->number, f->file_size,
- ikey, &saver, SaveValue);
+ s = vset_->table_cache_->Get(options, f->number, f->file_size, ikey,
+ &saver, SaveValue);
if (!s.ok()) {
return s;
}
switch (saver.state) {
case kNotFound:
- break; // Keep searching in other files
+ break; // Keep searching in other files
case kFound:
return s;
case kDeleted:
return false;
}
-void Version::Ref() {
- ++refs_;
-}
+void Version::Ref() { ++refs_; }
void Version::Unref() {
assert(this != &vset_->dummy_versions_);
}
}
-bool Version::OverlapInLevel(int level,
- const Slice* smallest_user_key,
+bool Version::OverlapInLevel(int level, const Slice* smallest_user_key,
const Slice* largest_user_key) {
return SomeFileOverlapsRange(vset_->icmp_, (level > 0), files_[level],
smallest_user_key, largest_user_key);
}
-int Version::PickLevelForMemTableOutput(
- const Slice& smallest_user_key,
- const Slice& largest_user_key) {
+int Version::PickLevelForMemTableOutput(const Slice& smallest_user_key,
+ const Slice& largest_user_key) {
int level = 0;
if (!OverlapInLevel(0, &smallest_user_key, &largest_user_key)) {
// Push to next level if there is no overlap in next level,
}
// Store in "*inputs" all files in "level" that overlap [begin,end]
-void Version::GetOverlappingInputs(
- int level,
- const InternalKey* begin,
- const InternalKey* end,
- std::vector<FileMetaData*>* inputs) {
+void Version::GetOverlappingInputs(int level, const InternalKey* begin,
+ const InternalKey* end,
+ std::vector<FileMetaData*>* inputs) {
assert(level >= 0);
assert(level < config::kNumLevels);
inputs->clear();
user_end = end->user_key();
}
const Comparator* user_cmp = vset_->icmp_.user_comparator();
- for (size_t i = 0; i < files_[level].size(); ) {
+ for (size_t i = 0; i < files_[level].size();) {
FileMetaData* f = files_[level][i++];
const Slice file_start = f->smallest.user_key();
const Slice file_limit = f->largest.user_key();
user_begin = file_start;
inputs->clear();
i = 0;
- } else if (end != nullptr && user_cmp->Compare(file_limit,
- user_end) > 0) {
+ } else if (end != nullptr &&
+ user_cmp->Compare(file_limit, user_end) > 0) {
user_end = file_limit;
inputs->clear();
i = 0;
public:
// Initialize a builder with the files from *base and other info from *vset
- Builder(VersionSet* vset, Version* base)
- : vset_(vset),
- base_(base) {
+ Builder(VersionSet* vset, Version* base) : vset_(vset), base_(base) {
base_->Ref();
BySmallestKey cmp;
cmp.internal_comparator = &vset_->icmp_;
const FileSet* added = levels_[level].added_files;
std::vector<FileMetaData*> to_unref;
to_unref.reserve(added->size());
- for (FileSet::const_iterator it = added->begin();
- it != added->end(); ++it) {
+ for (FileSet::const_iterator it = added->begin(); it != added->end();
+ ++it) {
to_unref.push_back(*it);
}
delete added;
// Delete files
const VersionEdit::DeletedFileSet& del = edit->deleted_files_;
for (VersionEdit::DeletedFileSet::const_iterator iter = del.begin();
- iter != del.end();
- ++iter) {
+ iter != del.end(); ++iter) {
const int level = iter->first;
const uint64_t number = iter->second;
levels_[level].deleted_files.insert(number);
const FileSet* added = levels_[level].added_files;
v->files_[level].reserve(base_files.size() + added->size());
for (FileSet::const_iterator added_iter = added->begin();
- added_iter != added->end();
- ++added_iter) {
+ added_iter != added->end(); ++added_iter) {
// Add all smaller files listed in base_
- for (std::vector<FileMetaData*>::const_iterator bpos
- = std::upper_bound(base_iter, base_end, *added_iter, cmp);
- base_iter != bpos;
- ++base_iter) {
+ for (std::vector<FileMetaData*>::const_iterator bpos =
+ std::upper_bound(base_iter, base_end, *added_iter, cmp);
+ base_iter != bpos; ++base_iter) {
MaybeAddFile(v, level, *base_iter);
}
// Make sure there is no overlap in levels > 0
if (level > 0) {
for (uint32_t i = 1; i < v->files_[level].size(); i++) {
- const InternalKey& prev_end = v->files_[level][i-1]->largest;
+ const InternalKey& prev_end = v->files_[level][i - 1]->largest;
const InternalKey& this_begin = v->files_[level][i]->smallest;
if (vset_->icmp_.Compare(prev_end, this_begin) >= 0) {
fprintf(stderr, "overlapping ranges in same level %s vs. %s\n",
std::vector<FileMetaData*>* files = &v->files_[level];
if (level > 0 && !files->empty()) {
// Must not overlap
- assert(vset_->icmp_.Compare((*files)[files->size()-1]->largest,
+ assert(vset_->icmp_.Compare((*files)[files->size() - 1]->largest,
f->smallest) < 0);
}
f->refs++;
}
};
-VersionSet::VersionSet(const std::string& dbname,
- const Options* options,
+VersionSet::VersionSet(const std::string& dbname, const Options* options,
TableCache* table_cache,
const InternalKeyComparator* cmp)
: env_(options->env),
return s;
}
-Status VersionSet::Recover(bool *save_manifest) {
+Status VersionSet::Recover(bool* save_manifest) {
struct LogReporter : public log::Reader::Reporter {
Status* status;
virtual void Corruption(size_t bytes, const Status& s) {
if (!s.ok()) {
return s;
}
- if (current.empty() || current[current.size()-1] != '\n') {
+ if (current.empty() || current[current.size() - 1] != '\n') {
return Status::Corruption("CURRENT file does not end with newline");
}
current.resize(current.size() - 1);
s = env_->NewSequentialFile(dscname, &file);
if (!s.ok()) {
if (s.IsNotFound()) {
- return Status::Corruption(
- "CURRENT points to a non-existent file", s.ToString());
+ return Status::Corruption("CURRENT points to a non-existent file",
+ s.ToString());
}
return s;
}
{
LogReporter reporter;
reporter.status = &s;
- log::Reader reader(file, &reporter, true/*checksum*/, 0/*initial_offset*/);
+ log::Reader reader(file, &reporter, true /*checksum*/,
+ 0 /*initial_offset*/);
Slice record;
std::string scratch;
while (reader.ReadRecord(&record, &scratch) && s.ok()) {
int best_level = -1;
double best_score = -1;
- for (int level = 0; level < config::kNumLevels-1; level++) {
+ for (int level = 0; level < config::kNumLevels - 1; level++) {
double score;
if (level == 0) {
// We treat level-0 specially by bounding the number of files
// setting, or very high compression ratios, or lots of
// overwrites/deletions).
score = v->files_[level].size() /
- static_cast<double>(config::kL0_CompactionTrigger);
+ static_cast<double>(config::kL0_CompactionTrigger);
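+ // For example, assuming the default kL0_CompactionTrigger of 4 (defined
+ // in dbformat.h), eight level-0 files produce a score of 2.0.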
} else {
// Compute the ratio of current size to size limit.
const uint64_t level_bytes = TotalFileSize(v->files_[level]);
const char* VersionSet::LevelSummary(LevelSummaryStorage* scratch) const {
// Update code if kNumLevels changes
- assert(config::kNumLevels == 7);
+ static_assert(config::kNumLevels == 7, "");
snprintf(scratch->buffer, sizeof(scratch->buffer),
- "files[ %d %d %d %d %d %d %d ]",
- int(current_->files_[0].size()),
- int(current_->files_[1].size()),
- int(current_->files_[2].size()),
- int(current_->files_[3].size()),
- int(current_->files_[4].size()),
- int(current_->files_[5].size()),
- int(current_->files_[6].size()));
+ "files[ %d %d %d %d %d %d %d ]", int(current_->files_[0].size()),
+ int(current_->files_[1].size()), int(current_->files_[2].size()),
+ int(current_->files_[3].size()), int(current_->files_[4].size()),
+ int(current_->files_[5].size()), int(current_->files_[6].size()));
return scratch->buffer;
}
}
void VersionSet::AddLiveFiles(std::set<uint64_t>* live) {
- for (Version* v = dummy_versions_.next_;
- v != &dummy_versions_;
+ for (Version* v = dummy_versions_.next_; v != &dummy_versions_;
v = v->next_) {
for (int level = 0; level < config::kNumLevels; level++) {
const std::vector<FileMetaData*>& files = v->files_[level];
for (int level = 1; level < config::kNumLevels - 1; level++) {
for (size_t i = 0; i < current_->files_[level].size(); i++) {
const FileMetaData* f = current_->files_[level][i];
- current_->GetOverlappingInputs(level+1, &f->smallest, &f->largest,
+ current_->GetOverlappingInputs(level + 1, &f->smallest, &f->largest,
&overlaps);
const int64_t sum = TotalFileSize(overlaps);
if (sum > result) {
// *smallest, *largest.
// REQUIRES: inputs is not empty
void VersionSet::GetRange(const std::vector<FileMetaData*>& inputs,
- InternalKey* smallest,
- InternalKey* largest) {
+ InternalKey* smallest, InternalKey* largest) {
assert(!inputs.empty());
smallest->Clear();
largest->Clear();
// REQUIRES: inputs is not empty
void VersionSet::GetRange2(const std::vector<FileMetaData*>& inputs1,
const std::vector<FileMetaData*>& inputs2,
- InternalKey* smallest,
- InternalKey* largest) {
+ InternalKey* smallest, InternalKey* largest) {
std::vector<FileMetaData*> all = inputs1;
all.insert(all.end(), inputs2.begin(), inputs2.end());
GetRange(all, smallest, largest);
if (c->level() + which == 0) {
const std::vector<FileMetaData*>& files = c->inputs_[which];
for (size_t i = 0; i < files.size(); i++) {
- list[num++] = table_cache_->NewIterator(
- options, files[i]->number, files[i]->file_size);
+ list[num++] = table_cache_->NewIterator(options, files[i]->number,
+ files[i]->file_size);
}
} else {
// Create concatenating iterator for the files from this level
if (size_compaction) {
level = current_->compaction_level_;
assert(level >= 0);
- assert(level+1 < config::kNumLevels);
+ assert(level + 1 < config::kNumLevels);
c = new Compaction(options_, level);
// Pick the first file that comes after compact_pointer_[level]
return c;
}
+// Finds the largest key in a vector of files. Returns true if files is not
+// empty.
+bool FindLargestKey(const InternalKeyComparator& icmp,
+ const std::vector<FileMetaData*>& files,
+ InternalKey* largest_key) {
+ if (files.empty()) {
+ return false;
+ }
+ *largest_key = files[0]->largest;
+ for (size_t i = 1; i < files.size(); ++i) {
+ FileMetaData* f = files[i];
+ if (icmp.Compare(f->largest, *largest_key) > 0) {
+ *largest_key = f->largest;
+ }
+ }
+ return true;
+}
+
+// Finds the minimum file b2 = (l2, u2) in level_files for which l2 > u1 and
+// user_key(l2) = user_key(u1), where u1 = largest_key.
+FileMetaData* FindSmallestBoundaryFile(
+ const InternalKeyComparator& icmp,
+ const std::vector<FileMetaData*>& level_files,
+ const InternalKey& largest_key) {
+ const Comparator* user_cmp = icmp.user_comparator();
+ FileMetaData* smallest_boundary_file = nullptr;
+ for (size_t i = 0; i < level_files.size(); ++i) {
+ FileMetaData* f = level_files[i];
+ if (icmp.Compare(f->smallest, largest_key) > 0 &&
+ user_cmp->Compare(f->smallest.user_key(), largest_key.user_key()) ==
+ 0) {
+ if (smallest_boundary_file == nullptr ||
+ icmp.Compare(f->smallest, smallest_boundary_file->smallest) < 0) {
+ smallest_boundary_file = f;
+ }
+ }
+ }
+ return smallest_boundary_file;
+}
+
+// Extracts the largest file b1 from |compaction_files| and then searches for
+// a b2 in |level_files| for which user_key(u1) = user_key(l2). If it finds
+// such a file b2 (known as a boundary file), it adds it to |compaction_files|
+// and then searches again using this new upper bound.
+//
+// If there are two blocks, b1 = (l1, u1) and b2 = (l2, u2), where
+// user_key(u1) = user_key(l2), and if we compact b1 but not b2, then a
+// subsequent get operation will yield an incorrect result because it will
+// return the record from b2 in level i rather than from b1 because it
+// searches level by level for records matching the supplied user key.
+//
+// parameters:
+// in level_files: List of files to search for boundary files.
+// in/out compaction_files: List of files to extend by adding boundary files.
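+//
+// A concrete illustration (hypothetical keys): suppose level i holds
+//   b1 = ["a" @ 5 .. "foo" @ 7] and b2 = ["foo" @ 6 .. "z" @ 4],
+// so user_key(u1) == user_key(l2) == "foo" and l2 > u1 (internal keys with
+// equal user keys sort by decreasing sequence number). Compacting b1 alone
+// moves "foo" @ 7 to level i+1, and a later read of "foo" then stops at the
+// stale "foo" @ 6 while searching level i. AddBoundaryInputs therefore pulls
+// b2 into the compaction as well.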
+void AddBoundaryInputs(const InternalKeyComparator& icmp,
+ const std::vector<FileMetaData*>& level_files,
+ std::vector<FileMetaData*>* compaction_files) {
+ InternalKey largest_key;
+
+ // Quick return if compaction_files is empty.
+ if (!FindLargestKey(icmp, *compaction_files, &largest_key)) {
+ return;
+ }
+
+ bool continue_searching = true;
+ while (continue_searching) {
+ FileMetaData* smallest_boundary_file =
+ FindSmallestBoundaryFile(icmp, level_files, largest_key);
+
+ // If a boundary file was found advance largest_key, otherwise we're done.
+ if (smallest_boundary_file != nullptr) {
+ compaction_files->push_back(smallest_boundary_file);
+ largest_key = smallest_boundary_file->largest;
+ } else {
+ continue_searching = false;
+ }
+ }
+}
+
void VersionSet::SetupOtherInputs(Compaction* c) {
const int level = c->level();
InternalKey smallest, largest;
+
+ AddBoundaryInputs(icmp_, current_->files_[level], &c->inputs_[0]);
GetRange(c->inputs_[0], &smallest, &largest);
- current_->GetOverlappingInputs(level+1, &smallest, &largest, &c->inputs_[1]);
+ current_->GetOverlappingInputs(level + 1, &smallest, &largest,
+ &c->inputs_[1]);
// Get entire range covered by compaction
InternalKey all_start, all_limit;
if (!c->inputs_[1].empty()) {
std::vector<FileMetaData*> expanded0;
current_->GetOverlappingInputs(level, &all_start, &all_limit, &expanded0);
+ AddBoundaryInputs(icmp_, current_->files_[level], &expanded0);
const int64_t inputs0_size = TotalFileSize(c->inputs_[0]);
const int64_t inputs1_size = TotalFileSize(c->inputs_[1]);
const int64_t expanded0_size = TotalFileSize(expanded0);
InternalKey new_start, new_limit;
GetRange(expanded0, &new_start, &new_limit);
std::vector<FileMetaData*> expanded1;
- current_->GetOverlappingInputs(level+1, &new_start, &new_limit,
+ current_->GetOverlappingInputs(level + 1, &new_start, &new_limit,
&expanded1);
if (expanded1.size() == c->inputs_[1].size()) {
Log(options_->info_log,
"Expanding@%d %d+%d (%ld+%ld bytes) to %d+%d (%ld+%ld bytes)\n",
- level,
- int(c->inputs_[0].size()),
- int(c->inputs_[1].size()),
- long(inputs0_size), long(inputs1_size),
- int(expanded0.size()),
- int(expanded1.size()),
- long(expanded0_size), long(inputs1_size));
+ level, int(c->inputs_[0].size()), int(c->inputs_[1].size()),
+ long(inputs0_size), long(inputs1_size), int(expanded0.size()),
+ int(expanded1.size()), long(expanded0_size), long(inputs1_size));
smallest = new_start;
largest = new_limit;
c->inputs_[0] = expanded0;
c->edit_.SetCompactPointer(level, largest);
}
-Compaction* VersionSet::CompactRange(
- int level,
- const InternalKey* begin,
- const InternalKey* end) {
+Compaction* VersionSet::CompactRange(int level, const InternalKey* begin,
+ const InternalKey* end) {
std::vector<FileMetaData*> inputs;
current_->GetOverlappingInputs(level, begin, end, &inputs);
if (inputs.empty()) {
const Comparator* user_cmp = input_version_->vset_->icmp_.user_comparator();
for (int lvl = level_ + 2; lvl < config::kNumLevels; lvl++) {
const std::vector<FileMetaData*>& files = input_version_->files_[lvl];
- for (; level_ptrs_[lvl] < files.size(); ) {
+ for (; level_ptrs_[lvl] < files.size();) {
FileMetaData* f = files[level_ptrs_[lvl]];
if (user_cmp->Compare(user_key, f->largest.user_key()) <= 0) {
// We've advanced far enough
// Scan to find earliest grandparent file that contains key.
const InternalKeyComparator* icmp = &vset->icmp_;
while (grandparent_index_ < grandparents_.size() &&
- icmp->Compare(internal_key,
- grandparents_[grandparent_index_]->largest.Encode()) > 0) {
+ icmp->Compare(internal_key,
+ grandparents_[grandparent_index_]->largest.Encode()) >
+ 0) {
if (seen_key_) {
overlapped_bytes_ += grandparents_[grandparent_index_]->file_size;
}
#include <map>
#include <set>
#include <vector>
+
#include "db/dbformat.h"
#include "db/version_edit.h"
#include "port/port.h"
namespace leveldb {
-namespace log { class Writer; }
+namespace log {
+class Writer;
+}
class Compaction;
class Iterator;
// Return files.size() if there is no such file.
// REQUIRES: "files" contains a sorted list of non-overlapping files.
int FindFile(const InternalKeyComparator& icmp,
- const std::vector<FileMetaData*>& files,
- const Slice& key);
+ const std::vector<FileMetaData*>& files, const Slice& key);
// Returns true iff some file in "files" overlaps the user key range
// [*smallest,*largest].
class Version {
public:
- // Append to *iters a sequence of iterators that will
- // yield the contents of this Version when merged together.
- // REQUIRES: This version has been saved (see VersionSet::SaveTo)
- void AddIterators(const ReadOptions&, std::vector<Iterator*>* iters);
-
// Lookup the value for key. If found, store it in *val and
// return OK. Else return a non-OK status. Fills *stats.
// REQUIRES: lock is not held
FileMetaData* seek_file;
int seek_file_level;
};
+
+ // Append to *iters a sequence of iterators that will
+ // yield the contents of this Version when merged together.
+ // REQUIRES: This version has been saved (see VersionSet::SaveTo)
+ void AddIterators(const ReadOptions&, std::vector<Iterator*>* iters);
+
Status Get(const ReadOptions&, const LookupKey& key, std::string* val,
GetStats* stats);
void GetOverlappingInputs(
int level,
- const InternalKey* begin, // nullptr means before all keys
- const InternalKey* end, // nullptr means after all keys
+ const InternalKey* begin, // nullptr means before all keys
+ const InternalKey* end, // nullptr means after all keys
std::vector<FileMetaData*>* inputs);
// Returns true iff some file in the specified level overlaps
// some part of [*smallest_user_key,*largest_user_key].
// smallest_user_key==nullptr represents a key smaller than all the DB's keys.
// largest_user_key==nullptr represents a key largest than all the DB's keys.
- bool OverlapInLevel(int level,
- const Slice* smallest_user_key,
+ bool OverlapInLevel(int level, const Slice* smallest_user_key,
const Slice* largest_user_key);
// Return the level at which we should place a new memtable compaction
friend class VersionSet;
class LevelFileNumIterator;
+
+ explicit Version(VersionSet* vset)
+ : vset_(vset),
+ next_(this),
+ prev_(this),
+ refs_(0),
+ file_to_compact_(nullptr),
+ file_to_compact_level_(-1),
+ compaction_score_(-1),
+ compaction_level_(-1) {}
+
+ Version(const Version&) = delete;
+ Version& operator=(const Version&) = delete;
+
+ ~Version();
+
Iterator* NewConcatenatingIterator(const ReadOptions&, int level) const;
// Call func(arg, level, f) for every file that overlaps user_key in
// false, makes no more calls.
//
// REQUIRES: user portion of internal_key == user_key.
- void ForEachOverlapping(Slice user_key, Slice internal_key,
- void* arg,
+ void ForEachOverlapping(Slice user_key, Slice internal_key, void* arg,
bool (*func)(void*, int, FileMetaData*));
- VersionSet* vset_; // VersionSet to which this Version belongs
- Version* next_; // Next version in linked list
- Version* prev_; // Previous version in linked list
- int refs_; // Number of live refs to this version
+ VersionSet* vset_; // VersionSet to which this Version belongs
+ Version* next_; // Next version in linked list
+ Version* prev_; // Previous version in linked list
+ int refs_; // Number of live refs to this version
// List of files per level
std::vector<FileMetaData*> files_[config::kNumLevels];
// are initialized by Finalize().
double compaction_score_;
int compaction_level_;
-
- explicit Version(VersionSet* vset)
- : vset_(vset), next_(this), prev_(this), refs_(0),
- file_to_compact_(nullptr),
- file_to_compact_level_(-1),
- compaction_score_(-1),
- compaction_level_(-1) {
- }
-
- ~Version();
-
- // No copying allowed
- Version(const Version&);
- void operator=(const Version&);
};
class VersionSet {
public:
- VersionSet(const std::string& dbname,
- const Options* options,
- TableCache* table_cache,
- const InternalKeyComparator*);
+ VersionSet(const std::string& dbname, const Options* options,
+ TableCache* table_cache, const InternalKeyComparator*);
+ VersionSet(const VersionSet&) = delete;
+ VersionSet& operator=(const VersionSet&) = delete;
+
~VersionSet();
// Apply *edit to the current version to form a new descriptor that
EXCLUSIVE_LOCKS_REQUIRED(mu);
// Recover the last saved descriptor from persistent storage.
- Status Recover(bool *save_manifest);
+ Status Recover(bool* save_manifest);
// Return the current version.
Version* current() const { return current_; }
// the specified level. Returns nullptr if there is nothing in that
// level that overlaps the specified range. Caller should delete
// the result.
- Compaction* CompactRange(
- int level,
- const InternalKey* begin,
- const InternalKey* end);
+ Compaction* CompactRange(int level, const InternalKey* begin,
+ const InternalKey* end);
// Return the maximum overlapping data (in bytes) at next level for any
// file at a level >= 1.
void Finalize(Version* v);
- void GetRange(const std::vector<FileMetaData*>& inputs,
- InternalKey* smallest,
+ void GetRange(const std::vector<FileMetaData*>& inputs, InternalKey* smallest,
InternalKey* largest);
void GetRange2(const std::vector<FileMetaData*>& inputs1,
const std::vector<FileMetaData*>& inputs2,
- InternalKey* smallest,
- InternalKey* largest);
+ InternalKey* smallest, InternalKey* largest);
void SetupOtherInputs(Compaction* c);
// Per-level key at which the next compaction at that level should start.
// Either an empty string, or a valid InternalKey.
std::string compact_pointer_[config::kNumLevels];
-
- // No copying allowed
- VersionSet(const VersionSet&);
- void operator=(const VersionSet&);
};
// A Compaction encapsulates information about a compaction.
VersionEdit edit_;
// Each compaction reads inputs from "level_" and "level_+1"
- std::vector<FileMetaData*> inputs_[2]; // The two sets of inputs
+ std::vector<FileMetaData*> inputs_[2]; // The two sets of inputs
- // State used to check for number of of overlapping grandparent files
+ // State used to check for number of overlapping grandparent files
// (parent == level_ + 1, grandparent == level_ + 2)
std::vector<FileMetaData*> grandparents_;
size_t grandparent_index_; // Index in grandparent_starts_
class FindFileTest {
public:
- std::vector<FileMetaData*> files_;
- bool disjoint_sorted_files_;
-
- FindFileTest() : disjoint_sorted_files_(true) { }
+ FindFileTest() : disjoint_sorted_files_(true) {}
~FindFileTest() {
for (int i = 0; i < files_.size(); i++) {
(smallest != nullptr ? &s : nullptr),
(largest != nullptr ? &l : nullptr));
}
+
+ bool disjoint_sorted_files_;
+
+ private:
+ std::vector<FileMetaData*> files_;
};
TEST(FindFileTest, Empty) {
ASSERT_EQ(0, Find("foo"));
- ASSERT_TRUE(! Overlaps("a", "z"));
- ASSERT_TRUE(! Overlaps(nullptr, "z"));
- ASSERT_TRUE(! Overlaps("a", nullptr));
- ASSERT_TRUE(! Overlaps(nullptr, nullptr));
+ ASSERT_TRUE(!Overlaps("a", "z"));
+ ASSERT_TRUE(!Overlaps(nullptr, "z"));
+ ASSERT_TRUE(!Overlaps("a", nullptr));
+ ASSERT_TRUE(!Overlaps(nullptr, nullptr));
}
TEST(FindFileTest, Single) {
ASSERT_EQ(1, Find("q1"));
ASSERT_EQ(1, Find("z"));
- ASSERT_TRUE(! Overlaps("a", "b"));
- ASSERT_TRUE(! Overlaps("z1", "z2"));
+ ASSERT_TRUE(!Overlaps("a", "b"));
+ ASSERT_TRUE(!Overlaps("z1", "z2"));
ASSERT_TRUE(Overlaps("a", "p"));
ASSERT_TRUE(Overlaps("a", "q"));
ASSERT_TRUE(Overlaps("a", "z"));
ASSERT_TRUE(Overlaps("q", "q"));
ASSERT_TRUE(Overlaps("q", "q1"));
- ASSERT_TRUE(! Overlaps(nullptr, "j"));
- ASSERT_TRUE(! Overlaps("r", nullptr));
+ ASSERT_TRUE(!Overlaps(nullptr, "j"));
+ ASSERT_TRUE(!Overlaps("r", nullptr));
ASSERT_TRUE(Overlaps(nullptr, "p"));
ASSERT_TRUE(Overlaps(nullptr, "p1"));
ASSERT_TRUE(Overlaps("q", nullptr));
ASSERT_TRUE(Overlaps(nullptr, nullptr));
}
-
TEST(FindFileTest, Multiple) {
Add("150", "200");
Add("200", "250");
ASSERT_EQ(3, Find("450"));
ASSERT_EQ(4, Find("451"));
- ASSERT_TRUE(! Overlaps("100", "149"));
- ASSERT_TRUE(! Overlaps("251", "299"));
- ASSERT_TRUE(! Overlaps("451", "500"));
- ASSERT_TRUE(! Overlaps("351", "399"));
+ ASSERT_TRUE(!Overlaps("100", "149"));
+ ASSERT_TRUE(!Overlaps("251", "299"));
+ ASSERT_TRUE(!Overlaps("451", "500"));
+ ASSERT_TRUE(!Overlaps("351", "399"));
ASSERT_TRUE(Overlaps("100", "150"));
ASSERT_TRUE(Overlaps("100", "200"));
Add("200", "250");
Add("300", "350");
Add("400", "450");
- ASSERT_TRUE(! Overlaps(nullptr, "149"));
- ASSERT_TRUE(! Overlaps("451", nullptr));
+ ASSERT_TRUE(!Overlaps(nullptr, "149"));
+ ASSERT_TRUE(!Overlaps("451", nullptr));
ASSERT_TRUE(Overlaps(nullptr, nullptr));
ASSERT_TRUE(Overlaps(nullptr, "150"));
ASSERT_TRUE(Overlaps(nullptr, "199"));
TEST(FindFileTest, OverlapSequenceChecks) {
Add("200", "200", 5000, 3000);
- ASSERT_TRUE(! Overlaps("199", "199"));
- ASSERT_TRUE(! Overlaps("201", "300"));
+ ASSERT_TRUE(!Overlaps("199", "199"));
+ ASSERT_TRUE(!Overlaps("201", "300"));
ASSERT_TRUE(Overlaps("200", "200"));
ASSERT_TRUE(Overlaps("190", "200"));
ASSERT_TRUE(Overlaps("200", "210"));
Add("150", "600");
Add("400", "500");
disjoint_sorted_files_ = false;
- ASSERT_TRUE(! Overlaps("100", "149"));
- ASSERT_TRUE(! Overlaps("601", "700"));
+ ASSERT_TRUE(!Overlaps("100", "149"));
+ ASSERT_TRUE(!Overlaps("601", "700"));
ASSERT_TRUE(Overlaps("100", "150"));
ASSERT_TRUE(Overlaps("100", "200"));
ASSERT_TRUE(Overlaps("100", "300"));
ASSERT_TRUE(Overlaps("600", "700"));
}
-} // namespace leveldb
+void AddBoundaryInputs(const InternalKeyComparator& icmp,
+ const std::vector<FileMetaData*>& level_files,
+ std::vector<FileMetaData*>* compaction_files);
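+// Contract sketch inferred from the tests below: a boundary file b2 in
+// level_files begins at the same user key where some file b1 already in
+// compaction_files ends, but at lower sequence numbers; AddBoundaryInputs is
+// expected to append each such b2 to compaction_files so that compacting b1
+// without b2 cannot expose the older entries for that user key.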
+
+class AddBoundaryInputsTest {
+ public:
+ std::vector<FileMetaData*> level_files_;
+ std::vector<FileMetaData*> compaction_files_;
+ std::vector<FileMetaData*> all_files_;
+ InternalKeyComparator icmp_;
-int main(int argc, char** argv) {
- return leveldb::test::RunAllTests();
+ AddBoundaryInputsTest() : icmp_(BytewiseComparator()) {}
+
+ ~AddBoundaryInputsTest() {
+ for (size_t i = 0; i < all_files_.size(); ++i) {
+ delete all_files_[i];
+ }
+ all_files_.clear();
+ }
+
+ FileMetaData* CreateFileMetaData(uint64_t number, InternalKey smallest,
+ InternalKey largest) {
+ FileMetaData* f = new FileMetaData();
+ f->number = number;
+ f->smallest = smallest;
+ f->largest = largest;
+ all_files_.push_back(f);
+ return f;
+ }
+};
+
+TEST(AddBoundaryInputsTest, TestEmptyFileSets) {
+ AddBoundaryInputs(icmp_, level_files_, &compaction_files_);
+ ASSERT_TRUE(compaction_files_.empty());
+ ASSERT_TRUE(level_files_.empty());
}
+
+TEST(AddBoundaryInputsTest, TestEmptyLevelFiles) {
+ FileMetaData* f1 =
+ CreateFileMetaData(1, InternalKey("100", 2, kTypeValue),
+ InternalKey(InternalKey("100", 1, kTypeValue)));
+ compaction_files_.push_back(f1);
+
+ AddBoundaryInputs(icmp_, level_files_, &compaction_files_);
+ ASSERT_EQ(1, compaction_files_.size());
+ ASSERT_EQ(f1, compaction_files_[0]);
+ ASSERT_TRUE(level_files_.empty());
+}
+
+TEST(AddBoundaryInputsTest, TestEmptyCompactionFiles) {
+ FileMetaData* f1 =
+ CreateFileMetaData(1, InternalKey("100", 2, kTypeValue),
+ InternalKey(InternalKey("100", 1, kTypeValue)));
+ level_files_.push_back(f1);
+
+ AddBoundaryInputs(icmp_, level_files_, &compaction_files_);
+ ASSERT_TRUE(compaction_files_.empty());
+ ASSERT_EQ(1, level_files_.size());
+ ASSERT_EQ(f1, level_files_[0]);
+}
+
+TEST(AddBoundaryInputsTest, TestNoBoundaryFiles) {
+ FileMetaData* f1 =
+ CreateFileMetaData(1, InternalKey("100", 2, kTypeValue),
+ InternalKey(InternalKey("100", 1, kTypeValue)));
+ FileMetaData* f2 =
+ CreateFileMetaData(1, InternalKey("200", 2, kTypeValue),
+ InternalKey(InternalKey("200", 1, kTypeValue)));
+ FileMetaData* f3 =
+ CreateFileMetaData(1, InternalKey("300", 2, kTypeValue),
+ InternalKey(InternalKey("300", 1, kTypeValue)));
+
+ level_files_.push_back(f3);
+ level_files_.push_back(f2);
+ level_files_.push_back(f1);
+ compaction_files_.push_back(f2);
+ compaction_files_.push_back(f3);
+
+ AddBoundaryInputs(icmp_, level_files_, &compaction_files_);
+ ASSERT_EQ(2, compaction_files_.size());
+}
+
+TEST(AddBoundaryInputsTest, TestOneBoundaryFiles) {
+ FileMetaData* f1 =
+ CreateFileMetaData(1, InternalKey("100", 3, kTypeValue),
+ InternalKey(InternalKey("100", 2, kTypeValue)));
+ FileMetaData* f2 =
+ CreateFileMetaData(1, InternalKey("100", 1, kTypeValue),
+ InternalKey(InternalKey("200", 3, kTypeValue)));
+ FileMetaData* f3 =
+ CreateFileMetaData(1, InternalKey("300", 2, kTypeValue),
+ InternalKey(InternalKey("300", 1, kTypeValue)));
+
+ level_files_.push_back(f3);
+ level_files_.push_back(f2);
+ level_files_.push_back(f1);
+ compaction_files_.push_back(f1);
+
+ AddBoundaryInputs(icmp_, level_files_, &compaction_files_);
+ ASSERT_EQ(2, compaction_files_.size());
+ ASSERT_EQ(f1, compaction_files_[0]);
+ ASSERT_EQ(f2, compaction_files_[1]);
+}
+
+TEST(AddBoundaryInputsTest, TestTwoBoundaryFiles) {
+ FileMetaData* f1 =
+ CreateFileMetaData(1, InternalKey("100", 6, kTypeValue),
+ InternalKey(InternalKey("100", 5, kTypeValue)));
+ FileMetaData* f2 =
+ CreateFileMetaData(1, InternalKey("100", 2, kTypeValue),
+ InternalKey(InternalKey("300", 1, kTypeValue)));
+ FileMetaData* f3 =
+ CreateFileMetaData(1, InternalKey("100", 4, kTypeValue),
+ InternalKey(InternalKey("100", 3, kTypeValue)));
+
+ level_files_.push_back(f2);
+ level_files_.push_back(f3);
+ level_files_.push_back(f1);
+ compaction_files_.push_back(f1);
+
+ AddBoundaryInputs(icmp_, level_files_, &compaction_files_);
+ ASSERT_EQ(3, compaction_files_.size());
+ ASSERT_EQ(f1, compaction_files_[0]);
+ ASSERT_EQ(f3, compaction_files_[1]);
+ ASSERT_EQ(f2, compaction_files_[2]);
+}
+
+TEST(AddBoundaryInputsTest, TestDisjointFilePointers) {
+ FileMetaData* f1 =
+ CreateFileMetaData(1, InternalKey("100", 6, kTypeValue),
+ InternalKey(InternalKey("100", 5, kTypeValue)));
+ FileMetaData* f2 =
+ CreateFileMetaData(1, InternalKey("100", 6, kTypeValue),
+ InternalKey(InternalKey("100", 5, kTypeValue)));
+ FileMetaData* f3 =
+ CreateFileMetaData(1, InternalKey("100", 2, kTypeValue),
+ InternalKey(InternalKey("300", 1, kTypeValue)));
+ FileMetaData* f4 =
+ CreateFileMetaData(1, InternalKey("100", 4, kTypeValue),
+ InternalKey(InternalKey("100", 3, kTypeValue)));
+
+ level_files_.push_back(f2);
+ level_files_.push_back(f3);
+ level_files_.push_back(f4);
+
+ compaction_files_.push_back(f1);
+
+ AddBoundaryInputs(icmp_, level_files_, &compaction_files_);
+ ASSERT_EQ(3, compaction_files_.size());
+ ASSERT_EQ(f1, compaction_files_[0]);
+ ASSERT_EQ(f4, compaction_files_[1]);
+ ASSERT_EQ(f3, compaction_files_[2]);
+}
+
+} // namespace leveldb
+
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
#include "leveldb/write_batch.h"
-#include "leveldb/db.h"
#include "db/dbformat.h"
#include "db/memtable.h"
#include "db/write_batch_internal.h"
+#include "leveldb/db.h"
#include "util/coding.h"
namespace leveldb {
// WriteBatch header has an 8-byte sequence number followed by a 4-byte count.
static const size_t kHeader = 12;
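// Layout implied by the comment above (offsets only; the record encoding
// that follows the header is defined elsewhere in this file):
//   rep_[0..7]   sequence number (8 bytes)
//   rep_[8..11]  count (4 bytes)
//   rep_[12..]   count records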
-WriteBatch::WriteBatch() {
- Clear();
-}
+WriteBatch::WriteBatch() { Clear(); }
-WriteBatch::~WriteBatch() { }
+WriteBatch::~WriteBatch() {}
-WriteBatch::Handler::~Handler() { }
+WriteBatch::Handler::~Handler() {}
void WriteBatch::Clear() {
rep_.clear();
rep_.resize(kHeader);
}
-size_t WriteBatch::ApproximateSize() const {
- return rep_.size();
-}
+size_t WriteBatch::ApproximateSize() const { return rep_.size(); }
Status WriteBatch::Iterate(Handler* handler) const {
Slice input(rep_);
PutLengthPrefixedSlice(&rep_, key);
}
-void WriteBatch::Append(const WriteBatch &source) {
+void WriteBatch::Append(const WriteBatch& source) {
WriteBatchInternal::Append(this, &source);
}
};
} // namespace
-Status WriteBatchInternal::InsertInto(const WriteBatch* b,
- MemTable* memtable) {
+Status WriteBatchInternal::InsertInto(const WriteBatch* b, MemTable* memtable) {
MemTableInserter inserter;
inserter.sequence_ = WriteBatchInternal::Sequence(b);
inserter.mem_ = memtable;
// this batch.
static void SetSequence(WriteBatch* batch, SequenceNumber seq);
- static Slice Contents(const WriteBatch* batch) {
- return Slice(batch->rep_);
- }
+ static Slice Contents(const WriteBatch* batch) { return Slice(batch->rep_); }
- static size_t ByteSize(const WriteBatch* batch) {
- return batch->rep_.size();
- }
+ static size_t ByteSize(const WriteBatch* batch) { return batch->rep_.size(); }
static void SetContents(WriteBatch* batch, const Slice& contents);
} // namespace leveldb
-
#endif // STORAGE_LEVELDB_DB_WRITE_BATCH_INTERNAL_H_
return state;
}
-class WriteBatchTest { };
+class WriteBatchTest {};
TEST(WriteBatchTest, Empty) {
WriteBatch batch;
WriteBatchInternal::SetSequence(&batch, 100);
ASSERT_EQ(100, WriteBatchInternal::Sequence(&batch));
ASSERT_EQ(3, WriteBatchInternal::Count(&batch));
- ASSERT_EQ("Put(baz, boo)@102"
- "Delete(box)@101"
- "Put(foo, bar)@100",
- PrintContents(&batch));
+ ASSERT_EQ(
+ "Put(baz, boo)@102"
+ "Delete(box)@101"
+ "Put(foo, bar)@100",
+ PrintContents(&batch));
}
TEST(WriteBatchTest, Corruption) {
WriteBatchInternal::SetSequence(&batch, 200);
Slice contents = WriteBatchInternal::Contents(&batch);
WriteBatchInternal::SetContents(&batch,
- Slice(contents.data(),contents.size()-1));
- ASSERT_EQ("Put(foo, bar)@200"
- "ParseError()",
- PrintContents(&batch));
+ Slice(contents.data(), contents.size() - 1));
+ ASSERT_EQ(
+ "Put(foo, bar)@200"
+ "ParseError()",
+ PrintContents(&batch));
}
TEST(WriteBatchTest, Append) {
WriteBatchInternal::SetSequence(&b1, 200);
WriteBatchInternal::SetSequence(&b2, 300);
b1.Append(b2);
- ASSERT_EQ("",
- PrintContents(&b1));
+ ASSERT_EQ("", PrintContents(&b1));
b2.Put("a", "va");
b1.Append(b2);
- ASSERT_EQ("Put(a, va)@200",
- PrintContents(&b1));
+ ASSERT_EQ("Put(a, va)@200", PrintContents(&b1));
b2.Clear();
b2.Put("b", "vb");
b1.Append(b2);
- ASSERT_EQ("Put(a, va)@200"
- "Put(b, vb)@201",
- PrintContents(&b1));
+ ASSERT_EQ(
+ "Put(a, va)@200"
+ "Put(b, vb)@201",
+ PrintContents(&b1));
b2.Delete("foo");
b1.Append(b2);
- ASSERT_EQ("Put(a, va)@200"
- "Put(b, vb)@202"
- "Put(b, vb)@201"
- "Delete(foo)@203",
- PrintContents(&b1));
+ ASSERT_EQ(
+ "Put(a, va)@200"
+ "Put(b, vb)@202"
+ "Put(b, vb)@201"
+ "Delete(foo)@203",
+ PrintContents(&b1));
}
TEST(WriteBatchTest, ApproximateSize) {
} // namespace leveldb
-int main(int argc, char** argv) {
- return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
+#include <sqlite3.h>
#include <stdio.h>
#include <stdlib.h>
-#include <sqlite3.h>
+
#include "util/histogram.h"
#include "util/random.h"
#include "util/testutil.h"
"fillrand100K,"
"fillseq100K,"
"readseq,"
- "readrand100K,"
- ;
+ "readrand100K,";
// Number of key/values to place in database
static int FLAGS_num = 1000000;
// Use the db with the following name.
static const char* FLAGS_db = nullptr;
-inline
-static void ExecErrorCheck(int status, char *err_msg) {
+inline static void ExecErrorCheck(int status, char* err_msg) {
if (status != SQLITE_OK) {
fprintf(stderr, "SQL error: %s\n", err_msg);
sqlite3_free(err_msg);
}
}
-inline
-static void StepErrorCheck(int status) {
+inline static void StepErrorCheck(int status) {
if (status != SQLITE_DONE) {
fprintf(stderr, "SQL step error: status = %d\n", status);
exit(1);
}
}
-inline
-static void ErrorCheck(int status) {
+inline static void ErrorCheck(int status) {
if (status != SQLITE_OK) {
fprintf(stderr, "sqlite3 error: status = %d\n", status);
exit(1);
}
}
-inline
-static void WalCheckpoint(sqlite3* db_) {
+inline static void WalCheckpoint(sqlite3* db_) {
// Flush all writes to disk
if (FLAGS_WAL_enabled) {
sqlite3_wal_checkpoint_v2(db_, nullptr, SQLITE_CHECKPOINT_FULL, nullptr,
start++;
}
int limit = s.size();
- while (limit > start && isspace(s[limit-1])) {
+ while (limit > start && isspace(s[limit - 1])) {
limit--;
}
return Slice(s.data() + start, limit - start);
// State kept for progress messages
int done_;
- int next_report_; // When to report next
+ int next_report_; // When to report next
void PrintHeader() {
const int kKeySize = 16;
fprintf(stdout, "Values: %d bytes each\n", FLAGS_value_size);
fprintf(stdout, "Entries: %d\n", num_);
fprintf(stdout, "RawSize: %.1f MB (estimated)\n",
- ((static_cast<int64_t>(kKeySize + FLAGS_value_size) * num_)
- / 1048576.0));
+ ((static_cast<int64_t>(kKeySize + FLAGS_value_size) * num_) /
+ 1048576.0));
PrintWarnings();
fprintf(stdout, "------------------------------------------------\n");
}
void PrintWarnings() {
#if defined(__GNUC__) && !defined(__OPTIMIZE__)
- fprintf(stdout,
- "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n"
- );
+ fprintf(
+ stdout,
+ "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n");
#endif
#ifndef NDEBUG
fprintf(stdout,
done_++;
if (done_ >= next_report_) {
- if (next_report_ < 1000) next_report_ += 100;
- else if (next_report_ < 5000) next_report_ += 500;
- else if (next_report_ < 10000) next_report_ += 1000;
- else if (next_report_ < 50000) next_report_ += 5000;
- else if (next_report_ < 100000) next_report_ += 10000;
- else if (next_report_ < 500000) next_report_ += 50000;
- else next_report_ += 100000;
+ if (next_report_ < 1000)
+ next_report_ += 100;
+ else if (next_report_ < 5000)
+ next_report_ += 500;
+ else if (next_report_ < 10000)
+ next_report_ += 1000;
+ else if (next_report_ < 50000)
+ next_report_ += 5000;
+ else if (next_report_ < 100000)
+ next_report_ += 10000;
+ else if (next_report_ < 500000)
+ next_report_ += 50000;
+ else
+ next_report_ += 100000;
fprintf(stderr, "... finished %d ops%30s\r", done_, "");
fflush(stderr);
}
snprintf(rate, sizeof(rate), "%6.1f MB/s",
(bytes_ / 1048576.0) / (finish - start_));
if (!message_.empty()) {
- message_ = std::string(rate) + " " + message_;
+ message_ = std::string(rate) + " " + message_;
} else {
message_ = rate;
}
}
- fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n",
- name.ToString().c_str(),
- (finish - start_) * 1e6 / done_,
- (message_.empty() ? "" : " "),
+ fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n", name.ToString().c_str(),
+ (finish - start_) * 1e6 / done_, (message_.empty() ? "" : " "),
message_.c_str());
if (FLAGS_histogram) {
fprintf(stdout, "Microseconds per op:\n%s\n", hist_.ToString().c_str());
}
public:
- enum Order {
- SEQUENTIAL,
- RANDOM
- };
- enum DBState {
- FRESH,
- EXISTING
- };
+ enum Order { SEQUENTIAL, RANDOM };
+ enum DBState { FRESH, EXISTING };
Benchmark()
- : db_(nullptr),
- db_num_(0),
- num_(FLAGS_num),
- reads_(FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads),
- bytes_(0),
- rand_(301) {
+ : db_(nullptr),
+ db_num_(0),
+ num_(FLAGS_num),
+ reads_(FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads),
+ bytes_(0),
+ rand_(301) {
std::vector<std::string> files;
std::string test_dir;
Env::Default()->GetTestDirectory(&test_dir);
// Open database
std::string tmp_dir;
Env::Default()->GetTestDirectory(&tmp_dir);
- snprintf(file_name, sizeof(file_name),
- "%s/dbbench_sqlite3-%d.db",
- tmp_dir.c_str(),
- db_num_);
+ snprintf(file_name, sizeof(file_name), "%s/dbbench_sqlite3-%d.db",
+ tmp_dir.c_str(), db_num_);
status = sqlite3_open(file_name, &db_);
if (status) {
fprintf(stderr, "open error: %s\n", sqlite3_errmsg(db_));
std::string WAL_checkpoint = "PRAGMA wal_autocheckpoint = 4096";
status = sqlite3_exec(db_, WAL_stmt.c_str(), nullptr, nullptr, &err_msg);
ExecErrorCheck(status, err_msg);
- status = sqlite3_exec(db_, WAL_checkpoint.c_str(), nullptr, nullptr,
- &err_msg);
+ status =
+ sqlite3_exec(db_, WAL_checkpoint.c_str(), nullptr, nullptr, &err_msg);
ExecErrorCheck(status, err_msg);
}
// Change locking mode to exclusive and create tables/index for database
std::string locking_stmt = "PRAGMA locking_mode = EXCLUSIVE";
std::string create_stmt =
- "CREATE TABLE test (key blob, value blob, PRIMARY KEY(key))";
- std::string stmt_array[] = { locking_stmt, create_stmt };
+ "CREATE TABLE test (key blob, value blob, PRIMARY KEY(key))";
+ std::string stmt_array[] = {locking_stmt, create_stmt};
int stmt_array_length = sizeof(stmt_array) / sizeof(std::string);
for (int i = 0; i < stmt_array_length; i++) {
- status = sqlite3_exec(db_, stmt_array[i].c_str(), nullptr, nullptr,
- &err_msg);
+ status =
+ sqlite3_exec(db_, stmt_array[i].c_str(), nullptr, nullptr, &err_msg);
ExecErrorCheck(status, err_msg);
}
}
- void Write(bool write_sync, Order order, DBState state,
- int num_entries, int value_size, int entries_per_batch) {
+ void Write(bool write_sync, Order order, DBState state, int num_entries,
+ int value_size, int entries_per_batch) {
// Create new database if state == FRESH
if (state == FRESH) {
if (FLAGS_use_existing_db) {
std::string end_trans_str = "END TRANSACTION;";
// Check for synchronous flag in options
- std::string sync_stmt = (write_sync) ? "PRAGMA synchronous = FULL" :
- "PRAGMA synchronous = OFF";
+ std::string sync_stmt =
+ (write_sync) ? "PRAGMA synchronous = FULL" : "PRAGMA synchronous = OFF";
status = sqlite3_exec(db_, sync_stmt.c_str(), nullptr, nullptr, &err_msg);
ExecErrorCheck(status, err_msg);
// Preparing sqlite3 statements
- status = sqlite3_prepare_v2(db_, replace_str.c_str(), -1,
- &replace_stmt, nullptr);
+ status = sqlite3_prepare_v2(db_, replace_str.c_str(), -1, &replace_stmt,
+ nullptr);
ErrorCheck(status);
status = sqlite3_prepare_v2(db_, begin_trans_str.c_str(), -1,
&begin_trans_stmt, nullptr);
ErrorCheck(status);
- status = sqlite3_prepare_v2(db_, end_trans_str.c_str(), -1,
- &end_trans_stmt, nullptr);
+ status = sqlite3_prepare_v2(db_, end_trans_str.c_str(), -1, &end_trans_stmt,
+ nullptr);
ErrorCheck(status);
bool transaction = (entries_per_batch > 1);
const char* value = gen_.Generate(value_size).data();
// Create values for key-value pair
- const int k = (order == SEQUENTIAL) ? i + j :
- (rand_.Next() % num_entries);
+ const int k =
+ (order == SEQUENTIAL) ? i + j : (rand_.Next() % num_entries);
char key[100];
snprintf(key, sizeof(key), "%016d", k);
// Bind KV values into replace_stmt
status = sqlite3_bind_blob(replace_stmt, 1, key, 16, SQLITE_STATIC);
ErrorCheck(status);
- status = sqlite3_bind_blob(replace_stmt, 2, value,
- value_size, SQLITE_STATIC);
+ status = sqlite3_bind_blob(replace_stmt, 2, value, value_size,
+ SQLITE_STATIC);
ErrorCheck(status);
// Execute replace_stmt
status = sqlite3_prepare_v2(db_, begin_trans_str.c_str(), -1,
&begin_trans_stmt, nullptr);
ErrorCheck(status);
- status = sqlite3_prepare_v2(db_, end_trans_str.c_str(), -1,
- &end_trans_stmt, nullptr);
+ status = sqlite3_prepare_v2(db_, end_trans_str.c_str(), -1, &end_trans_stmt,
+ nullptr);
ErrorCheck(status);
status = sqlite3_prepare_v2(db_, read_str.c_str(), -1, &read_stmt, nullptr);
ErrorCheck(status);
ErrorCheck(status);
// Execute read statement
- while ((status = sqlite3_step(read_stmt)) == SQLITE_ROW) {}
+ while ((status = sqlite3_step(read_stmt)) == SQLITE_ROW) {
+ }
StepErrorCheck(status);
// Reset SQLite statement for another use
void ReadSequential() {
int status;
- sqlite3_stmt *pStmt;
+ sqlite3_stmt* pStmt;
std::string read_str = "SELECT * FROM test ORDER BY key";
status = sqlite3_prepare_v2(db_, read_str.c_str(), -1, &pStmt, nullptr);
status = sqlite3_finalize(pStmt);
ErrorCheck(status);
}
-
};
} // namespace leveldb
// Choose a location for the test database if none given with --db=<path>
if (FLAGS_db == nullptr) {
- leveldb::Env::Default()->GetTestDirectory(&default_db_path);
- default_db_path += "/dbbench";
- FLAGS_db = default_db_path.c_str();
+ leveldb::Env::Default()->GetTestDirectory(&default_db_path);
+ default_db_path += "/dbbench";
+ FLAGS_db = default_db_path.c_str();
}
leveldb::Benchmark benchmark;
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
+#include <kcpolydb.h>
#include <stdio.h>
#include <stdlib.h>
-#include <kcpolydb.h>
+
#include "util/histogram.h"
#include "util/random.h"
#include "util/testutil.h"
"fillrand100K,"
"fillseq100K,"
"readseq100K,"
- "readrand100K,"
- ;
+ "readrand100K,";
// Number of key/values to place in database
static int FLAGS_num = 1000000;
// Use the db with the following name.
static const char* FLAGS_db = nullptr;
-inline
-static void DBSynchronize(kyotocabinet::TreeDB* db_)
-{
+inline static void DBSynchronize(kyotocabinet::TreeDB* db_) {
// Synchronize will flush writes to disk
if (!db_->synchronize()) {
fprintf(stderr, "synchronize error: %s\n", db_->error().name());
start++;
}
int limit = s.size();
- while (limit > start && isspace(s[limit-1])) {
+ while (limit > start && isspace(s[limit - 1])) {
limit--;
}
return Slice(s.data() + start, limit - start);
// State kept for progress messages
int done_;
- int next_report_; // When to report next
+ int next_report_; // When to report next
void PrintHeader() {
const int kKeySize = 16;
static_cast<int>(FLAGS_value_size * FLAGS_compression_ratio + 0.5));
fprintf(stdout, "Entries: %d\n", num_);
fprintf(stdout, "RawSize: %.1f MB (estimated)\n",
- ((static_cast<int64_t>(kKeySize + FLAGS_value_size) * num_)
- / 1048576.0));
+ ((static_cast<int64_t>(kKeySize + FLAGS_value_size) * num_) /
+ 1048576.0));
fprintf(stdout, "FileSize: %.1f MB (estimated)\n",
- (((kKeySize + FLAGS_value_size * FLAGS_compression_ratio) * num_)
- / 1048576.0));
+ (((kKeySize + FLAGS_value_size * FLAGS_compression_ratio) * num_) /
+ 1048576.0));
PrintWarnings();
fprintf(stdout, "------------------------------------------------\n");
}
void PrintWarnings() {
#if defined(__GNUC__) && !defined(__OPTIMIZE__)
- fprintf(stdout,
- "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n"
- );
+ fprintf(
+ stdout,
+ "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n");
#endif
#ifndef NDEBUG
fprintf(stdout,
done_++;
if (done_ >= next_report_) {
- if (next_report_ < 1000) next_report_ += 100;
- else if (next_report_ < 5000) next_report_ += 500;
- else if (next_report_ < 10000) next_report_ += 1000;
- else if (next_report_ < 50000) next_report_ += 5000;
- else if (next_report_ < 100000) next_report_ += 10000;
- else if (next_report_ < 500000) next_report_ += 50000;
- else next_report_ += 100000;
+ if (next_report_ < 1000)
+ next_report_ += 100;
+ else if (next_report_ < 5000)
+ next_report_ += 500;
+ else if (next_report_ < 10000)
+ next_report_ += 1000;
+ else if (next_report_ < 50000)
+ next_report_ += 5000;
+ else if (next_report_ < 100000)
+ next_report_ += 10000;
+ else if (next_report_ < 500000)
+ next_report_ += 50000;
+ else
+ next_report_ += 100000;
fprintf(stderr, "... finished %d ops%30s\r", done_, "");
fflush(stderr);
}
snprintf(rate, sizeof(rate), "%6.1f MB/s",
(bytes_ / 1048576.0) / (finish - start_));
if (!message_.empty()) {
- message_ = std::string(rate) + " " + message_;
+ message_ = std::string(rate) + " " + message_;
} else {
message_ = rate;
}
}
- fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n",
- name.ToString().c_str(),
- (finish - start_) * 1e6 / done_,
- (message_.empty() ? "" : " "),
+ fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n", name.ToString().c_str(),
+ (finish - start_) * 1e6 / done_, (message_.empty() ? "" : " "),
message_.c_str());
if (FLAGS_histogram) {
fprintf(stdout, "Microseconds per op:\n%s\n", hist_.ToString().c_str());
}
public:
- enum Order {
- SEQUENTIAL,
- RANDOM
- };
- enum DBState {
- FRESH,
- EXISTING
- };
+ enum Order { SEQUENTIAL, RANDOM };
+ enum DBState { FRESH, EXISTING };
Benchmark()
- : db_(nullptr),
- num_(FLAGS_num),
- reads_(FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads),
- bytes_(0),
- rand_(301) {
+ : db_(nullptr),
+ num_(FLAGS_num),
+ reads_(FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads),
+ bytes_(0),
+ rand_(301) {
std::vector<std::string> files;
std::string test_dir;
Env::Default()->GetTestDirectory(&test_dir);
}
private:
- void Open(bool sync) {
+ void Open(bool sync) {
assert(db_ == nullptr);
// Initialize db_
db_num_++;
std::string test_dir;
Env::Default()->GetTestDirectory(&test_dir);
- snprintf(file_name, sizeof(file_name),
- "%s/dbbench_polyDB-%d.kct",
- test_dir.c_str(),
- db_num_);
+ snprintf(file_name, sizeof(file_name), "%s/dbbench_polyDB-%d.kct",
+ test_dir.c_str(), db_num_);
// Create tuning options and open the database
- int open_options = kyotocabinet::PolyDB::OWRITER |
- kyotocabinet::PolyDB::OCREATE;
- int tune_options = kyotocabinet::TreeDB::TSMALL |
- kyotocabinet::TreeDB::TLINEAR;
+ int open_options =
+ kyotocabinet::PolyDB::OWRITER | kyotocabinet::PolyDB::OCREATE;
+ int tune_options =
+ kyotocabinet::TreeDB::TSMALL | kyotocabinet::TreeDB::TLINEAR;
if (FLAGS_compression) {
tune_options |= kyotocabinet::TreeDB::TCOMPRESS;
db_->tune_compressor(&comp_);
db_->tune_options(tune_options);
db_->tune_page_cache(FLAGS_cache_size);
db_->tune_page(FLAGS_page_size);
- db_->tune_map(256LL<<20);
+ db_->tune_map(256LL << 20);
if (sync) {
open_options |= kyotocabinet::PolyDB::OAUTOSYNC;
}
}
}
- void Write(bool sync, Order order, DBState state,
- int num_entries, int value_size, int entries_per_batch) {
+ void Write(bool sync, Order order, DBState state, int num_entries,
+ int value_size, int entries_per_batch) {
// Create new database if state == FRESH
if (state == FRESH) {
if (FLAGS_use_existing_db) {
}
// Write to database
- for (int i = 0; i < num_entries; i++)
- {
+ for (int i = 0; i < num_entries; i++) {
const int k = (order == SEQUENTIAL) ? i : (rand_.Next() % num_entries);
char key[100];
snprintf(key, sizeof(key), "%016d", k);
// Choose a location for the test database if none given with --db=<path>
if (FLAGS_db == nullptr) {
- leveldb::Env::Default()->GetTestDirectory(&default_db_path);
- default_db_path += "/dbbench";
- FLAGS_db = default_db_path.c_str();
+ leveldb::Env::Default()->GetTestDirectory(&default_db_path);
+ default_db_path += "/dbbench";
+ FLAGS_db = default_db_path.c_str();
}
leveldb::Benchmark benchmark;
// and the caller must call Ref() at least once.
FileState() : refs_(0), size_(0) {}
+ // No copying allowed.
+ FileState(const FileState&) = delete;
+ FileState& operator=(const FileState&) = delete;
+
// Increase the reference count.
void Ref() {
MutexLock lock(&refs_mutex_);
}
private:
- // Private since only Unref() should be used to delete it.
- ~FileState() {
- Truncate();
- }
+ enum { kBlockSize = 8 * 1024 };
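+ // Offset math sketch (an assumption following from kBlockSize, not code
+ // from this file): byte offset o of the file lives in
+ // blocks_[o / kBlockSize] at index o % kBlockSize.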
- // No copying allowed.
- FileState(const FileState&);
- void operator=(const FileState&);
+ // Private since only Unref() should be used to delete it.
+ ~FileState() { Truncate(); }
port::Mutex refs_mutex_;
int refs_ GUARDED_BY(refs_mutex_);
mutable port::Mutex blocks_mutex_;
std::vector<char*> blocks_ GUARDED_BY(blocks_mutex_);
uint64_t size_ GUARDED_BY(blocks_mutex_);
-
- enum { kBlockSize = 8 * 1024 };
};
class SequentialFileImpl : public SequentialFile {
file_->Ref();
}
- ~SequentialFileImpl() {
- file_->Unref();
- }
+ ~SequentialFileImpl() { file_->Unref(); }
virtual Status Read(size_t n, Slice* result, char* scratch) {
Status s = file_->Read(pos_, n, result, scratch);
class RandomAccessFileImpl : public RandomAccessFile {
public:
- explicit RandomAccessFileImpl(FileState* file) : file_(file) {
- file_->Ref();
- }
+ explicit RandomAccessFileImpl(FileState* file) : file_(file) { file_->Ref(); }
- ~RandomAccessFileImpl() {
- file_->Unref();
- }
+ ~RandomAccessFileImpl() { file_->Unref(); }
virtual Status Read(uint64_t offset, size_t n, Slice* result,
char* scratch) const {
class WritableFileImpl : public WritableFile {
public:
- WritableFileImpl(FileState* file) : file_(file) {
- file_->Ref();
- }
+ WritableFileImpl(FileState* file) : file_(file) { file_->Ref(); }
- ~WritableFileImpl() {
- file_->Unref();
- }
+ ~WritableFileImpl() { file_->Unref(); }
- virtual Status Append(const Slice& data) {
- return file_->Append(data);
- }
+ virtual Status Append(const Slice& data) { return file_->Append(data); }
virtual Status Close() { return Status::OK(); }
virtual Status Flush() { return Status::OK(); }
class NoOpLogger : public Logger {
public:
- virtual void Logv(const char* format, va_list ap) { }
+ virtual void Logv(const char* format, va_list ap) {}
};
class InMemoryEnv : public EnvWrapper {
public:
- explicit InMemoryEnv(Env* base_env) : EnvWrapper(base_env) { }
+ explicit InMemoryEnv(Env* base_env) : EnvWrapper(base_env) {}
virtual ~InMemoryEnv() {
- for (FileSystem::iterator i = file_map_.begin(); i != file_map_.end(); ++i){
+ for (FileSystem::iterator i = file_map_.begin(); i != file_map_.end();
+ ++i) {
i->second->Unref();
}
}
MutexLock lock(&mutex_);
result->clear();
- for (FileSystem::iterator i = file_map_.begin(); i != file_map_.end(); ++i){
+ for (FileSystem::iterator i = file_map_.begin(); i != file_map_.end();
+ ++i) {
const std::string& filename = i->first;
if (filename.size() >= dir.size() + 1 && filename[dir.size()] == '/' &&
return Status::OK();
}
- virtual Status CreateDir(const std::string& dirname) {
- return Status::OK();
- }
+ virtual Status CreateDir(const std::string& dirname) { return Status::OK(); }
- virtual Status DeleteDir(const std::string& dirname) {
- return Status::OK();
- }
+ virtual Status DeleteDir(const std::string& dirname) { return Status::OK(); }
virtual Status GetFileSize(const std::string& fname, uint64_t* file_size) {
MutexLock lock(&mutex_);
return Status::OK();
}
- virtual Status RenameFile(const std::string& src,
- const std::string& target) {
+ virtual Status RenameFile(const std::string& src, const std::string& target) {
MutexLock lock(&mutex_);
if (file_map_.find(src) == file_map_.end()) {
return Status::IOError(src, "File not found");
private:
// Map from filenames to FileState objects, representing a simple file system.
typedef std::map<std::string, FileState*> FileSystem;
+
port::Mutex mutex_;
FileSystem file_map_ GUARDED_BY(mutex_);
};
} // namespace
-Env* NewMemEnv(Env* base_env) {
- return new InMemoryEnv(base_env);
-}
+Env* NewMemEnv(Env* base_env) { return new InMemoryEnv(base_env); }
} // namespace leveldb
#include "helpers/memenv/memenv.h"
+#include <string>
+#include <vector>
+
#include "db/db_impl.h"
#include "leveldb/db.h"
#include "leveldb/env.h"
#include "util/testharness.h"
-#include <string>
-#include <vector>
namespace leveldb {
class MemEnvTest {
public:
- Env* env_;
+ MemEnvTest() : env_(NewMemEnv(Env::Default())) {}
+ ~MemEnvTest() { delete env_; }
- MemEnvTest()
- : env_(NewMemEnv(Env::Default())) {
- }
- ~MemEnvTest() {
- delete env_;
- }
+ Env* env_;
};
TEST(MemEnvTest, Basics) {
// Read sequentially.
ASSERT_OK(env_->NewSequentialFile("/dir/f", &seq_file));
- ASSERT_OK(seq_file->Read(5, &result, scratch)); // Read "hello".
+ ASSERT_OK(seq_file->Read(5, &result, scratch)); // Read "hello".
ASSERT_EQ(0, result.compare("hello"));
ASSERT_OK(seq_file->Skip(1));
- ASSERT_OK(seq_file->Read(1000, &result, scratch)); // Read "world".
+ ASSERT_OK(seq_file->Read(1000, &result, scratch)); // Read "world".
ASSERT_EQ(0, result.compare("world"));
- ASSERT_OK(seq_file->Read(1000, &result, scratch)); // Try reading past EOF.
+ ASSERT_OK(seq_file->Read(1000, &result, scratch)); // Try reading past EOF.
ASSERT_EQ(0, result.size());
- ASSERT_OK(seq_file->Skip(100)); // Try to skip past end of file.
+ ASSERT_OK(seq_file->Skip(100)); // Try to skip past end of file.
ASSERT_OK(seq_file->Read(1000, &result, scratch));
ASSERT_EQ(0, result.size());
delete seq_file;
// Random reads.
ASSERT_OK(env_->NewRandomAccessFile("/dir/f", &rand_file));
- ASSERT_OK(rand_file->Read(6, 5, &result, scratch)); // Read "world".
+ ASSERT_OK(rand_file->Read(6, 5, &result, scratch)); // Read "world".
ASSERT_EQ(0, result.compare("world"));
- ASSERT_OK(rand_file->Read(0, 5, &result, scratch)); // Read "hello".
+ ASSERT_OK(rand_file->Read(0, 5, &result, scratch)); // Read "hello".
ASSERT_EQ(0, result.compare("hello"));
- ASSERT_OK(rand_file->Read(10, 100, &result, scratch)); // Read "d".
+ ASSERT_OK(rand_file->Read(10, 100, &result, scratch)); // Read "d".
ASSERT_EQ(0, result.compare("d"));
// Too high offset.
SequentialFile* seq_file;
Slice result;
ASSERT_OK(env_->NewSequentialFile("/dir/f", &seq_file));
- ASSERT_OK(seq_file->Read(3, &result, scratch)); // Read "foo".
+ ASSERT_OK(seq_file->Read(3, &result, scratch)); // Read "foo".
ASSERT_EQ(0, result.compare("foo"));
size_t read = 0;
}
ASSERT_TRUE(write_data == read_data);
delete seq_file;
- delete [] scratch;
+ delete[] scratch;
}
TEST(MemEnvTest, OverwriteOpenFile) {
} // namespace leveldb
-int main(int argc, char** argv) {
- return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
#include <stdarg.h>
#include <stddef.h>
#include <stdint.h>
+
#include "leveldb/export.h"
/* Exported types */
-typedef struct leveldb_t leveldb_t;
-typedef struct leveldb_cache_t leveldb_cache_t;
-typedef struct leveldb_comparator_t leveldb_comparator_t;
-typedef struct leveldb_env_t leveldb_env_t;
-typedef struct leveldb_filelock_t leveldb_filelock_t;
-typedef struct leveldb_filterpolicy_t leveldb_filterpolicy_t;
-typedef struct leveldb_iterator_t leveldb_iterator_t;
-typedef struct leveldb_logger_t leveldb_logger_t;
-typedef struct leveldb_options_t leveldb_options_t;
-typedef struct leveldb_randomfile_t leveldb_randomfile_t;
-typedef struct leveldb_readoptions_t leveldb_readoptions_t;
-typedef struct leveldb_seqfile_t leveldb_seqfile_t;
-typedef struct leveldb_snapshot_t leveldb_snapshot_t;
-typedef struct leveldb_writablefile_t leveldb_writablefile_t;
-typedef struct leveldb_writebatch_t leveldb_writebatch_t;
-typedef struct leveldb_writeoptions_t leveldb_writeoptions_t;
+typedef struct leveldb_t leveldb_t;
+typedef struct leveldb_cache_t leveldb_cache_t;
+typedef struct leveldb_comparator_t leveldb_comparator_t;
+typedef struct leveldb_env_t leveldb_env_t;
+typedef struct leveldb_filelock_t leveldb_filelock_t;
+typedef struct leveldb_filterpolicy_t leveldb_filterpolicy_t;
+typedef struct leveldb_iterator_t leveldb_iterator_t;
+typedef struct leveldb_logger_t leveldb_logger_t;
+typedef struct leveldb_options_t leveldb_options_t;
+typedef struct leveldb_randomfile_t leveldb_randomfile_t;
+typedef struct leveldb_readoptions_t leveldb_readoptions_t;
+typedef struct leveldb_seqfile_t leveldb_seqfile_t;
+typedef struct leveldb_snapshot_t leveldb_snapshot_t;
+typedef struct leveldb_writablefile_t leveldb_writablefile_t;
+typedef struct leveldb_writebatch_t leveldb_writebatch_t;
+typedef struct leveldb_writeoptions_t leveldb_writeoptions_t;
/* DB operations */
LEVELDB_EXPORT void leveldb_options_set_max_file_size(leveldb_options_t*,
size_t);
-enum {
- leveldb_no_compression = 0,
- leveldb_snappy_compression = 1
-};
+enum { leveldb_no_compression = 0, leveldb_snappy_compression = 1 };
LEVELDB_EXPORT void leveldb_options_set_compression(leveldb_options_t*, int);
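/* Usage sketch (leveldb_options_create() is declared earlier in this
 * header):
 *
 *   leveldb_options_t* opts = leveldb_options_create();
 *   leveldb_options_set_compression(opts, leveldb_snappy_compression);
 */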
/* Comparator */
LEVELDB_EXPORT int leveldb_minor_version();
#ifdef __cplusplus
-} /* end extern "C" */
+} /* end extern "C" */
#endif
-#endif /* STORAGE_LEVELDB_INCLUDE_C_H_ */
+#endif /* STORAGE_LEVELDB_INCLUDE_C_H_ */
#define STORAGE_LEVELDB_INCLUDE_CACHE_H_
#include <stdint.h>
+
#include "leveldb/export.h"
#include "leveldb/slice.h"
virtual ~Cache();
// Opaque handle to an entry stored in the cache.
- struct Handle { };
+ struct Handle {};
// Insert a mapping from key->value into the cache and assign it
// the specified charge against the total cache capacity.
#define STORAGE_LEVELDB_INCLUDE_COMPARATOR_H_
#include <string>
+
#include "leveldb/export.h"
namespace leveldb {
// If *start < limit, changes *start to a short string in [start,limit).
// Simple comparator implementations may return with *start unchanged,
// i.e., an implementation of this method that does nothing is correct.
- virtual void FindShortestSeparator(
- std::string* start,
- const Slice& limit) const = 0;
+ virtual void FindShortestSeparator(std::string* start,
+ const Slice& limit) const = 0;
// Changes *key to a short string >= *key.
// Simple comparator implementations may return with *key unchanged,
#include <stdint.h>
#include <stdio.h>
+
#include "leveldb/export.h"
#include "leveldb/iterator.h"
#include "leveldb/options.h"
// Update CMakeLists.txt if you change these
static const int kMajorVersion = 1;
-static const int kMinorVersion = 21;
+static const int kMinorVersion = 22;
struct Options;
struct ReadOptions;
// A range of keys
struct LEVELDB_EXPORT Range {
- Slice start; // Included in the range
- Slice limit; // Not included in the range
+ Range() {}
+ Range(const Slice& s, const Slice& l) : start(s), limit(l) {}
- Range() { }
- Range(const Slice& s, const Slice& l) : start(s), limit(l) { }
+ Slice start; // Included in the range
+ Slice limit; // Not included in the range
};
// A DB is a persistent ordered map from keys to values.
// OK on success.
// Stores nullptr in *dbptr and returns a non-OK status on error.
// Caller should delete *dbptr when it is no longer needed.
- static Status Open(const Options& options,
- const std::string& name,
+ static Status Open(const Options& options, const std::string& name,
DB** dbptr);
DB() = default;
// Set the database entry for "key" to "value". Returns OK on success,
// and a non-OK status on error.
// Note: consider setting options.sync = true.
- virtual Status Put(const WriteOptions& options,
- const Slice& key,
+ virtual Status Put(const WriteOptions& options, const Slice& key,
const Slice& value) = 0;
// Remove the database entry (if any) for "key". Returns OK on
// a status for which Status::IsNotFound() returns true.
//
// May return some other Status on an error.
- virtual Status Get(const ReadOptions& options,
- const Slice& key, std::string* value) = 0;
+ virtual Status Get(const ReadOptions& options, const Slice& key,
+ std::string* value) = 0;
// Return a heap-allocated iterator over the contents of the database.
// The result of NewIterator() is initially invalid (caller must
#define STORAGE_LEVELDB_INCLUDE_DUMPFILE_H_
#include <string>
+
#include "leveldb/env.h"
#include "leveldb/export.h"
#include "leveldb/status.h"
#include <stdarg.h>
#include <stdint.h>
+
#include <string>
#include <vector>
+
#include "leveldb/export.h"
#include "leveldb/status.h"
// added to the same Env may run concurrently in different threads.
// I.e., the caller may not assume that background work items are
// serialized.
- virtual void Schedule(
- void (*function)(void* arg),
- void* arg) = 0;
+ virtual void Schedule(void (*function)(void* arg), void* arg) = 0;
// Start a new thread, invoking "function(arg)" within the new thread.
// When "function(arg)" returns, the thread will be destroyed.
virtual void StartThread(void (*function)(void* arg), void* arg) = 0;
// *path is set to a temporary directory that can be used for testing. It may
- // or many not have just been created. The directory may or may not differ
+ // or may not have just been created. The directory may or may not differ
// between runs of the same process, but subsequent calls will return the
// same directory.
virtual Status GetTestDirectory(std::string* path) = 0;
// Log the specified data to *info_log if info_log is non-null.
void Log(Logger* info_log, const char* format, ...)
-# if defined(__GNUC__) || defined(__clang__)
- __attribute__((__format__ (__printf__, 2, 3)))
-# endif
+#if defined(__GNUC__) || defined(__clang__)
+ __attribute__((__format__(__printf__, 2, 3)))
+#endif
;
// A utility routine: write "data" to the named file.
class LEVELDB_EXPORT EnvWrapper : public Env {
public:
// Initialize an EnvWrapper that delegates all calls to *t.
- explicit EnvWrapper(Env* t) : target_(t) { }
+ explicit EnvWrapper(Env* t) : target_(t) {}
virtual ~EnvWrapper();
// Return the target to which this Env forwards all calls.
Status NewLogger(const std::string& fname, Logger** result) override {
return target_->NewLogger(fname, result);
}
- uint64_t NowMicros() override {
- return target_->NowMicros();
- }
+ uint64_t NowMicros() override { return target_->NowMicros(); }
void SleepForMicroseconds(int micros) override {
target_->SleepForMicroseconds(micros);
}
#define STORAGE_LEVELDB_INCLUDE_FILTER_POLICY_H_
#include <string>
+
#include "leveldb/export.h"
namespace leveldb {
//
// Warning: do not change the initial contents of *dst. Instead,
// append the newly constructed filter to *dst.
- virtual void CreateFilter(const Slice* keys, int n, std::string* dst)
- const = 0;
+ virtual void CreateFilter(const Slice* keys, int n,
+ std::string* dst) const = 0;
// "filter" contains the data appended by a preceding call to
// CreateFilter() on this class. This method must return true if
// Cleanup functions are stored in a single-linked list.
// The list's head node is inlined in the iterator.
struct CleanupNode {
+ // True if the node is not used. Only head nodes might be unused.
+ bool IsEmpty() const { return function == nullptr; }
+ // Invokes the cleanup function.
+ void Run() {
+ assert(function != nullptr);
+ (*function)(arg1, arg2);
+ }
+
// The head node is used if the function pointer is not null.
CleanupFunction function;
void* arg1;
void* arg2;
CleanupNode* next;
-
- // True if the node is not used. Only head nodes might be unused.
- bool IsEmpty() const { return function == nullptr; }
- // Invokes the cleanup function.
- void Run() { assert(function != nullptr); (*function)(arg1, arg2); }
};
CleanupNode cleanup_head_;
};
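// Usage sketch for the cleanup list above (DeleteBlock is a hypothetical
// helper; RegisterCleanup is the public registration hook on Iterator):
//
//   static void DeleteBlock(void* arg1, void* /*arg2*/) {
//     delete reinterpret_cast<Block*>(arg1);
//   }
//   iter->RegisterCleanup(&DeleteBlock, block, nullptr);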
#define STORAGE_LEVELDB_INCLUDE_OPTIONS_H_
#include <stddef.h>
+
#include "leveldb/export.h"
namespace leveldb {
enum CompressionType {
// NOTE: do not change the values of existing entries, as these are
// part of the persistent format on disk.
- kNoCompression = 0x0,
+ kNoCompression = 0x0,
kSnappyCompression = 0x1
};
// Options to control the behavior of a database (passed to DB::Open)
struct LEVELDB_EXPORT Options {
+ // Create an Options object with default values for all fields.
+ Options();
+
// -------------------
// Parameters that affect behavior
// Many applications will benefit from passing the result of
// NewBloomFilterPolicy() here.
const FilterPolicy* filter_policy = nullptr;
-
- // Create an Options object with default values for all fields.
- Options();
};
// Options that control read operations
struct LEVELDB_EXPORT ReadOptions {
+ ReadOptions() = default;
+
// If true, all data read from underlying storage will be
// verified against corresponding checksums.
bool verify_checksums = false;
// not have been released). If "snapshot" is null, use an implicit
// snapshot of the state at the beginning of this read operation.
const Snapshot* snapshot = nullptr;
-
- ReadOptions() = default;
};
// Options that control write operations
struct LEVELDB_EXPORT WriteOptions {
+ WriteOptions() = default;
+
// If true, the write will be flushed from the operating system
// buffer cache (by calling WritableFile::Sync()) before the write
// is considered complete. If this flag is true, writes will be
// with sync==true has similar crash semantics to a "write()"
// system call followed by "fsync()".
bool sync = false;
-
- WriteOptions() = default;
};
} // namespace leveldb
#include <assert.h>
#include <stddef.h>
#include <string.h>
+
#include <string>
+
#include "leveldb/export.h"
namespace leveldb {
class LEVELDB_EXPORT Slice {
public:
// Create an empty slice.
- Slice() : data_(""), size_(0) { }
+ Slice() : data_(""), size_(0) {}
// Create a slice that refers to d[0,n-1].
- Slice(const char* d, size_t n) : data_(d), size_(n) { }
+ Slice(const char* d, size_t n) : data_(d), size_(n) {}
// Create a slice that refers to the contents of "s"
- Slice(const std::string& s) : data_(s.data()), size_(s.size()) { }
+ Slice(const std::string& s) : data_(s.data()), size_(s.size()) {}
// Create a slice that refers to s[0,strlen(s)-1]
- Slice(const char* s) : data_(s), size_(strlen(s)) { }
+ Slice(const char* s) : data_(s), size_(strlen(s)) {}
// Intentionally copyable.
Slice(const Slice&) = default;
}
// Change this slice to refer to an empty array
- void clear() { data_ = ""; size_ = 0; }
+ void clear() {
+ data_ = "";
+ size_ = 0;
+ }
// Drop the first "n" bytes from this slice.
void remove_prefix(size_t n) {
// Return true iff "x" is a prefix of "*this"
bool starts_with(const Slice& x) const {
- return ((size_ >= x.size_) &&
- (memcmp(data_, x.data_, x.size_) == 0));
+ return ((size_ >= x.size_) && (memcmp(data_, x.data_, x.size_) == 0));
}
private:
(memcmp(x.data(), y.data(), x.size()) == 0));
}
-inline bool operator!=(const Slice& x, const Slice& y) {
- return !(x == y);
-}
+inline bool operator!=(const Slice& x, const Slice& y) { return !(x == y); }
inline int Slice::compare(const Slice& b) const {
const size_t min_len = (size_ < b.size_) ? size_ : b.size_;
int r = memcmp(data_, b.data_, min_len);
if (r == 0) {
- if (size_ < b.size_) r = -1;
- else if (size_ > b.size_) r = +1;
+ if (size_ < b.size_)
+ r = -1;
+ else if (size_ > b.size_)
+ r = +1;
}
return r;
}
} // namespace leveldb
-
#endif // STORAGE_LEVELDB_INCLUDE_SLICE_H_
#include <algorithm>
#include <string>
+
#include "leveldb/export.h"
#include "leveldb/slice.h"
class LEVELDB_EXPORT Status {
public:
// Create a success status.
- Status() noexcept : state_(nullptr) { }
+ Status() noexcept : state_(nullptr) {}
~Status() { delete[] state_; }
Status(const Status& rhs);
std::string ToString() const;
private:
- // OK status has a null state_. Otherwise, state_ is a new[] array
- // of the following form:
- // state_[0..3] == length of message
- // state_[4] == code
- // state_[5..] == message
- const char* state_;
-
enum Code {
kOk = 0,
kNotFound = 1,
Status(Code code, const Slice& msg, const Slice& msg2);
static const char* CopyState(const char* s);
+
+ // OK status has a null state_. Otherwise, state_ is a new[] array
+ // of the following form:
+ // state_[0..3] == length of message
+ // state_[4] == code
+ // state_[5..] == message
+ const char* state_;
};
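// Illustrative state_ contents for a status with code kNotFound and message
// "msg" (an assumption drawn from the comment above; the length is stored
// as a native-endian uint32_t):
//   state_[0..3] == 3, state_[4] == kNotFound, state_[5..7] == "msg"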
inline Status::Status(const Status& rhs) {
#define STORAGE_LEVELDB_INCLUDE_TABLE_H_
#include <stdint.h>
+
#include "leveldb/export.h"
#include "leveldb/iterator.h"
// for the duration of the returned table's lifetime.
//
// *file must remain live while this Table is in use.
- static Status Open(const Options& options,
- RandomAccessFile* file,
- uint64_t file_size,
- Table** table);
+ static Status Open(const Options& options, RandomAccessFile* file,
+ uint64_t file_size, Table** table);
Table(const Table&) = delete;
- void operator=(const Table&) = delete;
+ Table& operator=(const Table&) = delete;
~Table();
uint64_t ApproximateOffsetOf(const Slice& key) const;
private:
+ friend class TableCache;
struct Rep;
- Rep* rep_;
- explicit Table(Rep* rep) { rep_ = rep; }
static Iterator* BlockReader(void*, const ReadOptions&, const Slice&);
+ explicit Table(Rep* rep) : rep_(rep) {}
+
// Calls (*handle_result)(arg, ...) with the entry found after a call
// to Seek(key). May not make such a call if filter policy says
// that key is not present.
- friend class TableCache;
- Status InternalGet(
- const ReadOptions&, const Slice& key,
- void* arg,
- void (*handle_result)(void* arg, const Slice& k, const Slice& v));
-
+ Status InternalGet(const ReadOptions&, const Slice& key, void* arg,
+ void (*handle_result)(void* arg, const Slice& k,
+ const Slice& v));
void ReadMeta(const Footer& footer);
void ReadFilter(const Slice& filter_handle_value);
+
+ Rep* const rep_;
};
} // namespace leveldb
#define STORAGE_LEVELDB_INCLUDE_TABLE_BUILDER_H_
#include <stdint.h>
+
#include "leveldb/export.h"
#include "leveldb/options.h"
#include "leveldb/status.h"
TableBuilder(const Options& options, WritableFile* file);
TableBuilder(const TableBuilder&) = delete;
- void operator=(const TableBuilder&) = delete;
+ TableBuilder& operator=(const TableBuilder&) = delete;
// REQUIRES: Either Finish() or Abandon() has been called.
~TableBuilder();
#define STORAGE_LEVELDB_INCLUDE_WRITE_BATCH_H_
#include <string>
+
#include "leveldb/export.h"
#include "leveldb/status.h"
class LEVELDB_EXPORT WriteBatch {
public:
+ class LEVELDB_EXPORT Handler {
+ public:
+ virtual ~Handler();
+ virtual void Put(const Slice& key, const Slice& value) = 0;
+ virtual void Delete(const Slice& key) = 0;
+ };
+
WriteBatch();
// Intentionally copyable.
WriteBatch(const WriteBatch&) = default;
- WriteBatch& operator =(const WriteBatch&) = default;
+ WriteBatch& operator=(const WriteBatch&) = default;
~WriteBatch();
void Append(const WriteBatch& source);
// Support for iterating over the contents of a batch.
- class Handler {
- public:
- virtual ~Handler();
- virtual void Put(const Slice& key, const Slice& value) = 0;
- virtual void Delete(const Slice& key) = 0;
- };
Status Iterate(Handler* handler) const;
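// Usage sketch (Printer is a hypothetical Handler implementation):
//
//   class Printer : public WriteBatch::Handler {
//    public:
//     void Put(const Slice& key, const Slice& value) override {
//       std::printf("Put(%s, %s)\n", key.ToString().c_str(),
//                   value.ToString().c_str());
//     }
//     void Delete(const Slice& key) override {
//       std::printf("Delete(%s)\n", key.ToString().c_str());
//     }
//   };
//
//   Printer printer;
//   Status s = batch.Iterate(&printer);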
private:
// found in the LICENSE file. See the AUTHORS file for names of contributors.
// Test for issue 178: a manual compaction causes deleted data to reappear.
+#include <cstdlib>
#include <iostream>
#include <sstream>
-#include <cstdlib>
#include "leveldb/db.h"
#include "leveldb/write_batch.h"
return buf;
}
-std::string Key2(int i) {
- return Key1(i) + "_xxx";
-}
+std::string Key2(int i) { return Key1(i) + "_xxx"; }
-class Issue178 { };
+class Issue178 {};
TEST(Issue178, Test) {
// Get rid of any state from an old run.
} // anonymous namespace
-int main(int argc, char** argv) {
- return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
namespace leveldb {
-class Issue200 { };
+class Issue200 {};
TEST(Issue200, Test) {
// Get rid of any state from an old run.
std::string dbpath = test::TmpDir() + "/leveldb_issue200_test";
DestroyDB(dbpath, Options());
- DB *db;
+ DB* db;
Options options;
options.create_if_missing = true;
ASSERT_OK(DB::Open(options, dbpath, &db));
ASSERT_OK(db->Put(write_options, "5", "f"));
ReadOptions read_options;
- Iterator *iter = db->NewIterator(read_options);
+ Iterator* iter = db->NewIterator(read_options);
// Add an element that should not be reflected in the iterator.
ASSERT_OK(db->Put(write_options, "25", "cd"));
} // namespace leveldb
-int main(int argc, char** argv) {
- return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
--- /dev/null
+// Copyright (c) 2019 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include <cstdint>
+#include <cstdlib>
+#include <iostream>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "leveldb/db.h"
+#include "leveldb/write_batch.h"
+#include "util/testharness.h"
+
+namespace leveldb {
+
+namespace {
+
+// Creates a random number in the range [0, max).
+int GenerateRandomNumber(int max) { return std::rand() % max; }
+
+std::string CreateRandomString(int32_t index) {
+ static const size_t len = 1024;
+ char bytes[len];
+ size_t i = 0;
+ while (i < 8) {
+ bytes[i] = 'a' + ((index >> (4 * i)) & 0xf);
+ ++i;
+ }
+ while (i < sizeof(bytes)) {
+ bytes[i] = 'a' + GenerateRandomNumber(26);
+ ++i;
+ }
+ return std::string(bytes, sizeof(bytes));
+}
+
+} // namespace
+
+class Issue320 {};
+
+TEST(Issue320, Test) {
+ std::srand(0);
+
+ bool delete_before_put = false;
+ bool keep_snapshots = true;
+
+ std::vector<std::unique_ptr<std::pair<std::string, std::string>>> test_map(
+ 10000);
+ std::vector<Snapshot const*> snapshots(100, nullptr);
+
+ DB* db;
+ Options options;
+ options.create_if_missing = true;
+
+ std::string dbpath = test::TmpDir() + "/leveldb_issue320_test";
+ ASSERT_OK(DB::Open(options, dbpath, &db));
+
+ uint32_t target_size = 10000;
+ uint32_t num_items = 0;
+ uint32_t count = 0;
+ std::string key;
+ std::string value, old_value;
+
+ WriteOptions writeOptions;
+ ReadOptions readOptions;
+ while (count < 200000) {
+ if ((++count % 1000) == 0) {
+ std::cout << "count: " << count << std::endl;
+ }
+
+ int index = GenerateRandomNumber(test_map.size());
+ WriteBatch batch;
+
+ if (test_map[index] == nullptr) {
+ num_items++;
+ test_map[index].reset(new std::pair<std::string, std::string>(
+ CreateRandomString(index), CreateRandomString(index)));
+ batch.Put(test_map[index]->first, test_map[index]->second);
+ } else {
+ ASSERT_OK(db->Get(readOptions, test_map[index]->first, &old_value));
+ if (old_value != test_map[index]->second) {
+ std::cout << "ERROR incorrect value returned by Get" << std::endl;
+ std::cout << " count=" << count << std::endl;
+ std::cout << " old value=" << old_value << std::endl;
+ std::cout << " test_map[index]->second=" << test_map[index]->second
+ << std::endl;
+ std::cout << " test_map[index]->first=" << test_map[index]->first
+ << std::endl;
+ std::cout << " index=" << index << std::endl;
+ ASSERT_EQ(old_value, test_map[index]->second);
+ }
+
+ if (num_items >= target_size && GenerateRandomNumber(100) > 30) {
+ batch.Delete(test_map[index]->first);
+ test_map[index] = nullptr;
+ --num_items;
+ } else {
+ test_map[index]->second = CreateRandomString(index);
+ if (delete_before_put) batch.Delete(test_map[index]->first);
+ batch.Put(test_map[index]->first, test_map[index]->second);
+ }
+ }
+
+ ASSERT_OK(db->Write(writeOptions, &batch));
+
+ if (keep_snapshots && GenerateRandomNumber(10) == 0) {
+ int i = GenerateRandomNumber(snapshots.size());
+ if (snapshots[i] != nullptr) {
+ db->ReleaseSnapshot(snapshots[i]);
+ }
+ snapshots[i] = db->GetSnapshot();
+ }
+ }
+
+ for (Snapshot const* snapshot : snapshots) {
+ if (snapshot) {
+ db->ReleaseSnapshot(snapshot);
+ }
+ }
+
+ delete db;
+ DestroyDB(dbpath, options);
+}
+
+} // namespace leveldb
+
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
+++ /dev/null
-This directory contains interfaces and implementations that isolate the
-rest of the package from platform details.
-
-Code in the rest of the package includes "port.h" from this directory.
-"port.h" in turn includes a platform specific "port_<platform>.h" file
-that provides the platform specific implementation.
-
-See port_stdcxx.h for an example of what must be provided in a platform
-specific header file.
-
--- /dev/null
+This directory contains interfaces and implementations that isolate the
+rest of the package from platform details.
+
+Code in the rest of the package includes "port.h" from this directory.
+"port.h" in turn includes a platform specific "port_<platform>.h" file
+that provides the platform specific implementation.
+
+See port_stdcxx.h for an example of what must be provided in a platform
+specific header file.
+
// porting to a new platform, see "port_example.h" for documentation
// of what the new port_<platform>.h file must provide.
#if defined(LEVELDB_PLATFORM_POSIX) || defined(LEVELDB_PLATFORM_WINDOWS)
-# include "port/port_stdcxx.h"
+#include "port/port_stdcxx.h"
#elif defined(LEVELDB_PLATFORM_CHROMIUM)
-# include "port/port_chromium.h"
+#include "port/port_chromium.h"
#endif
#endif // STORAGE_LEVELDB_PORT_PORT_H_
#endif // HAVE_SNAPPY
#include <cassert>
+#include <condition_variable> // NOLINT
#include <cstddef>
#include <cstdint>
-#include <condition_variable> // NOLINT
-#include <mutex> // NOLINT
+#include <mutex> // NOLINT
#include <string>
#include "port/thread_annotations.h"
void Lock() EXCLUSIVE_LOCK_FUNCTION() { mu_.lock(); }
void Unlock() UNLOCK_FUNCTION() { mu_.unlock(); }
- void AssertHeld() ASSERT_EXCLUSIVE_LOCK() { }
+ void AssertHeld() ASSERT_EXCLUSIVE_LOCK() {}
private:
friend class CondVar;
}
void Signal() { cv_.notify_one(); }
void SignalAll() { cv_.notify_all(); }
+
private:
std::condition_variable cv_;
Mutex* const mu_;
return true;
#else
// Silence compiler warnings about unused arguments.
- (void)input; (void)length; (void)output;
+ (void)input;
+ (void)length;
+ (void)output;
#endif // HAVE_SNAPPY
return false;
return snappy::GetUncompressedLength(input, length, result);
#else
// Silence compiler warnings about unused arguments.
- (void)input; (void)length; (void)result;
+ (void)input;
+ (void)length;
+ (void)result;
return false;
#endif // HAVE_SNAPPY
}
return snappy::RawUncompress(input, length, output);
#else
// Silence compiler warnings about unused arguments.
- (void)input; (void)length; (void)output;
+ (void)input;
+ (void)length;
+ (void)output;
return false;
#endif // HAVE_SNAPPY
}
inline bool GetHeapProfile(void (*func)(void*, const char*, int), void* arg) {
// Silence compiler warnings about unused arguments.
- (void)func; (void)arg;
+ (void)func;
+ (void)arg;
return false;
}
return ::crc32c::Extend(crc, reinterpret_cast<const uint8_t*>(buf), size);
#else
// Silence compiler warnings about unused arguments.
- (void)crc; (void)buf; (void)size;
+ (void)crc;
+ (void)buf;
+ (void)size;
return 0;
#endif // HAVE_CRC32C
}
#if defined(__clang__)
-#define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x))
+#define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x))
#else
-#define THREAD_ANNOTATION_ATTRIBUTE__(x) // no-op
+#define THREAD_ANNOTATION_ATTRIBUTE__(x) // no-op
#endif
#endif // !defined(THREAD_ANNOTATION_ATTRIBUTE__)
#endif
#ifndef LOCK_RETURNED
-#define LOCK_RETURNED(x) \
- THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x))
+#define LOCK_RETURNED(x) THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x))
#endif
#ifndef LOCKABLE
-#define LOCKABLE \
- THREAD_ANNOTATION_ATTRIBUTE__(lockable)
+#define LOCKABLE THREAD_ANNOTATION_ATTRIBUTE__(lockable)
#endif
#ifndef SCOPED_LOCKABLE
-#define SCOPED_LOCKABLE \
- THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable)
+#define SCOPED_LOCKABLE THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable)
#endif
#ifndef EXCLUSIVE_LOCK_FUNCTION
#include "table/block.h"
-#include <vector>
#include <algorithm>
+#include <vector>
+
#include "leveldb/comparator.h"
#include "table/format.h"
#include "util/coding.h"
if (size_ < sizeof(uint32_t)) {
size_ = 0; // Error marker
} else {
- size_t max_restarts_allowed = (size_-sizeof(uint32_t)) / sizeof(uint32_t);
+ size_t max_restarts_allowed = (size_ - sizeof(uint32_t)) / sizeof(uint32_t);
if (NumRestarts() > max_restarts_allowed) {
// The size is too small for NumRestarts()
size_ = 0;
// If any errors are detected, returns nullptr. Otherwise, returns a
// pointer to the key delta (just past the three decoded values).
static inline const char* DecodeEntry(const char* p, const char* limit,
- uint32_t* shared,
- uint32_t* non_shared,
+ uint32_t* shared, uint32_t* non_shared,
uint32_t* value_length) {
if (limit - p < 3) return nullptr;
*shared = reinterpret_cast<const unsigned char*>(p)[0];
class Block::Iter : public Iterator {
private:
const Comparator* const comparator_;
- const char* const data_; // underlying block contents
- uint32_t const restarts_; // Offset of restart array (list of fixed32)
- uint32_t const num_restarts_; // Number of uint32_t entries in restart array
+ const char* const data_; // underlying block contents
+ uint32_t const restarts_; // Offset of restart array (list of fixed32)
+ uint32_t const num_restarts_; // Number of uint32_t entries in restart array
// current_ is offset in data_ of current entry. >= restarts_ if !Valid
uint32_t current_;
}
public:
- Iter(const Comparator* comparator,
- const char* data,
- uint32_t restarts,
+ Iter(const Comparator* comparator, const char* data, uint32_t restarts,
uint32_t num_restarts)
: comparator_(comparator),
data_(data),
uint32_t mid = (left + right + 1) / 2;
uint32_t region_offset = GetRestartPoint(mid);
uint32_t shared, non_shared, value_length;
- const char* key_ptr = DecodeEntry(data_ + region_offset,
- data_ + restarts_,
- &shared, &non_shared, &value_length);
+ const char* key_ptr =
+ DecodeEntry(data_ + region_offset, data_ + restarts_, &shared,
+ &non_shared, &value_length);
if (key_ptr == nullptr || (shared != 0)) {
CorruptionError();
return;
}
};
-Iterator* Block::NewIterator(const Comparator* cmp) {
+Iterator* Block::NewIterator(const Comparator* comparator) {
if (size_ < sizeof(uint32_t)) {
return NewErrorIterator(Status::Corruption("bad block contents"));
}
if (num_restarts == 0) {
return NewEmptyIterator();
} else {
- return new Iter(cmp, data_, restart_offset_, num_restarts);
+ return new Iter(comparator, data_, restart_offset_, num_restarts);
}
}
#include <stddef.h>
#include <stdint.h>
+
#include "leveldb/iterator.h"
namespace leveldb {
// Initialize the block with the specified contents.
explicit Block(const BlockContents& contents);
+ Block(const Block&) = delete;
+ Block& operator=(const Block&) = delete;
+
~Block();
size_t size() const { return size_; }
Iterator* NewIterator(const Comparator* comparator);
private:
+ class Iter;
+
uint32_t NumRestarts() const;
const char* data_;
size_t size_;
- uint32_t restart_offset_; // Offset in data_ of restart array
- bool owned_; // Block owns data_[]
-
- // No copying allowed
- Block(const Block&);
- void operator=(const Block&);
-
- class Iter;
+ uint32_t restart_offset_; // Offset in data_ of restart array
+ bool owned_; // Block owns data_[]
};
} // namespace leveldb
#include "table/block_builder.h"
-#include <algorithm>
#include <assert.h>
+
+#include <algorithm>
+
#include "leveldb/comparator.h"
#include "leveldb/table_builder.h"
#include "util/coding.h"
namespace leveldb {
BlockBuilder::BlockBuilder(const Options* options)
- : options_(options),
- restarts_(),
- counter_(0),
- finished_(false) {
+ : options_(options), restarts_(), counter_(0), finished_(false) {
assert(options->block_restart_interval >= 1);
- restarts_.push_back(0); // First restart point is at offset 0
+ restarts_.push_back(0); // First restart point is at offset 0
}
void BlockBuilder::Reset() {
buffer_.clear();
restarts_.clear();
- restarts_.push_back(0); // First restart point is at offset 0
+ restarts_.push_back(0); // First restart point is at offset 0
counter_ = 0;
finished_ = false;
last_key_.clear();
}
size_t BlockBuilder::CurrentSizeEstimate() const {
- return (buffer_.size() + // Raw data buffer
- restarts_.size() * sizeof(uint32_t) + // Restart array
- sizeof(uint32_t)); // Restart array length
+ return (buffer_.size() + // Raw data buffer
+ restarts_.size() * sizeof(uint32_t) + // Restart array
+ sizeof(uint32_t)); // Restart array length
}
Slice BlockBuilder::Finish() {
Slice last_key_piece(last_key_);
assert(!finished_);
assert(counter_ <= options_->block_restart_interval);
- assert(buffer_.empty() // No values yet?
+ assert(buffer_.empty() // No values yet?
|| options_->comparator->Compare(key, last_key_piece) > 0);
size_t shared = 0;
if (counter_ < options_->block_restart_interval) {
#ifndef STORAGE_LEVELDB_TABLE_BLOCK_BUILDER_H_
#define STORAGE_LEVELDB_TABLE_BLOCK_BUILDER_H_
+#include <stdint.h>
+
#include <vector>
-#include <stdint.h>
#include "leveldb/slice.h"
namespace leveldb {
public:
explicit BlockBuilder(const Options* options);
+ BlockBuilder(const BlockBuilder&) = delete;
+ BlockBuilder& operator=(const BlockBuilder&) = delete;
+
// Reset the contents as if the BlockBuilder was just constructed.
void Reset();
size_t CurrentSizeEstimate() const;
// Return true iff no entries have been added since the last Reset()
- bool empty() const {
- return buffer_.empty();
- }
+ bool empty() const { return buffer_.empty(); }
private:
- const Options* options_;
- std::string buffer_; // Destination buffer
- std::vector<uint32_t> restarts_; // Restart points
- int counter_; // Number of entries emitted since restart
- bool finished_; // Has Finish() been called?
- std::string last_key_;
-
- // No copying allowed
- BlockBuilder(const BlockBuilder&);
- void operator=(const BlockBuilder&);
+ const Options* options_;
+ std::string buffer_; // Destination buffer
+ std::vector<uint32_t> restarts_; // Restart points
+ int counter_; // Number of entries emitted since restart
+ bool finished_; // Has Finish() been called?
+ std::string last_key_;
};
} // namespace leveldb
static const size_t kFilterBase = 1 << kFilterBaseLg;
FilterBlockBuilder::FilterBlockBuilder(const FilterPolicy* policy)
- : policy_(policy) {
-}
+ : policy_(policy) {}
void FilterBlockBuilder::StartBlock(uint64_t block_offset) {
uint64_t filter_index = (block_offset / kFilterBase);
tmp_keys_.resize(num_keys);
for (size_t i = 0; i < num_keys; i++) {
const char* base = keys_.data() + start_[i];
- size_t length = start_[i+1] - start_[i];
+ size_t length = start_[i + 1] - start_[i];
tmp_keys_[i] = Slice(base, length);
}
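// [Editorial sketch, not part of the patch] StartBlock() above maps a data
// block's file offset to a filter index by dividing by kFilterBase. Assuming
// kFilterBaseLg == 11 (its value in the .cc file), each filter covers 2KB:
#include <cassert>
#include <cstdint>

int main() {
  const uint64_t kFilterBase = 1 << 11;  // 2048 bytes per filter
  assert(2000 / kFilterBase == 0);       // offset 2000 -> filter #0
  assert(9000 / kFilterBase == 4);       // offset 9000 -> filter #4
}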
FilterBlockReader::FilterBlockReader(const FilterPolicy* policy,
const Slice& contents)
- : policy_(policy),
- data_(nullptr),
- offset_(nullptr),
- num_(0),
- base_lg_(0) {
+ : policy_(policy), data_(nullptr), offset_(nullptr), num_(0), base_lg_(0) {
size_t n = contents.size();
if (n < 5) return; // 1 byte for base_lg_ and 4 for start of offset array
- base_lg_ = contents[n-1];
+ base_lg_ = contents[n - 1];
uint32_t last_word = DecodeFixed32(contents.data() + n - 5);
if (last_word > n - 5) return;
data_ = contents.data();
bool FilterBlockReader::KeyMayMatch(uint64_t block_offset, const Slice& key) {
uint64_t index = block_offset >> base_lg_;
if (index < num_) {
- uint32_t start = DecodeFixed32(offset_ + index*4);
- uint32_t limit = DecodeFixed32(offset_ + index*4 + 4);
+ uint32_t start = DecodeFixed32(offset_ + index * 4);
+ uint32_t limit = DecodeFixed32(offset_ + index * 4 + 4);
if (start <= limit && limit <= static_cast<size_t>(offset_ - data_)) {
Slice filter = Slice(data_ + start, limit - start);
return policy_->KeyMayMatch(key, filter);
return true; // Errors are treated as potential matches
}
-}
+} // namespace leveldb
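// [Editorial sketch, not part of the patch] How KeyMayMatch() above finds a
// filter's byte range: a filter block ends with one fixed32 offset per
// filter, a fixed32 giving the start of that offset array, and the base_lg_
// byte. A minimal single-filter layout, assuming a little-endian host:
#include <cassert>
#include <cstdint>
#include <cstring>

static uint32_t Fixed32(const char* p) {  // stand-in for DecodeFixed32
  uint32_t v;
  std::memcpy(&v, p, sizeof(v));
  return v;
}

int main() {
  // One 3-byte filter "abc", its offset (0), the offset-array start (3),
  // and base_lg_ (11).
  const char block[] = {'a', 'b', 'c', 0, 0, 0, 0, 3, 0, 0, 0, 11};
  const size_t n = sizeof(block);                     // 12 bytes
  const uint32_t last_word = Fixed32(block + n - 5);  // 3: offset array start
  const size_t num = (n - 5 - last_word) / 4;         // 1 filter
  const char* offsets = block + last_word;
  const uint32_t start = Fixed32(offsets);            // 0
  const uint32_t limit = Fixed32(offsets + 4);        // 3 (reads last_word)
  assert(num == 1 && start == 0 && limit == 3);       // filter is block[0,3)
}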
#include <stddef.h>
#include <stdint.h>
+
#include <string>
#include <vector>
+
#include "leveldb/slice.h"
#include "util/hash.h"
public:
explicit FilterBlockBuilder(const FilterPolicy*);
+ FilterBlockBuilder(const FilterBlockBuilder&) = delete;
+ FilterBlockBuilder& operator=(const FilterBlockBuilder&) = delete;
+
void StartBlock(uint64_t block_offset);
void AddKey(const Slice& key);
Slice Finish();
void GenerateFilter();
const FilterPolicy* policy_;
- std::string keys_; // Flattened key contents
- std::vector<size_t> start_; // Starting index in keys_ of each key
- std::string result_; // Filter data computed so far
- std::vector<Slice> tmp_keys_; // policy_->CreateFilter() argument
+ std::string keys_; // Flattened key contents
+ std::vector<size_t> start_; // Starting index in keys_ of each key
+ std::string result_; // Filter data computed so far
+ std::vector<Slice> tmp_keys_; // policy_->CreateFilter() argument
std::vector<uint32_t> filter_offsets_;
-
- // No copying allowed
- FilterBlockBuilder(const FilterBlockBuilder&);
- void operator=(const FilterBlockBuilder&);
};
class FilterBlockReader {
public:
- // REQUIRES: "contents" and *policy must stay live while *this is live.
+ // REQUIRES: "contents" and *policy must stay live while *this is live.
FilterBlockReader(const FilterPolicy* policy, const Slice& contents);
bool KeyMayMatch(uint64_t block_offset, const Slice& key);
size_t base_lg_; // Encoding parameter (see kFilterBaseLg in .cc file)
};
-}
+} // namespace leveldb
#endif // STORAGE_LEVELDB_TABLE_FILTER_BLOCK_H_
// For testing: emit an array with one hash value per key
class TestHashFilter : public FilterPolicy {
public:
- virtual const char* Name() const {
- return "TestHashFilter";
- }
+ virtual const char* Name() const { return "TestHashFilter"; }
virtual void CreateFilter(const Slice* keys, int n, std::string* dst) const {
for (int i = 0; i < n; i++) {
ASSERT_TRUE(reader.KeyMayMatch(100, "box"));
ASSERT_TRUE(reader.KeyMayMatch(100, "hello"));
ASSERT_TRUE(reader.KeyMayMatch(100, "foo"));
- ASSERT_TRUE(! reader.KeyMayMatch(100, "missing"));
- ASSERT_TRUE(! reader.KeyMayMatch(100, "other"));
+ ASSERT_TRUE(!reader.KeyMayMatch(100, "missing"));
+ ASSERT_TRUE(!reader.KeyMayMatch(100, "other"));
}
TEST(FilterBlockTest, MultiChunk) {
// Check first filter
ASSERT_TRUE(reader.KeyMayMatch(0, "foo"));
ASSERT_TRUE(reader.KeyMayMatch(2000, "bar"));
- ASSERT_TRUE(! reader.KeyMayMatch(0, "box"));
- ASSERT_TRUE(! reader.KeyMayMatch(0, "hello"));
+ ASSERT_TRUE(!reader.KeyMayMatch(0, "box"));
+ ASSERT_TRUE(!reader.KeyMayMatch(0, "hello"));
// Check second filter
ASSERT_TRUE(reader.KeyMayMatch(3100, "box"));
- ASSERT_TRUE(! reader.KeyMayMatch(3100, "foo"));
- ASSERT_TRUE(! reader.KeyMayMatch(3100, "bar"));
- ASSERT_TRUE(! reader.KeyMayMatch(3100, "hello"));
+ ASSERT_TRUE(!reader.KeyMayMatch(3100, "foo"));
+ ASSERT_TRUE(!reader.KeyMayMatch(3100, "bar"));
+ ASSERT_TRUE(!reader.KeyMayMatch(3100, "hello"));
// Check third filter (empty)
- ASSERT_TRUE(! reader.KeyMayMatch(4100, "foo"));
- ASSERT_TRUE(! reader.KeyMayMatch(4100, "bar"));
- ASSERT_TRUE(! reader.KeyMayMatch(4100, "box"));
- ASSERT_TRUE(! reader.KeyMayMatch(4100, "hello"));
+ ASSERT_TRUE(!reader.KeyMayMatch(4100, "foo"));
+ ASSERT_TRUE(!reader.KeyMayMatch(4100, "bar"));
+ ASSERT_TRUE(!reader.KeyMayMatch(4100, "box"));
+ ASSERT_TRUE(!reader.KeyMayMatch(4100, "hello"));
// Check last filter
ASSERT_TRUE(reader.KeyMayMatch(9000, "box"));
ASSERT_TRUE(reader.KeyMayMatch(9000, "hello"));
- ASSERT_TRUE(! reader.KeyMayMatch(9000, "foo"));
- ASSERT_TRUE(! reader.KeyMayMatch(9000, "bar"));
+ ASSERT_TRUE(!reader.KeyMayMatch(9000, "foo"));
+ ASSERT_TRUE(!reader.KeyMayMatch(9000, "bar"));
}
} // namespace leveldb
-int main(int argc, char** argv) {
- return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
}
Status BlockHandle::DecodeFrom(Slice* input) {
- if (GetVarint64(input, &offset_) &&
- GetVarint64(input, &size_)) {
+ if (GetVarint64(input, &offset_) && GetVarint64(input, &size_)) {
return Status::OK();
} else {
return Status::Corruption("bad block handle");
return result;
}
-Status ReadBlock(RandomAccessFile* file,
- const ReadOptions& options,
- const BlockHandle& handle,
- BlockContents* result) {
+Status ReadBlock(RandomAccessFile* file, const ReadOptions& options,
+ const BlockHandle& handle, BlockContents* result) {
result->data = Slice();
result->cachable = false;
result->heap_allocated = false;
}
// Check the crc of the type and the block contents
- const char* data = contents.data(); // Pointer to where Read put the data
+ const char* data = contents.data(); // Pointer to where Read put the data
if (options.verify_checksums) {
const uint32_t crc = crc32c::Unmask(DecodeFixed32(data + n + 1));
const uint32_t actual = crc32c::Value(data, n + 1);
#ifndef STORAGE_LEVELDB_TABLE_FORMAT_H_
#define STORAGE_LEVELDB_TABLE_FORMAT_H_
-#include <string>
#include <stdint.h>
+
+#include <string>
+
#include "leveldb/slice.h"
#include "leveldb/status.h"
#include "leveldb/table_builder.h"
// block or a meta block.
class BlockHandle {
public:
+ // Maximum encoding length of a BlockHandle
+ enum { kMaxEncodedLength = 10 + 10 };
+
BlockHandle();
// The offset of the block in the file.
void EncodeTo(std::string* dst) const;
Status DecodeFrom(Slice* input);
- // Maximum encoding length of a BlockHandle
- enum { kMaxEncodedLength = 10 + 10 };
-
private:
uint64_t offset_;
uint64_t size_;
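// [Editorial note, not part of the patch] Why kMaxEncodedLength is 10 + 10: a
// BlockHandle holds two varint64 fields (offset_ and size_), and a varint64
// emits 7 payload bits per byte, so a 64-bit value needs at most 10 bytes.
// As a compile-time check:
static_assert((64 + 7 - 1) / 7 == 10, "a varint64 occupies at most 10 bytes");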
// Footer encapsulates the fixed information stored at the tail
// end of every table file.
class Footer {
public:
- Footer() { }
+ // Encoded length of a Footer. Note that the serialization of a
+ // Footer will always occupy exactly this many bytes. It consists
+ // of two block handles and a magic number.
+ enum { kEncodedLength = 2 * BlockHandle::kMaxEncodedLength + 8 };
+
+ Footer() {}
// The block handle for the metaindex block of the table
const BlockHandle& metaindex_handle() const { return metaindex_handle_; }
void set_metaindex_handle(const BlockHandle& h) { metaindex_handle_ = h; }
// The block handle for the index block of the table
- const BlockHandle& index_handle() const {
- return index_handle_;
- }
- void set_index_handle(const BlockHandle& h) {
- index_handle_ = h;
- }
+ const BlockHandle& index_handle() const { return index_handle_; }
+ void set_index_handle(const BlockHandle& h) { index_handle_ = h; }
void EncodeTo(std::string* dst) const;
Status DecodeFrom(Slice* input);
- // Encoded length of a Footer. Note that the serialization of a
- // Footer will always occupy exactly this many bytes. It consists
- // of two block handles and a magic number.
- enum {
- kEncodedLength = 2*BlockHandle::kMaxEncodedLength + 8
- };
-
private:
BlockHandle metaindex_handle_;
BlockHandle index_handle_;
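// [Editorial sketch, not part of the patch] The footer is written at a fixed
// size: both handles are padded to their maximum encoding and an 8-byte magic
// number follows, so a reader can always decode the footer from the last
// kEncodedLength bytes of the file.
constexpr int kMaxEncodedLength = 10 + 10;  // per BlockHandle
constexpr int kFooterEncodedLength = 2 * kMaxEncodedLength + 8;
static_assert(kFooterEncodedLength == 48, "leveldb footers occupy 48 bytes");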
// Read the block identified by "handle" from "file". On failure
// return non-OK. On success fill *result and return OK.
-Status ReadBlock(RandomAccessFile* file,
- const ReadOptions& options,
- const BlockHandle& handle,
- BlockContents* result);
+Status ReadBlock(RandomAccessFile* file, const ReadOptions& options,
+ const BlockHandle& handle, BlockContents* result);
// Implementation details follow. Clients should ignore them.
inline BlockHandle::BlockHandle()
- : offset_(~static_cast<uint64_t>(0)),
- size_(~static_cast<uint64_t>(0)) {
-}
+ : offset_(~static_cast<uint64_t>(0)), size_(~static_cast<uint64_t>(0)) {}
} // namespace leveldb
Iterator::~Iterator() {
if (!cleanup_head_.IsEmpty()) {
cleanup_head_.Run();
- for (CleanupNode* node = cleanup_head_.next; node != nullptr; ) {
+ for (CleanupNode* node = cleanup_head_.next; node != nullptr;) {
node->Run();
CleanupNode* next_node = node->next;
delete node;
class EmptyIterator : public Iterator {
public:
- EmptyIterator(const Status& s) : status_(s) { }
+ EmptyIterator(const Status& s) : status_(s) {}
~EmptyIterator() override = default;
bool Valid() const override { return false; }
- void Seek(const Slice& target) override { }
- void SeekToFirst() override { }
- void SeekToLast() override { }
+ void Seek(const Slice& target) override {}
+ void SeekToFirst() override {}
+ void SeekToLast() override {}
void Next() override { assert(false); }
void Prev() override { assert(false); }
- Slice key() const override { assert(false); return Slice(); }
- Slice value() const override { assert(false); return Slice(); }
+ Slice key() const override {
+ assert(false);
+ return Slice();
+ }
+ Slice value() const override {
+ assert(false);
+ return Slice();
+ }
Status status() const override { return status_; }
private:
} // anonymous namespace
-Iterator* NewEmptyIterator() {
- return new EmptyIterator(Status::OK());
-}
+Iterator* NewEmptyIterator() { return new EmptyIterator(Status::OK()); }
Iterator* NewErrorIterator(const Status& status) {
return new EmptyIterator(status);
// An internal wrapper class with an interface similar to Iterator that
// caches the valid() and key() results for an underlying iterator.
// This can help avoid virtual function calls and also gives better
// cache locality.
class IteratorWrapper {
public:
- IteratorWrapper(): iter_(nullptr), valid_(false) { }
- explicit IteratorWrapper(Iterator* iter): iter_(nullptr) {
- Set(iter);
- }
+ IteratorWrapper() : iter_(nullptr), valid_(false) {}
+ explicit IteratorWrapper(Iterator* iter) : iter_(nullptr) { Set(iter); }
~IteratorWrapper() { delete iter_; }
Iterator* iter() const { return iter_; }
}
}
-
// Iterator interface methods
- bool Valid() const { return valid_; }
- Slice key() const { assert(Valid()); return key_; }
- Slice value() const { assert(Valid()); return iter_->value(); }
+ bool Valid() const { return valid_; }
+ Slice key() const {
+ assert(Valid());
+ return key_;
+ }
+ Slice value() const {
+ assert(Valid());
+ return iter_->value();
+ }
// Methods below require iter() != nullptr
- Status status() const { assert(iter_); return iter_->status(); }
- void Next() { assert(iter_); iter_->Next(); Update(); }
- void Prev() { assert(iter_); iter_->Prev(); Update(); }
- void Seek(const Slice& k) { assert(iter_); iter_->Seek(k); Update(); }
- void SeekToFirst() { assert(iter_); iter_->SeekToFirst(); Update(); }
- void SeekToLast() { assert(iter_); iter_->SeekToLast(); Update(); }
+ Status status() const {
+ assert(iter_);
+ return iter_->status();
+ }
+ void Next() {
+ assert(iter_);
+ iter_->Next();
+ Update();
+ }
+ void Prev() {
+ assert(iter_);
+ iter_->Prev();
+ Update();
+ }
+ void Seek(const Slice& k) {
+ assert(iter_);
+ iter_->Seek(k);
+ Update();
+ }
+ void SeekToFirst() {
+ assert(iter_);
+ iter_->SeekToFirst();
+ Update();
+ }
+ void SeekToLast() {
+ assert(iter_);
+ iter_->SeekToLast();
+ Update();
+ }
private:
void Update() {
}
}
- virtual ~MergingIterator() {
- delete[] children_;
- }
+ virtual ~MergingIterator() { delete[] children_; }
- virtual bool Valid() const {
- return (current_ != nullptr);
- }
+ virtual bool Valid() const { return (current_ != nullptr); }
virtual void SeekToFirst() {
for (int i = 0; i < n_; i++) {
}
private:
+ // Which direction is the iterator moving?
+ enum Direction { kForward, kReverse };
+
void FindSmallest();
void FindLargest();
IteratorWrapper* children_;
int n_;
IteratorWrapper* current_;
-
- // Which direction is the iterator moving?
- enum Direction {
- kForward,
- kReverse
- };
Direction direction_;
};
void MergingIterator::FindLargest() {
IteratorWrapper* largest = nullptr;
- for (int i = n_-1; i >= 0; i--) {
+ for (int i = n_ - 1; i >= 0; i--) {
IteratorWrapper* child = &children_[i];
if (child->Valid()) {
if (largest == nullptr) {
}
} // namespace
-Iterator* NewMergingIterator(const Comparator* cmp, Iterator** list, int n) {
+Iterator* NewMergingIterator(const Comparator* comparator, Iterator** children,
+ int n) {
assert(n >= 0);
if (n == 0) {
return NewEmptyIterator();
} else if (n == 1) {
- return list[0];
+ return children[0];
} else {
- return new MergingIterator(cmp, list, n);
+ return new MergingIterator(comparator, children, n);
}
}
// The result does no duplicate suppression. I.e., if a particular
// key is present in K child iterators, it will be yielded K times.
//
// REQUIRES: n >= 0
-Iterator* NewMergingIterator(
- const Comparator* comparator, Iterator** children, int n);
+Iterator* NewMergingIterator(const Comparator* comparator, Iterator** children,
+ int n);
} // namespace leveldb
struct Table::Rep {
~Rep() {
delete filter;
- delete [] filter_data;
+ delete[] filter_data;
delete index_block;
}
Block* index_block;
};
-Status Table::Open(const Options& options,
- RandomAccessFile* file,
- uint64_t size,
- Table** table) {
+Status Table::Open(const Options& options, RandomAccessFile* file,
+ uint64_t size, Table** table) {
*table = nullptr;
if (size < Footer::kEncodedLength) {
return Status::Corruption("file is too short to be an sstable");
return;
}
if (block.heap_allocated) {
- rep_->filter_data = block.data.data(); // Will need to delete later
+ rep_->filter_data = block.data.data(); // Will need to delete later
}
rep_->filter = new FilterBlockReader(rep_->options.filter_policy, block.data);
}
-Table::~Table() {
- delete rep_;
-}
+Table::~Table() { delete rep_; }
static void DeleteBlock(void* arg, void* ignored) {
delete reinterpret_cast<Block*>(arg);
// Convert an index iterator value (i.e., an encoded BlockHandle)
// into an iterator over the contents of the corresponding block.
-Iterator* Table::BlockReader(void* arg,
- const ReadOptions& options,
+Iterator* Table::BlockReader(void* arg, const ReadOptions& options,
const Slice& index_value) {
Table* table = reinterpret_cast<Table*>(arg);
Cache* block_cache = table->rep_->options.block_cache;
if (block_cache != nullptr) {
char cache_key_buffer[16];
EncodeFixed64(cache_key_buffer, table->rep_->cache_id);
- EncodeFixed64(cache_key_buffer+8, handle.offset());
+ EncodeFixed64(cache_key_buffer + 8, handle.offset());
Slice key(cache_key_buffer, sizeof(cache_key_buffer));
cache_handle = block_cache->Lookup(key);
if (cache_handle != nullptr) {
if (s.ok()) {
block = new Block(contents);
if (contents.cachable && options.fill_cache) {
- cache_handle = block_cache->Insert(
- key, block, block->size(), &DeleteCachedBlock);
+ cache_handle = block_cache->Insert(key, block, block->size(),
+ &DeleteCachedBlock);
}
}
}
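// [Editorial sketch, not part of the patch] The block-cache key built above
// is the table's cache_id followed by the block offset, 16 bytes total. A
// hypothetical little-endian-only stand-in for the two EncodeFixed64 calls:
#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  char key[16];
  const uint64_t cache_id = 7, offset = 4096;  // hypothetical values
  std::memcpy(key, &cache_id, 8);              // EncodeFixed64(key, cache_id)
  std::memcpy(key + 8, &offset, 8);            // EncodeFixed64(key + 8, offset)

  char other[16];
  const uint64_t other_offset = 8192;          // different block, same table
  std::memcpy(other, &cache_id, 8);
  std::memcpy(other + 8, &other_offset, 8);
  assert(std::memcmp(key, other, 16) != 0);    // distinct blocks, distinct keys
}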
&Table::BlockReader, const_cast<Table*>(this), options);
}
-Status Table::InternalGet(const ReadOptions& options, const Slice& k,
- void* arg,
- void (*saver)(void*, const Slice&, const Slice&)) {
+Status Table::InternalGet(const ReadOptions& options, const Slice& k, void* arg,
+ void (*handle_result)(void*, const Slice&,
+ const Slice&)) {
Status s;
Iterator* iiter = rep_->index_block->NewIterator(rep_->options.comparator);
iiter->Seek(k);
Slice handle_value = iiter->value();
FilterBlockReader* filter = rep_->filter;
BlockHandle handle;
- if (filter != nullptr &&
- handle.DecodeFrom(&handle_value).ok() &&
+ if (filter != nullptr && handle.DecodeFrom(&handle_value).ok() &&
!filter->KeyMayMatch(handle.offset(), k)) {
// Not found
} else {
Iterator* block_iter = BlockReader(this, options, iiter->value());
block_iter->Seek(k);
if (block_iter->Valid()) {
- (*saver)(arg, block_iter->key(), block_iter->value());
+ (*handle_result)(arg, block_iter->key(), block_iter->value());
}
s = block_iter->status();
delete block_iter;
return s;
}
-
uint64_t Table::ApproximateOffsetOf(const Slice& key) const {
Iterator* index_iter =
rep_->index_block->NewIterator(rep_->options.comparator);
#include "leveldb/table_builder.h"
#include <assert.h>
+
#include "leveldb/comparator.h"
#include "leveldb/env.h"
#include "leveldb/filter_policy.h"
namespace leveldb {
struct TableBuilder::Rep {
+ Rep(const Options& opt, WritableFile* f)
+ : options(opt),
+ index_block_options(opt),
+ file(f),
+ offset(0),
+ data_block(&options),
+ index_block(&index_block_options),
+ num_entries(0),
+ closed(false),
+ filter_block(opt.filter_policy == nullptr
+ ? nullptr
+ : new FilterBlockBuilder(opt.filter_policy)),
+ pending_index_entry(false) {
+ index_block_options.block_restart_interval = 1;
+ }
+
Options options;
Options index_block_options;
WritableFile* file;
BlockBuilder index_block;
std::string last_key;
int64_t num_entries;
- bool closed; // Either Finish() or Abandon() has been called.
+ bool closed; // Either Finish() or Abandon() has been called.
FilterBlockBuilder* filter_block;
  // We do not emit the index entry for a block until we have seen the
  // first key for the next data block. This allows us to use shorter
  // keys in the index block.
BlockHandle pending_handle; // Handle to add to index block
std::string compressed_output;
-
- Rep(const Options& opt, WritableFile* f)
- : options(opt),
- index_block_options(opt),
- file(f),
- offset(0),
- data_block(&options),
- index_block(&index_block_options),
- num_entries(0),
- closed(false),
- filter_block(opt.filter_policy == nullptr ? nullptr
- : new FilterBlockBuilder(opt.filter_policy)),
- pending_index_entry(false) {
- index_block_options.block_restart_interval = 1;
- }
};
TableBuilder::TableBuilder(const Options& options, WritableFile* file)
}
void TableBuilder::WriteRawBlock(const Slice& block_contents,
- CompressionType type,
- BlockHandle* handle) {
+ CompressionType type, BlockHandle* handle) {
Rep* r = rep_;
handle->set_offset(r->offset);
handle->set_size(block_contents.size());
trailer[0] = type;
uint32_t crc = crc32c::Value(block_contents.data(), block_contents.size());
crc = crc32c::Extend(crc, trailer, 1); // Extend crc to cover block type
- EncodeFixed32(trailer+1, crc32c::Mask(crc));
+ EncodeFixed32(trailer + 1, crc32c::Mask(crc));
r->status = r->file->Append(Slice(trailer, kBlockTrailerSize));
if (r->status.ok()) {
r->offset += block_contents.size() + kBlockTrailerSize;
}
}
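// [Editorial sketch, not part of the patch] The 5-byte trailer written above
// is the type byte followed by a masked crc32c of the contents plus type.
// Mask() rotates and adds a constant (mirroring util/crc32c.h) so that
// computing a CRC over data that itself contains CRCs stays well-behaved:
#include <cassert>
#include <cstdint>

static uint32_t Mask(uint32_t crc) {
  return ((crc >> 15) | (crc << 17)) + 0xa282ead8ul;
}
static uint32_t Unmask(uint32_t masked) {
  const uint32_t rot = masked - 0xa282ead8ul;
  return (rot >> 17) | (rot << 15);
}

int main() {
  const uint32_t crc = 0x12345678;   // hypothetical crc of block + type byte
  assert(Unmask(Mask(crc)) == crc);  // readers recover the original value
}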
-Status TableBuilder::status() const {
- return rep_->status;
-}
+Status TableBuilder::status() const { return rep_->status; }
Status TableBuilder::Finish() {
Rep* r = rep_;
r->closed = true;
}
-uint64_t TableBuilder::NumEntries() const {
- return rep_->num_entries;
-}
+uint64_t TableBuilder::NumEntries() const { return rep_->num_entries; }
-uint64_t TableBuilder::FileSize() const {
- return rep_->offset;
-}
+uint64_t TableBuilder::FileSize() const { return rep_->offset; }
} // namespace leveldb
#include <map>
#include <string>
+
#include "db/dbformat.h"
#include "db/memtable.h"
#include "db/write_batch_internal.h"
static std::string Reverse(const Slice& key) {
std::string str(key.ToString());
std::string rev("");
- for (std::string::reverse_iterator rit = str.rbegin();
- rit != str.rend(); ++rit) {
+ for (std::string::reverse_iterator rit = str.rbegin(); rit != str.rend();
+ ++rit) {
rev.push_back(*rit);
}
return rev;
return BytewiseComparator()->Compare(Reverse(a), Reverse(b));
}
- virtual void FindShortestSeparator(
- std::string* start,
- const Slice& limit) const {
+ virtual void FindShortestSeparator(std::string* start,
+ const Slice& limit) const {
std::string s = Reverse(*start);
std::string l = Reverse(limit);
BytewiseComparator()->FindShortestSeparator(&s, l);
struct STLLessThan {
const Comparator* cmp;
- STLLessThan() : cmp(BytewiseComparator()) { }
- STLLessThan(const Comparator* c) : cmp(c) { }
+ STLLessThan() : cmp(BytewiseComparator()) {}
+ STLLessThan(const Comparator* c) : cmp(c) {}
bool operator()(const std::string& a, const std::string& b) const {
return cmp->Compare(Slice(a), Slice(b)) < 0;
}
};
} // namespace
-class StringSink: public WritableFile {
+class StringSink : public WritableFile {
public:
- ~StringSink() { }
+ ~StringSink() {}
const std::string& contents() const { return contents_; }
std::string contents_;
};
-
-class StringSource: public RandomAccessFile {
+class StringSource : public RandomAccessFile {
public:
StringSource(const Slice& contents)
- : contents_(contents.data(), contents.size()) {
- }
+ : contents_(contents.data(), contents.size()) {}
- virtual ~StringSource() { }
+ virtual ~StringSource() {}
uint64_t Size() const { return contents_.size(); }
virtual Status Read(uint64_t offset, size_t n, Slice* result,
- char* scratch) const {
- if (offset > contents_.size()) {
+ char* scratch) const {
+ if (offset >= contents_.size()) {
return Status::InvalidArgument("invalid Read offset");
}
if (offset + n > contents_.size()) {
// BlockBuilder/TableBuilder and Block/Table.
class Constructor {
public:
- explicit Constructor(const Comparator* cmp) : data_(STLLessThan(cmp)) { }
- virtual ~Constructor() { }
+ explicit Constructor(const Comparator* cmp) : data_(STLLessThan(cmp)) {}
+ virtual ~Constructor() {}
void Add(const std::string& key, const Slice& value) {
data_[key] = value.ToString();
// Finish constructing the data structure with all the keys that have
// been added so far. Returns the keys in sorted order in "*keys"
// and stores the key/value pairs in "*kvmap"
- void Finish(const Options& options,
- std::vector<std::string>* keys,
+ void Finish(const Options& options, std::vector<std::string>* keys,
KVMap* kvmap) {
*kvmap = data_;
keys->clear();
- for (KVMap::const_iterator it = data_.begin();
- it != data_.end();
- ++it) {
+ for (KVMap::const_iterator it = data_.begin(); it != data_.end(); ++it) {
keys->push_back(it->first);
}
data_.clear();
KVMap data_;
};
-class BlockConstructor: public Constructor {
+class BlockConstructor : public Constructor {
public:
explicit BlockConstructor(const Comparator* cmp)
- : Constructor(cmp),
- comparator_(cmp),
- block_(nullptr) { }
- ~BlockConstructor() {
- delete block_;
- }
+ : Constructor(cmp), comparator_(cmp), block_(nullptr) {}
+ ~BlockConstructor() { delete block_; }
virtual Status FinishImpl(const Options& options, const KVMap& data) {
delete block_;
block_ = nullptr;
BlockBuilder builder(&options);
- for (KVMap::const_iterator it = data.begin();
- it != data.end();
- ++it) {
+ for (KVMap::const_iterator it = data.begin(); it != data.end(); ++it) {
builder.Add(it->first, it->second);
}
// Open the block
BlockConstructor();
};
-class TableConstructor: public Constructor {
+class TableConstructor : public Constructor {
public:
TableConstructor(const Comparator* cmp)
- : Constructor(cmp),
- source_(nullptr), table_(nullptr) {
- }
- ~TableConstructor() {
- Reset();
- }
+ : Constructor(cmp), source_(nullptr), table_(nullptr) {}
+ ~TableConstructor() { Reset(); }
virtual Status FinishImpl(const Options& options, const KVMap& data) {
Reset();
StringSink sink;
TableBuilder builder(options, &sink);
- for (KVMap::const_iterator it = data.begin();
- it != data.end();
- ++it) {
+ for (KVMap::const_iterator it = data.begin(); it != data.end(); ++it) {
builder.Add(it->first, it->second);
ASSERT_TRUE(builder.status().ok());
}
};
// A helper class that converts internal format keys into user keys
-class KeyConvertingIterator: public Iterator {
+class KeyConvertingIterator : public Iterator {
public:
- explicit KeyConvertingIterator(Iterator* iter) : iter_(iter) { }
+ explicit KeyConvertingIterator(Iterator* iter) : iter_(iter) {}
virtual ~KeyConvertingIterator() { delete iter_; }
virtual bool Valid() const { return iter_->Valid(); }
virtual void Seek(const Slice& target) {
void operator=(const KeyConvertingIterator&);
};
-class MemTableConstructor: public Constructor {
+class MemTableConstructor : public Constructor {
public:
explicit MemTableConstructor(const Comparator* cmp)
- : Constructor(cmp),
- internal_comparator_(cmp) {
+ : Constructor(cmp), internal_comparator_(cmp) {
memtable_ = new MemTable(internal_comparator_);
memtable_->Ref();
}
- ~MemTableConstructor() {
- memtable_->Unref();
- }
+ ~MemTableConstructor() { memtable_->Unref(); }
virtual Status FinishImpl(const Options& options, const KVMap& data) {
memtable_->Unref();
memtable_ = new MemTable(internal_comparator_);
memtable_->Ref();
int seq = 1;
- for (KVMap::const_iterator it = data.begin();
- it != data.end();
- ++it) {
+ for (KVMap::const_iterator it = data.begin(); it != data.end(); ++it) {
memtable_->Add(seq, kTypeValue, it->first, it->second);
seq++;
}
MemTable* memtable_;
};
-class DBConstructor: public Constructor {
+class DBConstructor : public Constructor {
public:
explicit DBConstructor(const Comparator* cmp)
- : Constructor(cmp),
- comparator_(cmp) {
+ : Constructor(cmp), comparator_(cmp) {
db_ = nullptr;
NewDB();
}
- ~DBConstructor() {
- delete db_;
- }
+ ~DBConstructor() { delete db_; }
virtual Status FinishImpl(const Options& options, const KVMap& data) {
delete db_;
db_ = nullptr;
NewDB();
- for (KVMap::const_iterator it = data.begin();
- it != data.end();
- ++it) {
+ for (KVMap::const_iterator it = data.begin(); it != data.end(); ++it) {
WriteBatch batch;
batch.Put(it->first, it->second);
ASSERT_TRUE(db_->Write(WriteOptions(), &batch).ok());
DB* db_;
};
-enum TestType {
- TABLE_TEST,
- BLOCK_TEST,
- MEMTABLE_TEST,
- DB_TEST
-};
+enum TestType { TABLE_TEST, BLOCK_TEST, MEMTABLE_TEST, DB_TEST };
struct TestArgs {
TestType type;
};
static const TestArgs kTestArgList[] = {
- { TABLE_TEST, false, 16 },
- { TABLE_TEST, false, 1 },
- { TABLE_TEST, false, 1024 },
- { TABLE_TEST, true, 16 },
- { TABLE_TEST, true, 1 },
- { TABLE_TEST, true, 1024 },
-
- { BLOCK_TEST, false, 16 },
- { BLOCK_TEST, false, 1 },
- { BLOCK_TEST, false, 1024 },
- { BLOCK_TEST, true, 16 },
- { BLOCK_TEST, true, 1 },
- { BLOCK_TEST, true, 1024 },
-
- // Restart interval does not matter for memtables
- { MEMTABLE_TEST, false, 16 },
- { MEMTABLE_TEST, true, 16 },
-
- // Do not bother with restart interval variations for DB
- { DB_TEST, false, 16 },
- { DB_TEST, true, 16 },
+ {TABLE_TEST, false, 16},
+ {TABLE_TEST, false, 1},
+ {TABLE_TEST, false, 1024},
+ {TABLE_TEST, true, 16},
+ {TABLE_TEST, true, 1},
+ {TABLE_TEST, true, 1024},
+
+ {BLOCK_TEST, false, 16},
+ {BLOCK_TEST, false, 1},
+ {BLOCK_TEST, false, 1024},
+ {BLOCK_TEST, true, 16},
+ {BLOCK_TEST, true, 1},
+ {BLOCK_TEST, true, 1024},
+
+ // Restart interval does not matter for memtables
+ {MEMTABLE_TEST, false, 16},
+ {MEMTABLE_TEST, true, 16},
+
+ // Do not bother with restart interval variations for DB
+ {DB_TEST, false, 16},
+ {DB_TEST, true, 16},
};
static const int kNumTestArgs = sizeof(kTestArgList) / sizeof(kTestArgList[0]);
class Harness {
public:
- Harness() : constructor_(nullptr) { }
+ Harness() : constructor_(nullptr) {}
void Init(const TestArgs& args) {
delete constructor_;
}
}
- ~Harness() {
- delete constructor_;
- }
+ ~Harness() { delete constructor_; }
void Add(const std::string& key, const std::string& value) {
constructor_->Add(key, value);
ASSERT_TRUE(!iter->Valid());
iter->SeekToFirst();
for (KVMap::const_iterator model_iter = data.begin();
- model_iter != data.end();
- ++model_iter) {
+ model_iter != data.end(); ++model_iter) {
ASSERT_EQ(ToString(data, model_iter), ToString(iter));
iter->Next();
}
ASSERT_TRUE(!iter->Valid());
iter->SeekToLast();
for (KVMap::const_reverse_iterator model_iter = data.rbegin();
- model_iter != data.rend();
- ++model_iter) {
+ model_iter != data.rend(); ++model_iter) {
ASSERT_EQ(ToString(data, model_iter), ToString(iter));
iter->Prev();
}
delete iter;
}
- void TestRandomAccess(Random* rnd,
- const std::vector<std::string>& keys,
+ void TestRandomAccess(Random* rnd, const std::vector<std::string>& keys,
const KVMap& data) {
static const bool kVerbose = false;
Iterator* iter = constructor_->NewIterator();
case 2: {
std::string key = PickRandomKey(rnd, keys);
model_iter = data.lower_bound(key);
- if (kVerbose) fprintf(stderr, "Seek '%s'\n",
- EscapeString(key).c_str());
+ if (kVerbose)
+ fprintf(stderr, "Seek '%s'\n", EscapeString(key).c_str());
iter->Seek(Slice(key));
ASSERT_EQ(ToString(data, model_iter), ToString(iter));
break;
if (kVerbose) fprintf(stderr, "Prev\n");
iter->Prev();
if (model_iter == data.begin()) {
- model_iter = data.end(); // Wrap around to invalid value
+ model_iter = data.end(); // Wrap around to invalid value
} else {
--model_iter;
}
break;
case 1: {
// Attempt to return something smaller than an existing key
- if (result.size() > 0 && result[result.size()-1] > '\0') {
- result[result.size()-1]--;
+ if (!result.empty() && result[result.size() - 1] > '\0') {
+ result[result.size() - 1]--;
}
break;
}
for (int num_entries = 0; num_entries < 2000;
num_entries += (num_entries < 50 ? 1 : 200)) {
if ((num_entries % 10) == 0) {
- fprintf(stderr, "case %d of %d: num_entries = %d\n",
- (i + 1), int(kNumTestArgs), num_entries);
+ fprintf(stderr, "case %d of %d: num_entries = %d\n", (i + 1),
+ int(kNumTestArgs), num_entries);
}
for (int e = 0; e < num_entries; e++) {
std::string v;
TEST(Harness, RandomizedLongDB) {
Random rnd(test::RandomSeed());
- TestArgs args = { DB_TEST, false, 16 };
+ TestArgs args = {DB_TEST, false, 16};
Init(args);
int num_entries = 100000;
for (int e = 0; e < num_entries; e++) {
ASSERT_GT(files, 0);
}
-class MemTableTest { };
+class MemTableTest {};
TEST(MemTableTest, Simple) {
InternalKeyComparator cmp(BytewiseComparator());
Iterator* iter = memtable->NewIterator();
iter->SeekToFirst();
while (iter->Valid()) {
- fprintf(stderr, "key: '%s' -> '%s'\n",
- iter->key().ToString().c_str(),
+ fprintf(stderr, "key: '%s' -> '%s'\n", iter->key().ToString().c_str(),
iter->value().ToString().c_str());
iter->Next();
}
bool result = (val >= low) && (val <= high);
if (!result) {
fprintf(stderr, "Value %llu is not in range [%llu, %llu]\n",
- (unsigned long long)(val),
- (unsigned long long)(low),
+ (unsigned long long)(val), (unsigned long long)(low),
(unsigned long long)(high));
}
return result;
}
-class TableTest { };
+class TableTest {};
TEST(TableTest, ApproximateOffsetOfPlain) {
TableConstructor c(BytewiseComparator());
options.compression = kNoCompression;
c.Finish(options, &keys, &kvmap);
- ASSERT_TRUE(Between(c.ApproximateOffsetOf("abc"), 0, 0));
- ASSERT_TRUE(Between(c.ApproximateOffsetOf("k01"), 0, 0));
- ASSERT_TRUE(Between(c.ApproximateOffsetOf("k01a"), 0, 0));
- ASSERT_TRUE(Between(c.ApproximateOffsetOf("k02"), 0, 0));
- ASSERT_TRUE(Between(c.ApproximateOffsetOf("k03"), 0, 0));
- ASSERT_TRUE(Between(c.ApproximateOffsetOf("k04"), 10000, 11000));
+ ASSERT_TRUE(Between(c.ApproximateOffsetOf("abc"), 0, 0));
+ ASSERT_TRUE(Between(c.ApproximateOffsetOf("k01"), 0, 0));
+ ASSERT_TRUE(Between(c.ApproximateOffsetOf("k01a"), 0, 0));
+ ASSERT_TRUE(Between(c.ApproximateOffsetOf("k02"), 0, 0));
+ ASSERT_TRUE(Between(c.ApproximateOffsetOf("k03"), 0, 0));
+ ASSERT_TRUE(Between(c.ApproximateOffsetOf("k04"), 10000, 11000));
ASSERT_TRUE(Between(c.ApproximateOffsetOf("k04a"), 210000, 211000));
- ASSERT_TRUE(Between(c.ApproximateOffsetOf("k05"), 210000, 211000));
- ASSERT_TRUE(Between(c.ApproximateOffsetOf("k06"), 510000, 511000));
- ASSERT_TRUE(Between(c.ApproximateOffsetOf("k07"), 510000, 511000));
- ASSERT_TRUE(Between(c.ApproximateOffsetOf("xyz"), 610000, 612000));
-
+ ASSERT_TRUE(Between(c.ApproximateOffsetOf("k05"), 210000, 211000));
+ ASSERT_TRUE(Between(c.ApproximateOffsetOf("k06"), 510000, 511000));
+ ASSERT_TRUE(Between(c.ApproximateOffsetOf("k07"), 510000, 511000));
+ ASSERT_TRUE(Between(c.ApproximateOffsetOf("xyz"), 610000, 612000));
}
static bool SnappyCompressionSupported() {
// Expected upper and lower bounds of space used by compressible strings.
static const int kSlop = 1000; // Compressor effectiveness varies.
- const int expected = 2500; // 10000 * compression ratio (0.25)
+ const int expected = 2500; // 10000 * compression ratio (0.25)
const int min_z = expected - kSlop;
const int max_z = expected + kSlop;
} // namespace leveldb
-int main(int argc, char** argv) {
- return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
typedef Iterator* (*BlockFunction)(void*, const ReadOptions&, const Slice&);
-class TwoLevelIterator: public Iterator {
+class TwoLevelIterator : public Iterator {
public:
- TwoLevelIterator(
- Iterator* index_iter,
- BlockFunction block_function,
- void* arg,
- const ReadOptions& options);
+ TwoLevelIterator(Iterator* index_iter, BlockFunction block_function,
+ void* arg, const ReadOptions& options);
virtual ~TwoLevelIterator();
virtual void Next();
virtual void Prev();
- virtual bool Valid() const {
- return data_iter_.Valid();
- }
+ virtual bool Valid() const { return data_iter_.Valid(); }
virtual Slice key() const {
assert(Valid());
return data_iter_.key();
const ReadOptions options_;
Status status_;
IteratorWrapper index_iter_;
- IteratorWrapper data_iter_; // May be nullptr
+ IteratorWrapper data_iter_; // May be nullptr
// If data_iter_ is non-null, then "data_block_handle_" holds the
// "index_value" passed to block_function_ to create the data_iter_.
std::string data_block_handle_;
};
-TwoLevelIterator::TwoLevelIterator(
- Iterator* index_iter,
- BlockFunction block_function,
- void* arg,
- const ReadOptions& options)
+TwoLevelIterator::TwoLevelIterator(Iterator* index_iter,
+ BlockFunction block_function, void* arg,
+ const ReadOptions& options)
: block_function_(block_function),
arg_(arg),
options_(options),
index_iter_(index_iter),
- data_iter_(nullptr) {
-}
+ data_iter_(nullptr) {}
-TwoLevelIterator::~TwoLevelIterator() {
-}
+TwoLevelIterator::~TwoLevelIterator() {}
void TwoLevelIterator::Seek(const Slice& target) {
index_iter_.Seek(target);
SkipEmptyDataBlocksBackward();
}
-
void TwoLevelIterator::SkipEmptyDataBlocksForward() {
while (data_iter_.iter() == nullptr || !data_iter_.Valid()) {
// Move to next block
SetDataIterator(nullptr);
} else {
Slice handle = index_iter_.value();
- if (data_iter_.iter() != nullptr && handle.compare(data_block_handle_) == 0) {
+ if (data_iter_.iter() != nullptr &&
+ handle.compare(data_block_handle_) == 0) {
// data_iter_ is already constructed with this iterator, so
// no need to change anything
} else {
} // namespace
-Iterator* NewTwoLevelIterator(
- Iterator* index_iter,
- BlockFunction block_function,
- void* arg,
- const ReadOptions& options) {
+Iterator* NewTwoLevelIterator(Iterator* index_iter,
+ BlockFunction block_function, void* arg,
+ const ReadOptions& options) {
return new TwoLevelIterator(index_iter, block_function, arg, options);
}
// Uses a supplied function to convert an index_iter value into
// an iterator over the contents of the corresponding block.
Iterator* NewTwoLevelIterator(
Iterator* index_iter,
- Iterator* (*block_function)(
- void* arg,
- const ReadOptions& options,
- const Slice& index_value),
- void* arg,
- const ReadOptions& options);
+ Iterator* (*block_function)(void* arg, const ReadOptions& options,
+ const Slice& index_value),
+ void* arg, const ReadOptions& options);
} // namespace leveldb
static const int kBlockSize = 4096;
-Arena::Arena() : memory_usage_(0) {
- alloc_ptr_ = nullptr; // First allocation will allocate a block
- alloc_bytes_remaining_ = 0;
-}
+Arena::Arena()
+ : alloc_ptr_(nullptr), alloc_bytes_remaining_(0), memory_usage_(0) {}
Arena::~Arena() {
for (size_t i = 0; i < blocks_.size(); i++) {
char* Arena::AllocateAligned(size_t bytes) {
const int align = (sizeof(void*) > 8) ? sizeof(void*) : 8;
- assert((align & (align-1)) == 0); // Pointer size should be a power of 2
- size_t current_mod = reinterpret_cast<uintptr_t>(alloc_ptr_) & (align-1);
+ static_assert((align & (align - 1)) == 0,
+ "Pointer size should be a power of 2");
+ size_t current_mod = reinterpret_cast<uintptr_t>(alloc_ptr_) & (align - 1);
size_t slop = (current_mod == 0 ? 0 : align - current_mod);
size_t needed = bytes + slop;
char* result;
// AllocateFallback always returned aligned memory
result = AllocateFallback(bytes);
}
- assert((reinterpret_cast<uintptr_t>(result) & (align-1)) == 0);
+ assert((reinterpret_cast<uintptr_t>(result) & (align - 1)) == 0);
return result;
}
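// [Editorial sketch, not part of the patch] The slop computation above rounds
// the bump pointer up to the next aligned address. With 8-byte alignment:
#include <cassert>
#include <cstddef>
#include <cstdint>

int main() {
  const int align = 8;                 // pointer-sized alignment
  const uintptr_t alloc_ptr = 0x1003;  // hypothetical bump-pointer value
  const size_t current_mod = alloc_ptr & (align - 1);                // 3
  const size_t slop = (current_mod == 0) ? 0 : align - current_mod;  // 5
  assert(((alloc_ptr + slop) & (align - 1)) == 0);  // 0x1008 is aligned
}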
class Arena {
public:
Arena();
+
+ Arena(const Arena&) = delete;
+ Arena& operator=(const Arena&) = delete;
+
~Arena();
// Return a pointer to a newly allocated memory block of "bytes" bytes.
// TODO(costan): This member is accessed via atomics, but the others are
// accessed without any locking. Is this OK?
std::atomic<size_t> memory_usage_;
-
- // No copying allowed
- Arena(const Arena&);
- void operator=(const Arena&);
};
inline char* Arena::Allocate(size_t bytes) {
namespace leveldb {
-class ArenaTest { };
+class ArenaTest {};
-TEST(ArenaTest, Empty) {
- Arena arena;
-}
+TEST(ArenaTest, Empty) { Arena arena; }
TEST(ArenaTest, Simple) {
std::vector<std::pair<size_t, char*> > allocated;
if (i % (N / 10) == 0) {
s = i;
} else {
- s = rnd.OneIn(4000) ? rnd.Uniform(6000) :
- (rnd.OneIn(10) ? rnd.Uniform(100) : rnd.Uniform(20));
+ s = rnd.OneIn(4000)
+ ? rnd.Uniform(6000)
+ : (rnd.OneIn(10) ? rnd.Uniform(100) : rnd.Uniform(20));
}
if (s == 0) {
// Our arena disallows size 0 allocations.
bytes += s;
allocated.push_back(std::make_pair(s, r));
ASSERT_GE(arena.MemoryUsage(), bytes);
- if (i > N/10) {
+ if (i > N / 10) {
ASSERT_LE(arena.MemoryUsage(), bytes * 1.10);
}
}
} // namespace leveldb
-int main(int argc, char** argv) {
- return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
}
class BloomFilterPolicy : public FilterPolicy {
- private:
- size_t bits_per_key_;
- size_t k_;
-
public:
- explicit BloomFilterPolicy(int bits_per_key)
- : bits_per_key_(bits_per_key) {
+ explicit BloomFilterPolicy(int bits_per_key) : bits_per_key_(bits_per_key) {
// We intentionally round down to reduce probing cost a little bit
k_ = static_cast<size_t>(bits_per_key * 0.69); // 0.69 =~ ln(2)
if (k_ < 1) k_ = 1;
if (k_ > 30) k_ = 30;
}
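// [Editorial sketch, not part of the patch] The rounding above, for the
// common NewBloomFilterPolicy(10) configuration:
#include <cassert>
#include <cstddef>

int main() {
  const int bits_per_key = 10;
  const size_t k = static_cast<size_t>(bits_per_key * 0.69);  // ~ ln 2
  assert(k == 6);  // 6 probes per key at 10 bits/key
}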
- virtual const char* Name() const {
- return "leveldb.BuiltinBloomFilter2";
- }
+ virtual const char* Name() const { return "leveldb.BuiltinBloomFilter2"; }
virtual void CreateFilter(const Slice* keys, int n, std::string* dst) const {
// Compute bloom filter size (in both bits and bytes)
const uint32_t delta = (h >> 17) | (h << 15); // Rotate right 17 bits
for (size_t j = 0; j < k_; j++) {
const uint32_t bitpos = h % bits;
- array[bitpos/8] |= (1 << (bitpos % 8));
+ array[bitpos / 8] |= (1 << (bitpos % 8));
h += delta;
}
}
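// [Editorial sketch, not part of the patch] CreateFilter() above uses double
// hashing: one base hash plus a rotated delta generates all k probe
// positions. A lookup replays the same sequence and requires every bit set:
#include <cassert>
#include <cstdint>

int main() {
  uint8_t array[8] = {0};  // a tiny 64-bit filter
  const uint32_t bits = 64;
  const uint32_t base = 0x9747b28c;  // stand-in for the key's hash
  const uint32_t delta = (base >> 17) | (base << 15);  // rotate right 17 bits

  uint32_t h = base;
  for (int j = 0; j < 6; j++) {  // k == 6 probes
    const uint32_t bitpos = h % bits;
    array[bitpos / 8] |= (1 << (bitpos % 8));
    h += delta;
  }

  h = base;  // replay the probe sequence, as KeyMayMatch() does
  for (int j = 0; j < 6; j++) {
    const uint32_t bitpos = h % bits;
    assert((array[bitpos / 8] & (1 << (bitpos % 8))) != 0);
    h += delta;
  }
}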
// Use the encoded k so that we can read filters generated by
// bloom filters created using different parameters.
- const size_t k = array[len-1];
+ const size_t k = array[len - 1];
if (k > 30) {
// Reserved for potentially new encodings for short bloom filters.
// Consider it a match.
const uint32_t delta = (h >> 17) | (h << 15); // Rotate right 17 bits
for (size_t j = 0; j < k; j++) {
const uint32_t bitpos = h % bits;
- if ((array[bitpos/8] & (1 << (bitpos % 8))) == 0) return false;
+ if ((array[bitpos / 8] & (1 << (bitpos % 8))) == 0) return false;
h += delta;
}
return true;
}
+
+ private:
+ size_t bits_per_key_;
+ size_t k_;
};
-}
+} // namespace
const FilterPolicy* NewBloomFilterPolicy(int bits_per_key) {
return new BloomFilterPolicy(bits_per_key);
}
class BloomTest {
- private:
- const FilterPolicy* policy_;
- std::string filter_;
- std::vector<std::string> keys_;
-
public:
- BloomTest() : policy_(NewBloomFilterPolicy(10)) { }
+ BloomTest() : policy_(NewBloomFilterPolicy(10)) {}
- ~BloomTest() {
- delete policy_;
- }
+ ~BloomTest() { delete policy_; }
void Reset() {
keys_.clear();
filter_.clear();
}
- void Add(const Slice& s) {
- keys_.push_back(s.ToString());
- }
+ void Add(const Slice& s) { keys_.push_back(s.ToString()); }
void Build() {
std::vector<Slice> key_slices;
if (kVerbose >= 2) DumpFilter();
}
- size_t FilterSize() const {
- return filter_.size();
- }
+ size_t FilterSize() const { return filter_.size(); }
void DumpFilter() {
fprintf(stderr, "F(");
- for (size_t i = 0; i+1 < filter_.size(); i++) {
+ for (size_t i = 0; i + 1 < filter_.size(); i++) {
const unsigned int c = static_cast<unsigned int>(filter_[i]);
for (int j = 0; j < 8; j++) {
- fprintf(stderr, "%c", (c & (1 <<j)) ? '1' : '.');
+ fprintf(stderr, "%c", (c & (1 << j)) ? '1' : '.');
}
}
fprintf(stderr, ")\n");
}
return result / 10000.0;
}
+
+ private:
+ const FilterPolicy* policy_;
+ std::string filter_;
+ std::vector<std::string> keys_;
};
TEST(BloomTest, EmptyFilter) {
- ASSERT_TRUE(! Matches("hello"));
- ASSERT_TRUE(! Matches("world"));
+ ASSERT_TRUE(!Matches("hello"));
+ ASSERT_TRUE(!Matches("world"));
}
TEST(BloomTest, Small) {
Add("world");
ASSERT_TRUE(Matches("hello"));
ASSERT_TRUE(Matches("world"));
- ASSERT_TRUE(! Matches("x"));
- ASSERT_TRUE(! Matches("foo"));
+ ASSERT_TRUE(!Matches("x"));
+ ASSERT_TRUE(!Matches("foo"));
}
static int NextLength(int length) {
double rate = FalsePositiveRate();
if (kVerbose >= 1) {
fprintf(stderr, "False positives: %5.2f%% @ length = %6d ; bytes = %6d\n",
- rate*100.0, length, static_cast<int>(FilterSize()));
+ rate * 100.0, length, static_cast<int>(FilterSize()));
}
- ASSERT_LE(rate, 0.02); // Must not be over 2%
- if (rate > 0.0125) mediocre_filters++; // Allowed, but not too often
- else good_filters++;
+ ASSERT_LE(rate, 0.02); // Must not be over 2%
+ if (rate > 0.0125)
+ mediocre_filters++; // Allowed, but not too often
+ else
+ good_filters++;
}
if (kVerbose >= 1) {
- fprintf(stderr, "Filters: %d good, %d mediocre\n",
- good_filters, mediocre_filters);
+ fprintf(stderr, "Filters: %d good, %d mediocre\n", good_filters,
+ mediocre_filters);
}
- ASSERT_LE(mediocre_filters, good_filters/5);
+ ASSERT_LE(mediocre_filters, good_filters / 5);
}
// Different bits-per-byte
} // namespace leveldb
-int main(int argc, char** argv) {
- return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
namespace leveldb {
-Cache::~Cache() {
-}
+Cache::~Cache() {}
namespace {
LRUHandle* next_hash;
LRUHandle* next;
LRUHandle* prev;
- size_t charge; // TODO(opt): Only allow uint32_t?
+ size_t charge; // TODO(opt): Only allow uint32_t?
size_t key_length;
- bool in_cache; // Whether entry is in the cache.
- uint32_t refs; // References, including cache reference, if present.
- uint32_t hash; // Hash of key(); used for fast sharding and comparisons
- char key_data[1]; // Beginning of key
+ bool in_cache; // Whether entry is in the cache.
+ uint32_t refs; // References, including cache reference, if present.
+ uint32_t hash; // Hash of key(); used for fast sharding and comparisons
+ char key_data[1]; // Beginning of key
Slice key() const {
    // next_ is only equal to this if the LRU handle is the list head of an
    // empty list. List heads never have meaningful keys.
  // Return a pointer to a slot that points to a cache entry matching
  // key/hash. If there is no such entry, return a pointer to the
  // trailing slot in the corresponding linked list.
LRUHandle** FindPointer(const Slice& key, uint32_t hash) {
LRUHandle** ptr = &list_[hash & (length_ - 1)];
- while (*ptr != nullptr &&
- ((*ptr)->hash != hash || key != (*ptr)->key())) {
+ while (*ptr != nullptr && ((*ptr)->hash != hash || key != (*ptr)->key())) {
ptr = &(*ptr)->next_hash;
}
return ptr;
void SetCapacity(size_t capacity) { capacity_ = capacity; }
// Like Cache methods, but with an extra "hash" parameter.
- Cache::Handle* Insert(const Slice& key, uint32_t hash,
- void* value, size_t charge,
+ Cache::Handle* Insert(const Slice& key, uint32_t hash, void* value,
+ size_t charge,
void (*deleter)(const Slice& key, void* value));
Cache::Handle* Lookup(const Slice& key, uint32_t hash);
void Release(Cache::Handle* handle);
private:
void LRU_Remove(LRUHandle* e);
- void LRU_Append(LRUHandle*list, LRUHandle* e);
+ void LRU_Append(LRUHandle* list, LRUHandle* e);
void Ref(LRUHandle* e);
void Unref(LRUHandle* e);
bool FinishErase(LRUHandle* e) EXCLUSIVE_LOCKS_REQUIRED(mutex_);
HandleTable table_ GUARDED_BY(mutex_);
};
-LRUCache::LRUCache()
- : usage_(0) {
+LRUCache::LRUCache() : capacity_(0), usage_(0) {
// Make empty circular linked lists.
lru_.next = &lru_;
lru_.prev = &lru_;
LRUCache::~LRUCache() {
assert(in_use_.next == &in_use_); // Error if caller has an unreleased handle
- for (LRUHandle* e = lru_.next; e != &lru_; ) {
+ for (LRUHandle* e = lru_.next; e != &lru_;) {
LRUHandle* next = e->next;
assert(e->in_cache);
e->in_cache = false;
Unref(reinterpret_cast<LRUHandle*>(handle));
}
-Cache::Handle* LRUCache::Insert(
- const Slice& key, uint32_t hash, void* value, size_t charge,
- void (*deleter)(const Slice& key, void* value)) {
+Cache::Handle* LRUCache::Insert(const Slice& key, uint32_t hash, void* value,
+ size_t charge,
+ void (*deleter)(const Slice& key,
+ void* value)) {
MutexLock l(&mutex_);
- LRUHandle* e = reinterpret_cast<LRUHandle*>(
- malloc(sizeof(LRUHandle)-1 + key.size()));
+ LRUHandle* e =
+ reinterpret_cast<LRUHandle*>(malloc(sizeof(LRUHandle) - 1 + key.size()));
e->value = value;
e->deleter = deleter;
e->charge = charge;
return Hash(s.data(), s.size(), 0);
}
- static uint32_t Shard(uint32_t hash) {
- return hash >> (32 - kNumShardBits);
- }
+ static uint32_t Shard(uint32_t hash) { return hash >> (32 - kNumShardBits); }
public:
- explicit ShardedLRUCache(size_t capacity)
- : last_id_(0) {
+ explicit ShardedLRUCache(size_t capacity) : last_id_(0) {
const size_t per_shard = (capacity + (kNumShards - 1)) / kNumShards;
for (int s = 0; s < kNumShards; s++) {
shard_[s].SetCapacity(per_shard);
}
}
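// [Editorial sketch, not part of the patch] Capacity is split evenly across
// shards, rounding up, and the top hash bits pick the shard. Assuming
// kNumShardBits == 4 (its value in cache.cc), so 16 shards:
#include <cassert>
#include <cstddef>
#include <cstdint>

int main() {
  const int kNumShardBits = 4;
  const int kNumShards = 1 << kNumShardBits;  // 16
  const size_t capacity = 1000;
  const size_t per_shard = (capacity + (kNumShards - 1)) / kNumShards;
  assert(per_shard == 63);  // ceil(1000 / 16)
  const uint32_t hash = 0xDEADBEEF;
  assert((hash >> (32 - kNumShardBits)) == 0xD);  // top 4 bits pick the shard
}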
- virtual ~ShardedLRUCache() { }
+ virtual ~ShardedLRUCache() {}
virtual Handle* Insert(const Slice& key, void* value, size_t charge,
void (*deleter)(const Slice& key, void* value)) {
const uint32_t hash = HashSlice(key);
} // end anonymous namespace
-Cache* NewLRUCache(size_t capacity) {
- return new ShardedLRUCache(capacity);
-}
+Cache* NewLRUCache(size_t capacity) { return new ShardedLRUCache(capacity); }
} // namespace leveldb
class CacheTest {
public:
- static CacheTest* current_;
-
static void Deleter(const Slice& key, void* v) {
current_->deleted_keys_.push_back(DecodeKey(key));
current_->deleted_values_.push_back(DecodeValue(v));
std::vector<int> deleted_values_;
Cache* cache_;
- CacheTest() : cache_(NewLRUCache(kCacheSize)) {
- current_ = this;
- }
+ CacheTest() : cache_(NewLRUCache(kCacheSize)) { current_ = this; }
- ~CacheTest() {
- delete cache_;
- }
+ ~CacheTest() { delete cache_; }
int Lookup(int key) {
Cache::Handle* handle = cache_->Lookup(EncodeKey(key));
&CacheTest::Deleter);
}
- void Erase(int key) {
- cache_->Erase(EncodeKey(key));
- }
+ void Erase(int key) { cache_->Erase(EncodeKey(key)); }
+
+ static CacheTest* current_;
};
CacheTest* CacheTest::current_;
Insert(100, 101);
ASSERT_EQ(101, Lookup(100));
- ASSERT_EQ(-1, Lookup(200));
- ASSERT_EQ(-1, Lookup(300));
+ ASSERT_EQ(-1, Lookup(200));
+ ASSERT_EQ(-1, Lookup(300));
Insert(200, 201);
ASSERT_EQ(101, Lookup(100));
ASSERT_EQ(201, Lookup(200));
- ASSERT_EQ(-1, Lookup(300));
+ ASSERT_EQ(-1, Lookup(300));
Insert(100, 102);
ASSERT_EQ(102, Lookup(100));
ASSERT_EQ(201, Lookup(200));
- ASSERT_EQ(-1, Lookup(300));
+ ASSERT_EQ(-1, Lookup(300));
ASSERT_EQ(1, deleted_keys_.size());
ASSERT_EQ(100, deleted_keys_[0]);
Insert(100, 101);
Insert(200, 201);
Erase(100);
- ASSERT_EQ(-1, Lookup(100));
+ ASSERT_EQ(-1, Lookup(100));
ASSERT_EQ(201, Lookup(200));
ASSERT_EQ(1, deleted_keys_.size());
ASSERT_EQ(100, deleted_keys_[0]);
ASSERT_EQ(101, deleted_values_[0]);
Erase(100);
- ASSERT_EQ(-1, Lookup(100));
+ ASSERT_EQ(-1, Lookup(100));
ASSERT_EQ(201, Lookup(200));
ASSERT_EQ(1, deleted_keys_.size());
}
// Frequently used entry must be kept around,
// as must things that are still in use.
for (int i = 0; i < kCacheSize + 100; i++) {
- Insert(1000+i, 2000+i);
- ASSERT_EQ(2000+i, Lookup(1000+i));
+ Insert(1000 + i, 2000 + i);
+ ASSERT_EQ(2000 + i, Lookup(1000 + i));
ASSERT_EQ(101, Lookup(100));
}
ASSERT_EQ(101, Lookup(100));
// Overfill the cache, keeping handles on all inserted entries.
std::vector<Cache::Handle*> h;
for (int i = 0; i < kCacheSize + 100; i++) {
- h.push_back(InsertAndReturnHandle(1000+i, 2000+i));
+ h.push_back(InsertAndReturnHandle(1000 + i, 2000 + i));
}
// Check that all the entries can be found in the cache.
for (int i = 0; i < h.size(); i++) {
- ASSERT_EQ(2000+i, Lookup(1000+i));
+ ASSERT_EQ(2000 + i, Lookup(1000 + i));
}
for (int i = 0; i < h.size(); i++) {
const int kHeavy = 10;
int added = 0;
int index = 0;
- while (added < 2*kCacheSize) {
+ while (added < 2 * kCacheSize) {
const int weight = (index & 1) ? kLight : kHeavy;
- Insert(index, 1000+index, weight);
+ Insert(index, 1000 + index, weight);
added += weight;
index++;
}
int r = Lookup(i);
if (r >= 0) {
cached_weight += weight;
- ASSERT_EQ(1000+i, r);
+ ASSERT_EQ(1000 + i, r);
}
}
- ASSERT_LE(cached_weight, kCacheSize + kCacheSize/10);
+ ASSERT_LE(cached_weight, kCacheSize + kCacheSize / 10);
}
TEST(CacheTest, NewId) {
} // namespace leveldb
-int main(int argc, char** argv) {
- return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
namespace leveldb {
-void EncodeFixed32(char* buf, uint32_t value) {
+void EncodeFixed32(char* dst, uint32_t value) {
if (port::kLittleEndian) {
- memcpy(buf, &value, sizeof(value));
+ memcpy(dst, &value, sizeof(value));
} else {
- buf[0] = value & 0xff;
- buf[1] = (value >> 8) & 0xff;
- buf[2] = (value >> 16) & 0xff;
- buf[3] = (value >> 24) & 0xff;
+ dst[0] = value & 0xff;
+ dst[1] = (value >> 8) & 0xff;
+ dst[2] = (value >> 16) & 0xff;
+ dst[3] = (value >> 24) & 0xff;
}
}
-void EncodeFixed64(char* buf, uint64_t value) {
+void EncodeFixed64(char* dst, uint64_t value) {
if (port::kLittleEndian) {
- memcpy(buf, &value, sizeof(value));
+ memcpy(dst, &value, sizeof(value));
} else {
- buf[0] = value & 0xff;
- buf[1] = (value >> 8) & 0xff;
- buf[2] = (value >> 16) & 0xff;
- buf[3] = (value >> 24) & 0xff;
- buf[4] = (value >> 32) & 0xff;
- buf[5] = (value >> 40) & 0xff;
- buf[6] = (value >> 48) & 0xff;
- buf[7] = (value >> 56) & 0xff;
+ dst[0] = value & 0xff;
+ dst[1] = (value >> 8) & 0xff;
+ dst[2] = (value >> 16) & 0xff;
+ dst[3] = (value >> 24) & 0xff;
+ dst[4] = (value >> 32) & 0xff;
+ dst[5] = (value >> 40) & 0xff;
+ dst[6] = (value >> 48) & 0xff;
+ dst[7] = (value >> 56) & 0xff;
}
}
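// [Editorial sketch, not part of the patch] The byte-by-byte branch above
// makes the encoding little-endian regardless of the host:
#include <cassert>
#include <cstdint>

int main() {
  unsigned char buf[4];
  const uint32_t value = 0x04030201;
  buf[0] = value & 0xff;          // 0x01, least significant byte first
  buf[1] = (value >> 8) & 0xff;   // 0x02
  buf[2] = (value >> 16) & 0xff;  // 0x03
  buf[3] = (value >> 24) & 0xff;  // 0x04
  assert(buf[0] == 1 && buf[1] == 2 && buf[2] == 3 && buf[3] == 4);
}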
// Operate on characters as unsigneds
unsigned char* ptr = reinterpret_cast<unsigned char*>(dst);
static const int B = 128;
- if (v < (1<<7)) {
+ if (v < (1 << 7)) {
*(ptr++) = v;
- } else if (v < (1<<14)) {
+ } else if (v < (1 << 14)) {
*(ptr++) = v | B;
- *(ptr++) = v>>7;
- } else if (v < (1<<21)) {
+ *(ptr++) = v >> 7;
+ } else if (v < (1 << 21)) {
*(ptr++) = v | B;
- *(ptr++) = (v>>7) | B;
- *(ptr++) = v>>14;
- } else if (v < (1<<28)) {
+ *(ptr++) = (v >> 7) | B;
+ *(ptr++) = v >> 14;
+ } else if (v < (1 << 28)) {
*(ptr++) = v | B;
- *(ptr++) = (v>>7) | B;
- *(ptr++) = (v>>14) | B;
- *(ptr++) = v>>21;
+ *(ptr++) = (v >> 7) | B;
+ *(ptr++) = (v >> 14) | B;
+ *(ptr++) = v >> 21;
} else {
*(ptr++) = v | B;
- *(ptr++) = (v>>7) | B;
- *(ptr++) = (v>>14) | B;
- *(ptr++) = (v>>21) | B;
- *(ptr++) = v>>28;
+ *(ptr++) = (v >> 7) | B;
+ *(ptr++) = (v >> 14) | B;
+ *(ptr++) = (v >> 21) | B;
+ *(ptr++) = v >> 28;
}
return reinterpret_cast<char*>(ptr);
}
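A worked example of the continuation-bit scheme above (editorial, not part of the diff):
// v = 300 takes the v < (1 << 14) branch:
//   byte 0: static_cast<unsigned char>(300 | 0x80) == 0xac  // low 7 bits, high bit set
//   byte 1: 300 >> 7 == 0x02                                // remaining bits, high bit clear
// so EncodeVarint32(dst, 300) writes {0xac, 0x02} and returns dst + 2.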
static const int B = 128;
unsigned char* ptr = reinterpret_cast<unsigned char*>(dst);
while (v >= B) {
- *(ptr++) = (v & (B-1)) | B;
+ *(ptr++) = v | B;
v >>= 7;
}
*(ptr++) = static_cast<unsigned char>(v);
return len;
}
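The one non-whitespace change in this hunk, (v & (B - 1)) | B becoming v | B, is behavior-preserving: the store targets an unsigned char, which keeps only the low 8 bits.
// Sketch: for v = 0x1ff, (v & 0x7f) | 0x80 == 0xff and
// static_cast<unsigned char>(v | 0x80) == 0xff as well; the truncating
// store makes the explicit mask redundant.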
-const char* GetVarint32PtrFallback(const char* p,
- const char* limit,
+const char* GetVarint32PtrFallback(const char* p, const char* limit,
uint32_t* value) {
uint32_t result = 0;
for (uint32_t shift = 0; shift <= 28 && p < limit; shift += 7) {
bool GetLengthPrefixedSlice(Slice* input, Slice* result) {
uint32_t len;
- if (GetVarint32(input, &len) &&
- input->size() >= len) {
+ if (GetVarint32(input, &len) && input->size() >= len) {
*result = Slice(input->data(), len);
input->remove_prefix(len);
return true;
memcpy(&result, ptr, sizeof(result)); // gcc optimizes this to a plain load
return result;
} else {
- return ((static_cast<uint32_t>(static_cast<unsigned char>(ptr[0])))
- | (static_cast<uint32_t>(static_cast<unsigned char>(ptr[1])) << 8)
- | (static_cast<uint32_t>(static_cast<unsigned char>(ptr[2])) << 16)
- | (static_cast<uint32_t>(static_cast<unsigned char>(ptr[3])) << 24));
+ return ((static_cast<uint32_t>(static_cast<unsigned char>(ptr[0]))) |
+ (static_cast<uint32_t>(static_cast<unsigned char>(ptr[1])) << 8) |
+ (static_cast<uint32_t>(static_cast<unsigned char>(ptr[2])) << 16) |
+ (static_cast<uint32_t>(static_cast<unsigned char>(ptr[3])) << 24));
}
}
}
// Internal routine for use by fallback path of GetVarint32Ptr
-const char* GetVarint32PtrFallback(const char* p,
- const char* limit,
+const char* GetVarint32PtrFallback(const char* p, const char* limit,
uint32_t* value);
-inline const char* GetVarint32Ptr(const char* p,
- const char* limit,
+inline const char* GetVarint32Ptr(const char* p, const char* limit,
uint32_t* value) {
if (p < limit) {
uint32_t result = *(reinterpret_cast<const unsigned char*>(p));
namespace leveldb {
-class Coding { };
+class Coding {};
TEST(Coding, Fixed32) {
std::string s;
uint64_t v = static_cast<uint64_t>(1) << power;
uint64_t actual;
actual = DecodeFixed64(p);
- ASSERT_EQ(v-1, actual);
+ ASSERT_EQ(v - 1, actual);
p += sizeof(uint64_t);
actual = DecodeFixed64(p);
- ASSERT_EQ(v+0, actual);
+ ASSERT_EQ(v + 0, actual);
p += sizeof(uint64_t);
actual = DecodeFixed64(p);
- ASSERT_EQ(v+1, actual);
+ ASSERT_EQ(v + 1, actual);
p += sizeof(uint64_t);
}
}
// Test values near powers of two
const uint64_t power = 1ull << k;
values.push_back(power);
- values.push_back(power-1);
- values.push_back(power+1);
+ values.push_back(power - 1);
+ values.push_back(power + 1);
}
std::string s;
TEST(Coding, Varint32Overflow) {
uint32_t result;
std::string input("\x81\x82\x83\x84\x85\x11");
- ASSERT_TRUE(GetVarint32Ptr(input.data(), input.data() + input.size(), &result)
- == nullptr);
+ ASSERT_TRUE(GetVarint32Ptr(input.data(), input.data() + input.size(),
+ &result) == nullptr);
}
TEST(Coding, Varint32Truncation) {
for (size_t len = 0; len < s.size() - 1; len++) {
ASSERT_TRUE(GetVarint32Ptr(s.data(), s.data() + len, &result) == nullptr);
}
- ASSERT_TRUE(
- GetVarint32Ptr(s.data(), s.data() + s.size(), &result) != nullptr);
+ ASSERT_TRUE(GetVarint32Ptr(s.data(), s.data() + s.size(), &result) !=
+ nullptr);
ASSERT_EQ(large_value, result);
}
TEST(Coding, Varint64Overflow) {
uint64_t result;
std::string input("\x81\x82\x83\x84\x85\x81\x82\x83\x84\x85\x11");
- ASSERT_TRUE(GetVarint64Ptr(input.data(), input.data() + input.size(), &result)
- == nullptr);
+ ASSERT_TRUE(GetVarint64Ptr(input.data(), input.data() + input.size(),
+ &result) == nullptr);
}
TEST(Coding, Varint64Truncation) {
for (size_t len = 0; len < s.size() - 1; len++) {
ASSERT_TRUE(GetVarint64Ptr(s.data(), s.data() + len, &result) == nullptr);
}
- ASSERT_TRUE(
- GetVarint64Ptr(s.data(), s.data() + s.size(), &result) != nullptr);
+ ASSERT_TRUE(GetVarint64Ptr(s.data(), s.data() + s.size(), &result) !=
+ nullptr);
ASSERT_EQ(large_value, result);
}
} // namespace leveldb
-int main(int argc, char** argv) {
- return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
namespace leveldb {
-Comparator::~Comparator() { }
+Comparator::~Comparator() {}
namespace {
class BytewiseComparatorImpl : public Comparator {
public:
- BytewiseComparatorImpl() { }
+ BytewiseComparatorImpl() {}
- virtual const char* Name() const {
- return "leveldb.BytewiseComparator";
- }
+ virtual const char* Name() const { return "leveldb.BytewiseComparator"; }
virtual int Compare(const Slice& a, const Slice& b) const {
return a.compare(b);
}
- virtual void FindShortestSeparator(
- std::string* start,
- const Slice& limit) const {
+ virtual void FindShortestSeparator(std::string* start,
+ const Slice& limit) const {
// Find length of common prefix
size_t min_length = std::min(start->size(), limit.size());
size_t diff_index = 0;
const uint8_t byte = (*key)[i];
if (byte != static_cast<uint8_t>(0xff)) {
(*key)[i] = byte + 1;
- key->resize(i+1);
+ key->resize(i + 1);
return;
}
}
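For example (editorial): FindShortSuccessor shortens "abc" to "b", since 'a' is below 0xff and is incremented before the key is truncated to i + 1 = 1 bytes; a key of all 0xff bytes is left unchanged because no byte can be incremented.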
template <int N>
constexpr inline const uint8_t* RoundUp(const uint8_t* pointer) {
return reinterpret_cast<uint8_t*>(
- (reinterpret_cast<uintptr_t>(pointer) + (N - 1))
- & ~static_cast<uintptr_t>(N - 1));
+ (reinterpret_cast<uintptr_t>(pointer) + (N - 1)) &
+ ~static_cast<uintptr_t>(N - 1));
}
} // namespace
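A usage note (assumption: callers pass a power-of-two N, which the bit trick requires):
// Sketch: RoundUp<4> maps an address like 0x1005 to 0x1008, because
// (0x1005 + 3) & ~uintptr_t{3} == 0x1008. With a non-power-of-two N,
// ~(N - 1) no longer selects multiples of N, so the result would be wrong.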
return port::AcceleratedCRC32C(0, kTestCRCBuffer, kBufSize) == kTestCRCValue;
}
-uint32_t Extend(uint32_t crc, const char* buf, size_t size) {
+uint32_t Extend(uint32_t crc, const char* data, size_t n) {
static bool accelerate = CanAccelerateCRC32C();
if (accelerate) {
- return port::AcceleratedCRC32C(crc, buf, size);
+ return port::AcceleratedCRC32C(crc, data, n);
}
- const uint8_t* p = reinterpret_cast<const uint8_t*>(buf);
- const uint8_t* e = p + size;
+ const uint8_t* p = reinterpret_cast<const uint8_t*>(data);
+ const uint8_t* e = p + n;
uint32_t l = crc ^ kCRC32Xor;
// Process one byte at a time.
uint32_t Extend(uint32_t init_crc, const char* data, size_t n);
// Return the crc32c of data[0,n-1]
-inline uint32_t Value(const char* data, size_t n) {
- return Extend(0, data, n);
-}
+inline uint32_t Value(const char* data, size_t n) { return Extend(0, data, n); }
static const uint32_t kMaskDelta = 0xa282ead8ul;
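For context (unchanged by this diff; reproduced from memory of crc32c.h): kMaskDelta feeds the masking helper that makes stored CRCs safe to CRC again.
// Return a masked representation of crc: rotate right by 15 bits, add delta.
inline uint32_t Mask(uint32_t crc) {
  return ((crc >> 15) | (crc << 17)) + kMaskDelta;
}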
namespace leveldb {
namespace crc32c {
-class CRC { };
+class CRC {};
TEST(CRC, StandardResults) {
// From rfc3720 section B.4.
ASSERT_EQ(0x113fdb5c, Value(buf, sizeof(buf)));
unsigned char data[48] = {
- 0x01, 0xc0, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x14, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x04, 0x00,
- 0x00, 0x00, 0x00, 0x14,
- 0x00, 0x00, 0x00, 0x18,
- 0x28, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x02, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
+ 0x01, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00,
+ 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x18, 0x28, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
ASSERT_EQ(0xd9963a56, Value(reinterpret_cast<char*>(data), sizeof(data)));
}
-TEST(CRC, Values) {
- ASSERT_NE(Value("a", 1), Value("foo", 3));
-}
+TEST(CRC, Values) { ASSERT_NE(Value("a", 1), Value("foo", 3)); }
TEST(CRC, Extend) {
- ASSERT_EQ(Value("hello world", 11),
- Extend(Value("hello ", 6), "world", 5));
+ ASSERT_EQ(Value("hello world", 11), Extend(Value("hello ", 6), "world", 5));
}
TEST(CRC, Mask) {
} // namespace crc32c
} // namespace leveldb
-int main(int argc, char** argv) {
- return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
namespace leveldb {
-Env::~Env() {
-}
+Env::~Env() {}
Status Env::NewAppendableFile(const std::string& fname, WritableFile** result) {
return Status::NotSupported("NewAppendableFile", fname);
}
-SequentialFile::~SequentialFile() {
-}
+SequentialFile::~SequentialFile() {}
-RandomAccessFile::~RandomAccessFile() {
-}
+RandomAccessFile::~RandomAccessFile() {}
-WritableFile::~WritableFile() {
-}
+WritableFile::~WritableFile() {}
-Logger::~Logger() {
-}
+Logger::~Logger() {}
-FileLock::~FileLock() {
-}
+FileLock::~FileLock() {}
void Log(Logger* info_log, const char* format, ...) {
if (info_log != nullptr) {
}
static Status DoWriteStringToFile(Env* env, const Slice& data,
- const std::string& fname,
- bool should_sync) {
+ const std::string& fname, bool should_sync) {
WritableFile* file;
Status s = env->NewWritableFile(fname, &file);
if (!s.ok()) {
return s;
}
-EnvWrapper::~EnvWrapper() {
-}
+EnvWrapper::~EnvWrapper() {}
} // namespace leveldb
#include "leveldb/status.h"
#include "port/port.h"
#include "port/thread_annotations.h"
-#include "util/posix_logger.h"
#include "util/env_posix_test_helper.h"
+#include "util/posix_logger.h"
namespace leveldb {
int old_acquires_allowed =
acquires_allowed_.fetch_sub(1, std::memory_order_relaxed);
- if (old_acquires_allowed > 0)
- return true;
+ if (old_acquires_allowed > 0) return true;
acquires_allowed_.fetch_add(1, std::memory_order_relaxed);
return false;
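// Note (editorial): the optimistic fetch_sub above may transiently drive
// the counter below zero under contention; the compensating fetch_add
// restores it, so a failed Acquire() never leaks a permit.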
// Release a resource acquired by a previous call to Acquire() that returned
// true.
- void Release() {
- acquires_allowed_.fetch_add(1, std::memory_order_relaxed);
- }
+ void Release() { acquires_allowed_.fetch_add(1, std::memory_order_relaxed); }
private:
// The number of available resources.
private:
const bool has_permanent_fd_; // If false, the file is opened on every read.
- const int fd_; // -1 if has_permanent_fd_ is false.
+ const int fd_; // -1 if has_permanent_fd_ is false.
Limiter* const fd_limiter_;
const std::string filename_;
};
// instance is destroyed.
PosixMmapReadableFile(std::string filename, char* mmap_base, size_t length,
Limiter* mmap_limiter)
- : mmap_base_(mmap_base), length_(length), mmap_limiter_(mmap_limiter),
+ : mmap_base_(mmap_base),
+ length_(length),
+ mmap_limiter_(mmap_limiter),
filename_(std::move(filename)) {}
~PosixMmapReadableFile() override {
class PosixWritableFile final : public WritableFile {
public:
PosixWritableFile(std::string filename, int fd)
- : pos_(0), fd_(fd), is_manifest_(IsManifest(filename)),
- filename_(std::move(filename)), dirname_(Dirname(filename_)) {}
+ : pos_(0),
+ fd_(fd),
+ is_manifest_(IsManifest(filename)),
+ filename_(std::move(filename)),
+ dirname_(Dirname(filename_)) {}
~PosixWritableFile() override {
if (fd_ >= 0) {
return status;
}
- Status Flush() override {
- return FlushBuffer();
- }
+ Status Flush() override { return FlushBuffer(); }
Status Sync() override {
// Ensure new files referred to by the manifest are in the filesystem.
uint64_t file_size;
Status status = GetFileSize(filename, &file_size);
if (status.ok()) {
- void* mmap_base = ::mmap(/*addr=*/nullptr, file_size, PROT_READ,
- MAP_SHARED, fd, 0);
+ void* mmap_base =
+ ::mmap(/*addr=*/nullptr, file_size, PROT_READ, MAP_SHARED, fd, 0);
if (mmap_base != MAP_FAILED) {
- *result = new PosixMmapReadableFile(
- filename, reinterpret_cast<char*>(mmap_base), file_size,
- &mmap_limiter_);
+ *result = new PosixMmapReadableFile(filename,
+ reinterpret_cast<char*>(mmap_base),
+ file_size, &mmap_limiter_);
} else {
status = PosixError(filename, errno);
}
return static_cast<uint64_t>(tv.tv_sec) * kUsecondsPerSecond + tv.tv_usec;
}
- void SleepForMicroseconds(int micros) override {
- ::usleep(micros);
- }
+ void SleepForMicroseconds(int micros) override { ::usleep(micros); }
private:
void BackgroundThreadMain();
explicit BackgroundWorkItem(void (*function)(void* arg), void* arg)
: function(function), arg(arg) {}
- void (* const function)(void*);
+ void (*const function)(void*);
void* const arg;
};
-
port::Mutex background_work_mutex_;
port::CondVar background_work_cv_ GUARDED_BY(background_work_mutex_);
bool started_background_thread_ GUARDED_BY(background_work_mutex_);
PosixLockTable locks_; // Thread-safe.
Limiter mmap_limiter_; // Thread-safe.
- Limiter fd_limiter_; // Thread-safe.
+ Limiter fd_limiter_; // Thread-safe.
};
// Return the maximum number of concurrent mmaps.
-int MaxMmaps() {
- return g_mmap_limit;
-}
+int MaxMmaps() { return g_mmap_limit; }
// Return the maximum number of read-only files to keep open.
int MaxOpenFiles() {
: background_work_cv_(&background_work_mutex_),
started_background_thread_(false),
mmap_limiter_(MaxMmaps()),
- fd_limiter_(MaxOpenFiles()) {
-}
+ fd_limiter_(MaxOpenFiles()) {}
void PosixEnv::Schedule(
void (*background_work_function)(void* background_work_arg),
}
assert(!background_work_queue_.empty());
- auto background_work_function =
- background_work_queue_.front().function;
+ auto background_work_function = background_work_queue_.front().function;
void* background_work_arg = background_work_queue_.front().arg;
background_work_queue_.pop();
// static PlatformSingletonEnv default_env;
// return default_env.env();
// }
-template<typename EnvType>
+template <typename EnvType>
class SingletonEnv {
public:
SingletonEnv() {
};
#if !defined(NDEBUG)
-template<typename EnvType>
+template <typename EnvType>
std::atomic<bool> SingletonEnv<EnvType>::env_initialized_;
#endif // !defined(NDEBUG)
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "leveldb/env.h"
-
#include "port/port.h"
-#include "util/testharness.h"
#include "util/env_posix_test_helper.h"
+#include "util/testharness.h"
namespace leveldb {
-static const int kDelayMicros = 100000;
static const int kReadOnlyFileLimit = 4;
static const int kMMapLimit = 4;
class EnvPosixTest {
public:
- Env* env_;
- EnvPosixTest() : env_(Env::Default()) { }
-
static void SetFileLimits(int read_only_file_limit, int mmap_limit) {
EnvPosixTestHelper::SetReadOnlyFDLimit(read_only_file_limit);
EnvPosixTestHelper::SetReadOnlyMMapLimit(mmap_limit);
}
+
+ EnvPosixTest() : env_(Env::Default()) {}
+
+ Env* env_;
};
TEST(EnvPosixTest, TestOpenOnRead) {
namespace leveldb {
static const int kDelayMicros = 100000;
-static const int kReadOnlyFileLimit = 4;
-static const int kMMapLimit = 4;
class EnvTest {
public:
+ EnvTest() : env_(Env::Default()) {}
+
Env* env_;
- EnvTest() : env_(Env::Default()) { }
};
namespace {
const int id_; // Order# for the execution of this callback.
Callback(std::atomic<int>* last_id_ptr, int id)
- : last_id_ptr_(last_id_ptr), id_(id) { }
+ : last_id_ptr_(last_id_ptr), id_(id) {}
static void Run(void* arg) {
Callback* callback = reinterpret_cast<Callback*>(arg);
int val GUARDED_BY(mu);
int num_running GUARDED_BY(mu);
- State(int val, int num_running) : val(val), num_running(num_running) { }
+ State(int val, int num_running) : val(val), num_running(num_running) {}
};
static void ThreadBody(void* arg) {
ASSERT_TRUE(!env_->FileExists(non_existent_file));
RandomAccessFile* random_access_file;
- Status status = env_->NewRandomAccessFile(
- non_existent_file, &random_access_file);
+ Status status =
+ env_->NewRandomAccessFile(non_existent_file, &random_access_file);
ASSERT_TRUE(status.IsNotFound());
SequentialFile* sequential_file;
} // namespace leveldb
-int main(int argc, char** argv) {
- return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
int old_acquires_allowed =
acquires_allowed_.fetch_sub(1, std::memory_order_relaxed);
- if (old_acquires_allowed > 0)
- return true;
+ if (old_acquires_allowed > 0) return true;
acquires_allowed_.fetch_add(1, std::memory_order_relaxed);
return false;
// Release a resource acquired by a previous call to Acquire() that returned
// true.
- void Release() {
- acquires_allowed_.fetch_add(1, std::memory_order_relaxed);
- }
+ void Release() { acquires_allowed_.fetch_add(1, std::memory_order_relaxed); }
private:
// The number of available resources.
}
private:
- // BGThread() is the body of the background thread
- void BGThread();
-
- std::mutex mu_;
- std::condition_variable bgsignal_;
- bool started_bgthread_;
-
// Entry per Schedule() call
struct BGItem {
void* arg;
void (*function)(void*);
};
- typedef std::deque<BGItem> BGQueue;
- BGQueue queue_;
+ // BGThread() is the body of the background thread
+ void BGThread();
+
+ std::mutex mu_;
+ std::condition_variable bgsignal_;
+ bool started_bgthread_;
+ std::deque<BGItem> queue_;
Limiter mmap_limiter_;
};
class EnvWindowsTest {
public:
- Env* env_;
- EnvWindowsTest() : env_(Env::Default()) {}
-
static void SetFileLimits(int mmap_limit) {
EnvWindowsTestHelper::SetReadOnlyMMapLimit(mmap_limit);
}
+
+ EnvWindowsTest() : env_(Env::Default()) {}
+
+ Env* env_;
};
TEST(EnvWindowsTest, TestOpenOnRead) {
namespace leveldb {
-FilterPolicy::~FilterPolicy() { }
+FilterPolicy::~FilterPolicy() {}
} // namespace leveldb
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
+#include "util/hash.h"
+
#include <string.h>
+
#include "util/coding.h"
-#include "util/hash.h"
// The FALLTHROUGH_INTENDED macro can be used to annotate implicit fall-through
// between switch labels. The real definition should be provided externally.
// This one is a fallback version for unsupported compilers.
#ifndef FALLTHROUGH_INTENDED
-#define FALLTHROUGH_INTENDED do { } while (0)
+#define FALLTHROUGH_INTENDED \
+ do { \
+ } while (0)
#endif
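Usage sketch (paraphrasing, from memory, the switch in hash.cc that this diff elides; m and r are Hash()'s mix constants):
switch (limit - data) {
  case 3:
    h += static_cast<uint8_t>(data[2]) << 16;
    FALLTHROUGH_INTENDED;
  case 2:
    h += static_cast<uint8_t>(data[1]) << 8;
    FALLTHROUGH_INTENDED;
  case 1:
    h += static_cast<uint8_t>(data[0]);
    h *= m;
    h ^= (h >> r);
    break;
}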
namespace leveldb {
return h;
}
-
} // namespace leveldb
namespace leveldb {
-class HASH { };
+class HASH {};
TEST(HASH, SignedUnsignedIssue) {
const unsigned char data1[1] = {0x62};
const unsigned char data3[3] = {0xe2, 0x99, 0xa5};
const unsigned char data4[4] = {0xe1, 0x80, 0xb9, 0x32};
const unsigned char data5[48] = {
- 0x01, 0xc0, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x14, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x04, 0x00,
- 0x00, 0x00, 0x00, 0x14,
- 0x00, 0x00, 0x00, 0x18,
- 0x28, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x02, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
+ 0x01, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00,
+ 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x18, 0x28, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
ASSERT_EQ(Hash(0, 0, 0xbc9f1d34), 0xbc9f1d34);
} // namespace leveldb
-int main(int argc, char** argv) {
- return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
+#include "util/histogram.h"
+
#include <math.h>
#include <stdio.h>
+
#include "port/port.h"
-#include "util/histogram.h"
namespace leveldb {
const double Histogram::kBucketLimit[kNumBuckets] = {
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 14, 16, 18, 20, 25, 30, 35, 40, 45,
- 50, 60, 70, 80, 90, 100, 120, 140, 160, 180, 200, 250, 300, 350, 400, 450,
- 500, 600, 700, 800, 900, 1000, 1200, 1400, 1600, 1800, 2000, 2500, 3000,
- 3500, 4000, 4500, 5000, 6000, 7000, 8000, 9000, 10000, 12000, 14000,
- 16000, 18000, 20000, 25000, 30000, 35000, 40000, 45000, 50000, 60000,
- 70000, 80000, 90000, 100000, 120000, 140000, 160000, 180000, 200000,
- 250000, 300000, 350000, 400000, 450000, 500000, 600000, 700000, 800000,
- 900000, 1000000, 1200000, 1400000, 1600000, 1800000, 2000000, 2500000,
- 3000000, 3500000, 4000000, 4500000, 5000000, 6000000, 7000000, 8000000,
- 9000000, 10000000, 12000000, 14000000, 16000000, 18000000, 20000000,
- 25000000, 30000000, 35000000, 40000000, 45000000, 50000000, 60000000,
- 70000000, 80000000, 90000000, 100000000, 120000000, 140000000, 160000000,
- 180000000, 200000000, 250000000, 300000000, 350000000, 400000000,
- 450000000, 500000000, 600000000, 700000000, 800000000, 900000000,
- 1000000000, 1200000000, 1400000000, 1600000000, 1800000000, 2000000000,
- 2500000000.0, 3000000000.0, 3500000000.0, 4000000000.0, 4500000000.0,
- 5000000000.0, 6000000000.0, 7000000000.0, 8000000000.0, 9000000000.0,
- 1e200,
+ 1,
+ 2,
+ 3,
+ 4,
+ 5,
+ 6,
+ 7,
+ 8,
+ 9,
+ 10,
+ 12,
+ 14,
+ 16,
+ 18,
+ 20,
+ 25,
+ 30,
+ 35,
+ 40,
+ 45,
+ 50,
+ 60,
+ 70,
+ 80,
+ 90,
+ 100,
+ 120,
+ 140,
+ 160,
+ 180,
+ 200,
+ 250,
+ 300,
+ 350,
+ 400,
+ 450,
+ 500,
+ 600,
+ 700,
+ 800,
+ 900,
+ 1000,
+ 1200,
+ 1400,
+ 1600,
+ 1800,
+ 2000,
+ 2500,
+ 3000,
+ 3500,
+ 4000,
+ 4500,
+ 5000,
+ 6000,
+ 7000,
+ 8000,
+ 9000,
+ 10000,
+ 12000,
+ 14000,
+ 16000,
+ 18000,
+ 20000,
+ 25000,
+ 30000,
+ 35000,
+ 40000,
+ 45000,
+ 50000,
+ 60000,
+ 70000,
+ 80000,
+ 90000,
+ 100000,
+ 120000,
+ 140000,
+ 160000,
+ 180000,
+ 200000,
+ 250000,
+ 300000,
+ 350000,
+ 400000,
+ 450000,
+ 500000,
+ 600000,
+ 700000,
+ 800000,
+ 900000,
+ 1000000,
+ 1200000,
+ 1400000,
+ 1600000,
+ 1800000,
+ 2000000,
+ 2500000,
+ 3000000,
+ 3500000,
+ 4000000,
+ 4500000,
+ 5000000,
+ 6000000,
+ 7000000,
+ 8000000,
+ 9000000,
+ 10000000,
+ 12000000,
+ 14000000,
+ 16000000,
+ 18000000,
+ 20000000,
+ 25000000,
+ 30000000,
+ 35000000,
+ 40000000,
+ 45000000,
+ 50000000,
+ 60000000,
+ 70000000,
+ 80000000,
+ 90000000,
+ 100000000,
+ 120000000,
+ 140000000,
+ 160000000,
+ 180000000,
+ 200000000,
+ 250000000,
+ 300000000,
+ 350000000,
+ 400000000,
+ 450000000,
+ 500000000,
+ 600000000,
+ 700000000,
+ 800000000,
+ 900000000,
+ 1000000000,
+ 1200000000,
+ 1400000000,
+ 1600000000,
+ 1800000000,
+ 2000000000,
+ 2500000000.0,
+ 3000000000.0,
+ 3500000000.0,
+ 4000000000.0,
+ 4500000000.0,
+ 5000000000.0,
+ 6000000000.0,
+ 7000000000.0,
+ 8000000000.0,
+ 9000000000.0,
+ 1e200,
};
void Histogram::Clear() {
- min_ = kBucketLimit[kNumBuckets-1];
+ min_ = kBucketLimit[kNumBuckets - 1];
max_ = 0;
num_ = 0;
sum_ = 0;
}
}
-double Histogram::Median() const {
- return Percentile(50.0);
-}
+double Histogram::Median() const { return Percentile(50.0); }
double Histogram::Percentile(double p) const {
double threshold = num_ * (p / 100.0);
sum += buckets_[b];
if (sum >= threshold) {
// Scale linearly within this bucket
- double left_point = (b == 0) ? 0 : kBucketLimit[b-1];
+ double left_point = (b == 0) ? 0 : kBucketLimit[b - 1];
double right_point = kBucketLimit[b];
double left_sum = sum - buckets_[b];
double right_sum = sum;
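The hunk stops at the bucket bookkeeping; the rest of Percentile (unchanged by this diff, reproduced from memory) interpolates linearly within the bucket and clamps to the observed range:
double pos = (threshold - left_sum) / (right_sum - left_sum);
double r = left_point + (right_point - left_point) * pos;
if (r < min_) r = min_;
if (r > max_) r = max_;
return r;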
std::string Histogram::ToString() const {
std::string r;
char buf[200];
- snprintf(buf, sizeof(buf),
- "Count: %.0f Average: %.4f StdDev: %.2f\n",
- num_, Average(), StandardDeviation());
+ snprintf(buf, sizeof(buf), "Count: %.0f Average: %.4f StdDev: %.2f\n", num_,
+ Average(), StandardDeviation());
r.append(buf);
- snprintf(buf, sizeof(buf),
- "Min: %.4f Median: %.4f Max: %.4f\n",
+ snprintf(buf, sizeof(buf), "Min: %.4f Median: %.4f Max: %.4f\n",
(num_ == 0.0 ? 0.0 : min_), Median(), max_);
r.append(buf);
r.append("------------------------------------------------------\n");
for (int b = 0; b < kNumBuckets; b++) {
if (buckets_[b] <= 0.0) continue;
sum += buckets_[b];
- snprintf(buf, sizeof(buf),
- "[ %7.0f, %7.0f ) %7.0f %7.3f%% %7.3f%% ",
- ((b == 0) ? 0.0 : kBucketLimit[b-1]), // left
- kBucketLimit[b], // right
- buckets_[b], // count
- mult * buckets_[b], // percentage
- mult * sum); // cumulative percentage
+ snprintf(buf, sizeof(buf), "[ %7.0f, %7.0f ) %7.0f %7.3f%% %7.3f%% ",
+ ((b == 0) ? 0.0 : kBucketLimit[b - 1]), // left
+ kBucketLimit[b], // right
+ buckets_[b], // count
+ mult * buckets_[b], // percentage
+ mult * sum); // cumulative percentage
r.append(buf);
// Add hash marks based on percentage; 20 marks for 100%.
- int marks = static_cast<int>(20*(buckets_[b] / num_) + 0.5);
+ int marks = static_cast<int>(20 * (buckets_[b] / num_) + 0.5);
r.append(marks, '#');
r.push_back('\n');
}
class Histogram {
public:
- Histogram() { }
- ~Histogram() { }
+ Histogram() {}
+ ~Histogram() {}
void Clear();
void Add(double value);
std::string ToString() const;
private:
+ enum { kNumBuckets = 154 };
+
+ double Median() const;
+ double Percentile(double p) const;
+ double Average() const;
+ double StandardDeviation() const;
+
+ static const double kBucketLimit[kNumBuckets];
+
double min_;
double max_;
double num_;
double sum_;
double sum_squares_;
- enum { kNumBuckets = 154 };
- static const double kBucketLimit[kNumBuckets];
double buckets_[kNumBuckets];
-
- double Median() const;
- double Percentile(double p) const;
- double Average() const;
- double StandardDeviation() const;
};
} // namespace leveldb
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
+
#include <limits>
+
#include "leveldb/env.h"
#include "leveldb/slice.h"
void AppendNumberTo(std::string* str, uint64_t num) {
char buf[30];
- snprintf(buf, sizeof(buf), "%llu", (unsigned long long) num);
+ snprintf(buf, sizeof(buf), "%llu", (unsigned long long)num);
str->append(buf);
}
const unsigned char* current = start;
for (; current != end; ++current) {
const unsigned char ch = *current;
- if (ch < '0' || ch > '9')
- break;
+ if (ch < '0' || ch > '9') break;
// Overflow check.
// kMaxUint64 / 10 is also constant and will be optimized away.
#ifndef STORAGE_LEVELDB_UTIL_LOGGING_H_
#define STORAGE_LEVELDB_UTIL_LOGGING_H_
-#include <stdio.h>
#include <stdint.h>
+#include <stdio.h>
+
#include <string>
+
#include "port/port.h"
namespace leveldb {
namespace leveldb {
-class Logging { };
+class Logging {};
TEST(Logging, NumberToString) {
ASSERT_EQ("0", NumberToString(0));
} // namespace leveldb
-int main(int argc, char** argv) {
- return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
class SCOPED_LOCKABLE MutexLock {
public:
- explicit MutexLock(port::Mutex *mu) EXCLUSIVE_LOCK_FUNCTION(mu)
- : mu_(mu) {
+ explicit MutexLock(port::Mutex* mu) EXCLUSIVE_LOCK_FUNCTION(mu) : mu_(mu) {
this->mu_->Lock();
}
~MutexLock() UNLOCK_FUNCTION() { this->mu_->Unlock(); }
MutexLock& operator=(const MutexLock&) = delete;
private:
- port::Mutex *const mu_;
+ port::Mutex* const mu_;
};
} // namespace leveldb
-
#endif // STORAGE_LEVELDB_UTIL_MUTEXLOCK_H_
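Typical use of MutexLock (editorial sketch; SomeClass and mu_ are hypothetical names):
void SomeClass::SomeMethod() {
  MutexLock l(&mu_);  // acquires mu_ here
  // ... work protected by mu_ ...
}  // destructor releases mu_, even on early return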
// Wraps an instance whose destructor is never called.
//
// This is intended for use with function-level static variables.
-template<typename InstanceType>
+template <typename InstanceType>
class NoDestructor {
public:
template <typename... ConstructorArgTypes>
static_assert(
alignof(decltype(instance_storage_)) >= alignof(InstanceType),
"instance_storage_ does not meet the instance's alignment requirement");
- new (&instance_storage_) InstanceType(
- std::forward<ConstructorArgTypes>(constructor_args)...);
+ new (&instance_storage_)
+ InstanceType(std::forward<ConstructorArgTypes>(constructor_args)...);
}
~NoDestructor() = default;
}
private:
- typename
- std::aligned_storage<sizeof(InstanceType), alignof(InstanceType)>::type
- instance_storage_;
+ typename std::aligned_storage<sizeof(InstanceType),
+ alignof(InstanceType)>::type instance_storage_;
};
} // namespace leveldb
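Usage sketch (this mirrors how comparator.cc hands out its singleton):
const Comparator* BytewiseComparator() {
  static NoDestructor<BytewiseComparatorImpl> singleton;
  return singleton.get();
}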
} // namespace
-class NoDestructorTest { };
+class NoDestructorTest {};
TEST(NoDestructorTest, StackInstance) {
NoDestructor<DoNotDestruct> instance(kGoldenA, kGoldenB);
} // namespace leveldb
-int main(int argc, char** argv) {
- return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
namespace leveldb {
-Options::Options()
- : comparator(BytewiseComparator()),
- env(Env::Default()) {
-}
+Options::Options() : comparator(BytewiseComparator()), env(Env::Default()) {}
} // namespace leveldb
// Creates a logger that writes to the given file.
//
// The PosixLogger instance takes ownership of the file handle.
- explicit PosixLogger(std::FILE* fp) : fp_(fp) {
- assert(fp != nullptr);
- }
+ explicit PosixLogger(std::FILE* fp) : fp_(fp) { assert(fp != nullptr); }
- ~PosixLogger() override {
- std::fclose(fp_);
- }
+ ~PosixLogger() override { std::fclose(fp_); }
void Logv(const char* format, va_list arguments) override {
// Record the time as close to the Logv() call as possible.
// Print the header into the buffer.
int buffer_offset = snprintf(
- buffer, buffer_size,
- "%04d/%02d/%02d-%02d:%02d:%02d.%06d %s ",
- now_components.tm_year + 1900,
- now_components.tm_mon + 1,
- now_components.tm_mday,
- now_components.tm_hour,
- now_components.tm_min,
- now_components.tm_sec,
- static_cast<int>(now_timeval.tv_usec),
+ buffer, buffer_size, "%04d/%02d/%02d-%02d:%02d:%02d.%06d %s ",
+ now_components.tm_year + 1900, now_components.tm_mon + 1,
+ now_components.tm_mday, now_components.tm_hour, now_components.tm_min,
+ now_components.tm_sec, static_cast<int>(now_timeval.tv_usec),
thread_id.c_str());
// The header can be at most 28 characters (10 date + 15 time +
// Print the message into the buffer.
std::va_list arguments_copy;
va_copy(arguments_copy, arguments);
- buffer_offset += std::vsnprintf(buffer + buffer_offset,
- buffer_size - buffer_offset, format,
- arguments_copy);
+ buffer_offset +=
+ std::vsnprintf(buffer + buffer_offset, buffer_size - buffer_offset,
+ format, arguments_copy);
va_end(arguments_copy);
// The code below may append a newline at the end of the buffer, which
class Random {
private:
uint32_t seed_;
+
public:
explicit Random(uint32_t s) : seed_(s & 0x7fffffffu) {
// Avoid bad seeds.
}
}
uint32_t Next() {
- static const uint32_t M = 2147483647L; // 2^31-1
- static const uint64_t A = 16807; // bits 14, 8, 7, 5, 2, 1, 0
+ static const uint32_t M = 2147483647L; // 2^31-1
+ static const uint64_t A = 16807; // bits 14, 8, 7, 5, 2, 1, 0
// We are computing
// seed_ = (seed_ * A) % M, where M = 2^31-1
//
// Skewed: pick "base" uniformly from range [0,max_log] and then
// return "base" random bits. The effect is to pick a number in the
// range [0,2^max_log-1] with exponential bias towards smaller numbers.
- uint32_t Skewed(int max_log) {
- return Uniform(1 << Uniform(max_log + 1));
- }
+ uint32_t Skewed(int max_log) { return Uniform(1 << Uniform(max_log + 1)); }
};
} // namespace leveldb
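Next() is elided above; from memory, its body reduces seed_ * A modulo M without a division by exploiting 2^31 ≡ 1 (mod M):
uint64_t product = seed_ * A;
// (x << 31) % M == x when M == 2^31 - 1, so fold the high bits back in.
seed_ = static_cast<uint32_t>((product >> 31) + (product & M));
// The fold can overshoot M by at most one multiple; subtract it back out.
if (seed_ > M) seed_ -= M;
return seed_;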
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
+#include "leveldb/status.h"
+
#include <stdio.h>
+
#include "port/port.h"
-#include "leveldb/status.h"
namespace leveldb {
type = "IO error: ";
break;
default:
- snprintf(tmp, sizeof(tmp), "Unknown code(%d): ",
- static_cast<int>(code()));
+ snprintf(tmp, sizeof(tmp),
+ "Unknown code(%d): ", static_cast<int>(code()));
type = tmp;
break;
}
} // namespace leveldb
-int main(int argc, char** argv) {
- return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
void (*func)();
};
std::vector<Test>* tests;
-}
+} // namespace
bool RegisterTest(const char* base, const char* name, void (*func)()) {
if (tests == nullptr) {
std::stringstream ss_;
public:
- Tester(const char* f, int l)
- : ok_(true), fname_(f), line_(l) {
- }
+ Tester(const char* f, int l) : ok_(true), fname_(f), line_(l) {}
~Tester() {
if (!ok_) {
return *this;
}
-#define BINARY_OP(name, op) \
- template <class X, class Y> \
- Tester& name(const X& x, const Y& y) { \
- if (!(x op y)) { \
- ss_ << " failed: " << x << (" " #op " ") << y; \
- ok_ = false; \
- } \
- return *this; \
+#define BINARY_OP(name, op) \
+ template <class X, class Y> \
+ Tester& name(const X& x, const Y& y) { \
+ if (!(x op y)) { \
+ ss_ << " failed: " << x << (" " #op " ") << y; \
+ ok_ = false; \
+ } \
+ return *this; \
}
BINARY_OP(IsEq, ==)
#define ASSERT_TRUE(c) ::leveldb::test::Tester(__FILE__, __LINE__).Is((c), #c)
#define ASSERT_OK(s) ::leveldb::test::Tester(__FILE__, __LINE__).IsOk((s))
-#define ASSERT_EQ(a,b) ::leveldb::test::Tester(__FILE__, __LINE__).IsEq((a),(b))
-#define ASSERT_NE(a,b) ::leveldb::test::Tester(__FILE__, __LINE__).IsNe((a),(b))
-#define ASSERT_GE(a,b) ::leveldb::test::Tester(__FILE__, __LINE__).IsGe((a),(b))
-#define ASSERT_GT(a,b) ::leveldb::test::Tester(__FILE__, __LINE__).IsGt((a),(b))
-#define ASSERT_LE(a,b) ::leveldb::test::Tester(__FILE__, __LINE__).IsLe((a),(b))
-#define ASSERT_LT(a,b) ::leveldb::test::Tester(__FILE__, __LINE__).IsLt((a),(b))
+#define ASSERT_EQ(a, b) \
+ ::leveldb::test::Tester(__FILE__, __LINE__).IsEq((a), (b))
+#define ASSERT_NE(a, b) \
+ ::leveldb::test::Tester(__FILE__, __LINE__).IsNe((a), (b))
+#define ASSERT_GE(a, b) \
+ ::leveldb::test::Tester(__FILE__, __LINE__).IsGe((a), (b))
+#define ASSERT_GT(a, b) \
+ ::leveldb::test::Tester(__FILE__, __LINE__).IsGt((a), (b))
+#define ASSERT_LE(a, b) \
+ ::leveldb::test::Tester(__FILE__, __LINE__).IsLe((a), (b))
+#define ASSERT_LT(a, b) \
+ ::leveldb::test::Tester(__FILE__, __LINE__).IsLt((a), (b))
#define TCONCAT(a, b) TCONCAT1(a, b)
#define TCONCAT1(a, b) a##b
-#define TEST(base, name) \
-class TCONCAT(_Test_, name) : public base { \
- public: \
- void _Run(); \
- static void _RunIt() { \
- TCONCAT(_Test_, name) t; \
- t._Run(); \
- } \
-}; \
-bool TCONCAT(_Test_ignored_, name) = \
- ::leveldb::test::RegisterTest(#base, #name, &TCONCAT(_Test_, name)::_RunIt); \
-void TCONCAT(_Test_, name)::_Run()
+#define TEST(base, name) \
+ class TCONCAT(_Test_, name) : public base { \
+ public: \
+ void _Run(); \
+ static void _RunIt() { \
+ TCONCAT(_Test_, name) t; \
+ t._Run(); \
+ } \
+ }; \
+ bool TCONCAT(_Test_ignored_, name) = ::leveldb::test::RegisterTest( \
+ #base, #name, &TCONCAT(_Test_, name)::_RunIt); \
+ void TCONCAT(_Test_, name)::_Run()
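To make the expansion concrete (editorial): TEST(Coding, Fixed32) declares class _Test_Fixed32 deriving from Coding, registers its static _RunIt() thunk via RegisterTest during static initialization, and the braces that follow the macro become the body of _Test_Fixed32::_Run(), so every test runs against a fresh fixture instance.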
// Register the specified test. Typically not used directly, but
// invoked via the macro expansion of TEST.
Slice RandomString(Random* rnd, int len, std::string* dst) {
dst->resize(len);
for (int i = 0; i < len; i++) {
- (*dst)[i] = static_cast<char>(' ' + rnd->Uniform(95)); // ' ' .. '~'
+ (*dst)[i] = static_cast<char>(' ' + rnd->Uniform(95)); // ' ' .. '~'
}
return Slice(*dst);
}
std::string RandomKey(Random* rnd, int len) {
// Make sure to generate a wide variety of characters so we
// test the boundary conditions for short-key optimizations.
- static const char kTestChars[] = {
- '\0', '\1', 'a', 'b', 'c', 'd', 'e', '\xfd', '\xfe', '\xff'
- };
+ static const char kTestChars[] = {'\0', '\1', 'a', 'b', 'c',
+ 'd', 'e', '\xfd', '\xfe', '\xff'};
std::string result;
for (int i = 0; i < len; i++) {
result += kTestChars[rnd->Uniform(sizeof(kTestChars))];
return result;
}
-
-Slice CompressibleString(Random* rnd, double compressed_fraction,
- size_t len, std::string* dst) {
+Slice CompressibleString(Random* rnd, double compressed_fraction, size_t len,
+ std::string* dst) {
int raw = static_cast<int>(len * compressed_fraction);
if (raw < 1) raw = 1;
std::string raw_data;
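The elided body (from memory) generates a random prefix of raw = len * compressed_fraction bytes and repeats it until *dst holds len bytes, which is what makes the result compress to roughly that fraction:
// Sketch of the elided remainder:
//   RandomString(rnd, raw, &raw_data);
//   dst->clear();
//   while (dst->size() < len) dst->append(raw_data);
//   dst->resize(len);
//   return Slice(*dst);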
// Store in *dst a string of length "len" that will compress to
// "N*compressed_fraction" bytes and return a Slice that references
// the generated data.
-Slice CompressibleString(Random* rnd, double compressed_fraction,
- size_t len, std::string* dst);
+Slice CompressibleString(Random* rnd, double compressed_fraction, size_t len,
+ std::string* dst);
// A wrapper that allows injection of errors.
class ErrorEnv : public EnvWrapper {
bool writable_file_error_;
int num_writable_file_errors_;
- ErrorEnv() : EnvWrapper(NewMemEnv(Env::Default())),
- writable_file_error_(false),
- num_writable_file_errors_(0) { }
- ~ErrorEnv() override {
- delete target();
- }
+ ErrorEnv()
+ : EnvWrapper(NewMemEnv(Env::Default())),
+ writable_file_error_(false),
+ num_writable_file_errors_(0) {}
+ ~ErrorEnv() override { delete target(); }
Status NewWritableFile(const std::string& fname,
WritableFile** result) override {
// Creates a logger that writes to the given file.
//
// The WindowsLogger instance takes ownership of the file handle.
- explicit WindowsLogger(std::FILE* fp) : fp_(fp) {
- assert(fp != nullptr);
- }
+ explicit WindowsLogger(std::FILE* fp) : fp_(fp) { assert(fp != nullptr); }
- ~WindowsLogger() override {
- std::fclose(fp_);
- }
+ ~WindowsLogger() override { std::fclose(fp_); }
void Logv(const char* format, va_list arguments) override {
// Record the time as close to the Logv() call as possible.
// Print the header into the buffer.
int buffer_offset = snprintf(
- buffer, buffer_size,
- "%04d/%02d/%02d-%02d:%02d:%02d.%06d %s ",
- now_components.wYear,
- now_components.wMonth,
- now_components.wDay,
- now_components.wHour,
- now_components.wMinute,
- now_components.wSecond,
+ buffer, buffer_size, "%04d/%02d/%02d-%02d:%02d:%02d.%06d %s ",
+ now_components.wYear, now_components.wMonth, now_components.wDay,
+ now_components.wHour, now_components.wMinute, now_components.wSecond,
static_cast<int>(now_components.wMilliseconds * 1000),
thread_id.c_str());
// Print the message into the buffer.
std::va_list arguments_copy;
va_copy(arguments_copy, arguments);
- buffer_offset += std::vsnprintf(buffer + buffer_offset,
- buffer_size - buffer_offset, format,
- arguments_copy);
+ buffer_offset +=
+ std::vsnprintf(buffer + buffer_offset, buffer_size - buffer_offset,
+ format, arguments_copy);
va_end(arguments_copy);
// The code below may append a newline at the end of the buffer, which