// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "db/db_impl.h"

#include "db/builder.h"
#include "db/db_iter.h"
#include "db/dbformat.h"
#include "db/filename.h"
#include "db/log_reader.h"
#include "db/log_writer.h"
#include "db/memtable.h"
#include "db/table_cache.h"
#include "db/version_set.h"
#include "db/write_batch_internal.h"
#include "leveldb/db.h"
#include "leveldb/env.h"
#include "leveldb/status.h"
#include "leveldb/table.h"
#include "leveldb/table_builder.h"
#include "port/port.h"
#include "table/block.h"
#include "table/merger.h"
#include "table/two_level_iterator.h"
#include "util/coding.h"
#include "util/logging.h"
#include "util/mutexlock.h"
struct DBImpl::CompactionState {
  Compaction* const compaction;

  // Sequence numbers < smallest_snapshot are not significant since we
  // will never have to service a snapshot below smallest_snapshot.
  // Therefore if we have seen a sequence number S <= smallest_snapshot,
  // we can drop all entries for the same key with sequence numbers < S.
  SequenceNumber smallest_snapshot;
  // Files produced by compaction
  struct Output {
    uint64_t number;
    uint64_t file_size;
    InternalKey smallest, largest;
  };
  std::vector<Output> outputs;
  // State kept for output being generated
  WritableFile* outfile;
  TableBuilder* builder;
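  // The output table currently being built is the last element of outputs;
  // OpenCompactionOutputFile pushes a new entry before any keys are added.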
  Output* current_output() { return &outputs[outputs.size()-1]; }

  explicit CompactionState(Compaction* c)
// Fix user-supplied options to be reasonable
template <class T, class V>
static void ClipToRange(T* ptr, V minvalue, V maxvalue) {
  if (static_cast<V>(*ptr) > maxvalue) *ptr = maxvalue;
  if (static_cast<V>(*ptr) < minvalue) *ptr = minvalue;
Options SanitizeOptions(const std::string& dbname,
                        const InternalKeyComparator* icmp,
  result.comparator = icmp;
  ClipToRange(&result.max_open_files, 20, 50000);
  ClipToRange(&result.write_buffer_size, 64<<10, 1<<30);
  ClipToRange(&result.block_size, 1<<10, 4<<20);
  if (result.info_log == NULL) {
    // Open a log file in the same directory as the db
    src.env->CreateDir(dbname);  // In case it does not exist
    src.env->RenameFile(InfoLogFileName(dbname), OldInfoLogFileName(dbname));
    Status s = src.env->NewLogger(InfoLogFileName(dbname), &result.info_log);
      // No place suitable for logging
      result.info_log = NULL;
  if (result.block_cache == NULL) {
    result.block_cache = NewLRUCache(8 << 20);
DBImpl::DBImpl(const Options& options, const std::string& dbname)
      internal_comparator_(options.comparator),
      options_(SanitizeOptions(dbname, &internal_comparator_, options)),
      owns_info_log_(options_.info_log != options.info_log),
      owns_cache_(options_.block_cache != options.block_cache),
      shutting_down_(NULL),
      mem_(new MemTable(internal_comparator_)),
      bg_compaction_scheduled_(false),
      manual_compaction_(NULL) {
  has_imm_.Release_Store(NULL);

  // Reserve ten files or so for other uses and give the rest to TableCache.
  const int table_cache_size = options.max_open_files - 10;
  table_cache_ = new TableCache(dbname_, &options_, table_cache_size);

  versions_ = new VersionSet(dbname_, &options_, table_cache_,
                             &internal_comparator_);
  // Wait for background work to finish
  shutting_down_.Release_Store(this);  // Any non-NULL value is ok
  while (bg_compaction_scheduled_) {

  if (db_lock_ != NULL) {
    env_->UnlockFile(db_lock_);

  if (mem_ != NULL) mem_->Unref();
  if (imm_ != NULL) imm_->Unref();

  if (owns_info_log_) {
    delete options_.info_log;
    delete options_.block_cache;
Status DBImpl::NewDB() {
  new_db.SetComparatorName(user_comparator()->Name());
  new_db.SetLogNumber(0);
  new_db.SetNextFile(2);
  new_db.SetLastSequence(0);

  const std::string manifest = DescriptorFileName(dbname_, 1);
  Status s = env_->NewWritableFile(manifest, &file);
    log::Writer log(file);
    new_db.EncodeTo(&record);
    s = log.AddRecord(record);
    // Make "CURRENT" file that points to the new manifest file.
    s = SetCurrentFile(env_, dbname_, 1);
    env_->DeleteFile(manifest);
void DBImpl::MaybeIgnoreError(Status* s) const {
  if (s->ok() || options_.paranoid_checks) {
    // No change needed
  } else {
    Log(options_.info_log, "Ignoring error %s", s->ToString().c_str());
void DBImpl::DeleteObsoleteFiles() {
  // Make a set of all of the live files
  std::set<uint64_t> live = pending_outputs_;
  versions_->AddLiveFiles(&live);

  std::vector<std::string> filenames;
  env_->GetChildren(dbname_, &filenames);  // Ignoring errors on purpose
  for (size_t i = 0; i < filenames.size(); i++) {
    if (ParseFileName(filenames[i], &number, &type)) {
          keep = ((number >= versions_->LogNumber()) ||
                  (number == versions_->PrevLogNumber()));
        case kDescriptorFile:
          // Keep my manifest file, and any newer incarnations'
          // (in case there is a race that allows other incarnations)
          keep = (number >= versions_->ManifestFileNumber());
          keep = (live.find(number) != live.end());
          // Any temp files that are currently being written to must
          // be recorded in pending_outputs_, which is inserted into "live"
          keep = (live.find(number) != live.end());

        if (type == kTableFile) {
          table_cache_->Evict(number);
        Log(options_.info_log, "Delete type=%d #%lld\n",
            static_cast<unsigned long long>(number));
        env_->DeleteFile(dbname_ + "/" + filenames[i]);
Status DBImpl::Recover(VersionEdit* edit) {
  // Ignore error from CreateDir since the creation of the DB is
  // committed only when the descriptor is created, and this directory
  // may already exist from a previous failed creation attempt.
  env_->CreateDir(dbname_);
  assert(db_lock_ == NULL);
  Status s = env_->LockFile(LockFileName(dbname_), &db_lock_);
  if (!env_->FileExists(CurrentFileName(dbname_))) {
    if (options_.create_if_missing) {
      s = NewDB();
    } else {
      return Status::InvalidArgument(
          dbname_, "does not exist (create_if_missing is false)");
    }
  } else {
    if (options_.error_if_exists) {
      return Status::InvalidArgument(
          dbname_, "exists (error_if_exists is true)");
  s = versions_->Recover();
    SequenceNumber max_sequence(0);

    // Recover from all newer log files than the ones named in the
    // descriptor (new log files may have been added by the previous
    // incarnation without registering them in the descriptor).
    // Note that PrevLogNumber() is no longer used, but we pay
    // attention to it in case we are recovering a database
    // produced by an older version of leveldb.
    const uint64_t min_log = versions_->LogNumber();
    const uint64_t prev_log = versions_->PrevLogNumber();
    std::vector<std::string> filenames;
    s = env_->GetChildren(dbname_, &filenames);
    std::vector<uint64_t> logs;
    for (size_t i = 0; i < filenames.size(); i++) {
      if (ParseFileName(filenames[i], &number, &type)
          && ((number >= min_log) || (number == prev_log))) {
        logs.push_back(number);

    // Recover in the order in which the logs were generated
    std::sort(logs.begin(), logs.end());
    for (size_t i = 0; i < logs.size(); i++) {
      s = RecoverLogFile(logs[i], edit, &max_sequence);

      // The previous incarnation may not have written any MANIFEST
      // records after allocating this log number. So we manually
      // update the file number allocation counter in VersionSet.
      versions_->MarkFileNumberUsed(logs[i]);

    if (versions_->LastSequence() < max_sequence) {
      versions_->SetLastSequence(max_sequence);
Status DBImpl::RecoverLogFile(uint64_t log_number,
                              SequenceNumber* max_sequence) {
  struct LogReporter : public log::Reader::Reporter {
    Status* status;  // NULL if options_.paranoid_checks==false
    virtual void Corruption(size_t bytes, const Status& s) {
      Log(info_log, "%s%s: dropping %d bytes; %s",
          (this->status == NULL ? "(ignoring error) " : ""),
          fname, static_cast<int>(bytes), s.ToString().c_str());
      if (this->status != NULL && this->status->ok()) *this->status = s;

  std::string fname = LogFileName(dbname_, log_number);
  SequentialFile* file;
  Status status = env_->NewSequentialFile(fname, &file);
    MaybeIgnoreError(&status);

  // Create the log reader.
  LogReporter reporter;
  reporter.info_log = options_.info_log;
  reporter.fname = fname.c_str();
  reporter.status = (options_.paranoid_checks ? &status : NULL);
  // We intentionally make log::Reader do checksumming even if
  // paranoid_checks==false so that corruptions cause entire commits
  // to be skipped instead of propagating bad information (like overly
  // large sequence numbers).
  log::Reader reader(file, &reporter, true/*checksum*/,
                     0/*initial_offset*/);
  Log(options_.info_log, "Recovering log #%llu",
      (unsigned long long) log_number);

  // Read all the records and add to a memtable
  MemTable* mem = NULL;
  while (reader.ReadRecord(&record, &scratch) &&
    if (record.size() < 12) {
      reporter.Corruption(
          record.size(), Status::Corruption("log record too small"));
    WriteBatchInternal::SetContents(&batch, record);

    if (mem == NULL) {
      mem = new MemTable(internal_comparator_);
    status = WriteBatchInternal::InsertInto(&batch, mem);
    MaybeIgnoreError(&status);
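    // A batch occupies a contiguous run of sequence numbers, one per entry,
    // starting at WriteBatchInternal::Sequence(), so its last sequence
    // number is start + count - 1.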
    const SequenceNumber last_seq =
        WriteBatchInternal::Sequence(&batch) +
        WriteBatchInternal::Count(&batch) - 1;
    if (last_seq > *max_sequence) {
      *max_sequence = last_seq;

    if (mem->ApproximateMemoryUsage() > options_.write_buffer_size) {
      status = WriteLevel0Table(mem, edit, NULL);
      // Reflect errors immediately so that conditions like full
      // file-systems cause the DB::Open() to fail.

  if (status.ok() && mem != NULL) {
    status = WriteLevel0Table(mem, edit, NULL);
    // Reflect errors immediately so that conditions like full
    // file-systems cause the DB::Open() to fail.

  if (mem != NULL) mem->Unref();
Status DBImpl::WriteLevel0Table(MemTable* mem, VersionEdit* edit,
  const uint64_t start_micros = env_->NowMicros();
  meta.number = versions_->NewFileNumber();
  pending_outputs_.insert(meta.number);
  Iterator* iter = mem->NewIterator();
  Log(options_.info_log, "Level-0 table #%llu: started",
      (unsigned long long) meta.number);

    s = BuildTable(dbname_, env_, options_, table_cache_, iter, &meta);

  Log(options_.info_log, "Level-0 table #%llu: %lld bytes %s",
      (unsigned long long) meta.number,
      (unsigned long long) meta.file_size,
      s.ToString().c_str());
  pending_outputs_.erase(meta.number);

  // Note that if file_size is zero, the file has been deleted and
  // should not be added to the manifest.
  if (s.ok() && meta.file_size > 0) {
    const Slice min_user_key = meta.smallest.user_key();
    const Slice max_user_key = meta.largest.user_key();
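    // Ask the current version to pick an output level for the new table
    // based on its user-key range; the result is not necessarily level 0.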
      level = base->PickLevelForMemTableOutput(min_user_key, max_user_key);
    edit->AddFile(level, meta.number, meta.file_size,
                  meta.smallest, meta.largest);

  CompactionStats stats;
  stats.micros = env_->NowMicros() - start_micros;
  stats.bytes_written = meta.file_size;
  stats_[level].Add(stats);
Status DBImpl::CompactMemTable() {
  assert(imm_ != NULL);

  // Save the contents of the memtable as a new Table
  Version* base = versions_->current();
  Status s = WriteLevel0Table(imm_, &edit, base);

  if (s.ok() && shutting_down_.Acquire_Load()) {
    s = Status::IOError("Deleting DB during memtable compaction");

  // Replace immutable memtable with the generated Table
    edit.SetPrevLogNumber(0);
    edit.SetLogNumber(logfile_number_);  // Earlier logs no longer needed
    s = versions_->LogAndApply(&edit, &mutex_);

    // Commit to the new state
    has_imm_.Release_Store(NULL);
    DeleteObsoleteFiles();
void DBImpl::CompactRange(const Slice* begin, const Slice* end) {
  int max_level_with_files = 1;
    MutexLock l(&mutex_);
    Version* base = versions_->current();
    for (int level = 1; level < config::kNumLevels; level++) {
      if (base->OverlapInLevel(level, begin, end)) {
        max_level_with_files = level;
  TEST_CompactMemTable();  // TODO(sanjay): Skip if memtable does not overlap
  for (int level = 0; level < max_level_with_files; level++) {
    TEST_CompactRange(level, begin, end);
void DBImpl::TEST_CompactRange(int level, const Slice* begin, const Slice* end) {
  assert(level + 1 < config::kNumLevels);

  InternalKey begin_storage, end_storage;

  ManualCompaction manual;
  manual.level = level;
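  // In the internal key encoding, (kMaxSequenceNumber, kValueTypeForSeek)
  // sorts before every entry for a user key and sequence number 0 sorts
  // after every entry, so the bounds below cover all entries in [*begin, *end].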
    begin_storage = InternalKey(*begin, kMaxSequenceNumber, kValueTypeForSeek);
    manual.begin = &begin_storage;
    end_storage = InternalKey(*end, 0, static_cast<ValueType>(0));
    manual.end = &end_storage;

  MutexLock l(&mutex_);
  while (!manual.done) {
    while (manual_compaction_ != NULL) {
    manual_compaction_ = &manual;
    MaybeScheduleCompaction();
    while (manual_compaction_ == &manual) {
Status DBImpl::TEST_CompactMemTable() {
  MutexLock l(&mutex_);
  AcquireLoggingResponsibility(&self);
  Status s = MakeRoomForWrite(true /* force compaction */);
  ReleaseLoggingResponsibility(&self);
    // Wait until the compaction completes
    while (imm_ != NULL && bg_error_.ok()) {
void DBImpl::MaybeScheduleCompaction() {
  if (bg_compaction_scheduled_) {
  } else if (shutting_down_.Acquire_Load()) {
    // DB is being deleted; no more background compactions
  } else if (imm_ == NULL &&
             manual_compaction_ == NULL &&
             !versions_->NeedsCompaction()) {
    // No work to be done
  } else {
    bg_compaction_scheduled_ = true;
    env_->Schedule(&DBImpl::BGWork, this);
void DBImpl::BGWork(void* db) {
  reinterpret_cast<DBImpl*>(db)->BackgroundCall();

void DBImpl::BackgroundCall() {
  MutexLock l(&mutex_);
  assert(bg_compaction_scheduled_);
  if (!shutting_down_.Acquire_Load()) {
    BackgroundCompaction();
  bg_compaction_scheduled_ = false;

  // Previous compaction may have produced too many files in a level,
  // so reschedule another compaction if needed.
  MaybeScheduleCompaction();
void DBImpl::BackgroundCompaction() {
  bool is_manual = (manual_compaction_ != NULL);
  InternalKey manual_end;
    ManualCompaction* m = manual_compaction_;
    c = versions_->CompactRange(m->level, m->begin, m->end);
    m->done = (c == NULL);
    if (c != NULL) {
      manual_end = c->input(0, c->num_input_files(0) - 1)->largest;
    }
    Log(options_.info_log,
        "Manual compaction at level-%d from %s .. %s; will stop at %s\n",
        (m->begin ? m->begin->DebugString().c_str() : "(begin)"),
        (m->end ? m->end->DebugString().c_str() : "(end)"),
        (m->done ? "(end)" : manual_end.DebugString().c_str()));
    c = versions_->PickCompaction();

  } else if (!is_manual && c->IsTrivialMove()) {
    // Move file to next level
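    // (a trivial move just re-links the single input file into the next
    //  level via a manifest edit; no table data is rewritten)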
    assert(c->num_input_files(0) == 1);
    FileMetaData* f = c->input(0, 0);
    c->edit()->DeleteFile(c->level(), f->number);
    c->edit()->AddFile(c->level() + 1, f->number, f->file_size,
                       f->smallest, f->largest);
    status = versions_->LogAndApply(c->edit(), &mutex_);
    VersionSet::LevelSummaryStorage tmp;
    Log(options_.info_log, "Moved #%lld to level-%d %lld bytes %s: %s\n",
        static_cast<unsigned long long>(f->number),
        static_cast<unsigned long long>(f->file_size),
        status.ToString().c_str(),
        versions_->LevelSummary(&tmp));
  } else {
    CompactionState* compact = new CompactionState(c);
    status = DoCompactionWork(compact);
    CleanupCompaction(compact);
  } else if (shutting_down_.Acquire_Load()) {
    // Ignore compaction errors found during shutdown
    Log(options_.info_log,
        "Compaction error: %s", status.ToString().c_str());
    if (options_.paranoid_checks && bg_error_.ok()) {

    ManualCompaction* m = manual_compaction_;
      // We only compacted part of the requested range. Update *m
      // to the range that is left to be compacted.
      m->tmp_storage = manual_end;
      m->begin = &m->tmp_storage;
    manual_compaction_ = NULL;
void DBImpl::CleanupCompaction(CompactionState* compact) {
  if (compact->builder != NULL) {
    // May happen if we get a shutdown call in the middle of compaction
    compact->builder->Abandon();
    delete compact->builder;
  } else {
    assert(compact->outfile == NULL);
  }
  delete compact->outfile;
  for (size_t i = 0; i < compact->outputs.size(); i++) {
    const CompactionState::Output& out = compact->outputs[i];
    pending_outputs_.erase(out.number);
Status DBImpl::OpenCompactionOutputFile(CompactionState* compact) {
  assert(compact != NULL);
  assert(compact->builder == NULL);
  uint64_t file_number;
    file_number = versions_->NewFileNumber();
    pending_outputs_.insert(file_number);
    CompactionState::Output out;
    out.number = file_number;
    out.smallest.Clear();
    compact->outputs.push_back(out);

  // Make the output file
  std::string fname = TableFileName(dbname_, file_number);
  Status s = env_->NewWritableFile(fname, &compact->outfile);
    compact->builder = new TableBuilder(options_, compact->outfile);
Status DBImpl::FinishCompactionOutputFile(CompactionState* compact,
  assert(compact != NULL);
  assert(compact->outfile != NULL);
  assert(compact->builder != NULL);

  const uint64_t output_number = compact->current_output()->number;
  assert(output_number != 0);

  // Check for iterator errors
  Status s = input->status();
  const uint64_t current_entries = compact->builder->NumEntries();
  if (s.ok()) {
    s = compact->builder->Finish();
  } else {
    compact->builder->Abandon();
  const uint64_t current_bytes = compact->builder->FileSize();
  compact->current_output()->file_size = current_bytes;
  compact->total_bytes += current_bytes;
  delete compact->builder;
  compact->builder = NULL;

  // Finish and check for file errors
    s = compact->outfile->Sync();
    s = compact->outfile->Close();
  delete compact->outfile;
  compact->outfile = NULL;

  if (s.ok() && current_entries > 0) {
    // Verify that the table is usable
    Iterator* iter = table_cache_->NewIterator(ReadOptions(),
      Log(options_.info_log,
          "Generated table #%llu: %lld keys, %lld bytes",
          (unsigned long long) output_number,
          (unsigned long long) current_entries,
          (unsigned long long) current_bytes);
Status DBImpl::InstallCompactionResults(CompactionState* compact) {
  Log(options_.info_log, "Compacted %d@%d + %d@%d files => %lld bytes",
      compact->compaction->num_input_files(0),
      compact->compaction->level(),
      compact->compaction->num_input_files(1),
      compact->compaction->level() + 1,
      static_cast<long long>(compact->total_bytes));

  // Add compaction outputs
  compact->compaction->AddInputDeletions(compact->compaction->edit());
  const int level = compact->compaction->level();
  for (size_t i = 0; i < compact->outputs.size(); i++) {
    const CompactionState::Output& out = compact->outputs[i];
    compact->compaction->edit()->AddFile(
        out.number, out.file_size, out.smallest, out.largest);
    pending_outputs_.erase(out.number);
  compact->outputs.clear();

  Status s = versions_->LogAndApply(compact->compaction->edit(), &mutex_);
  if (s.ok()) {
    compact->compaction->ReleaseInputs();
    DeleteObsoleteFiles();
  } else {
    // Discard any files we may have created during this failed compaction
    for (size_t i = 0; i < compact->outputs.size(); i++) {
      env_->DeleteFile(TableFileName(dbname_, compact->outputs[i].number));
Status DBImpl::DoCompactionWork(CompactionState* compact) {
  const uint64_t start_micros = env_->NowMicros();
  int64_t imm_micros = 0;  // Micros spent doing imm_ compactions

  Log(options_.info_log, "Compacting %d@%d + %d@%d files",
      compact->compaction->num_input_files(0),
      compact->compaction->level(),
      compact->compaction->num_input_files(1),
      compact->compaction->level() + 1);

  assert(versions_->NumLevelFiles(compact->compaction->level()) > 0);
  assert(compact->builder == NULL);
  assert(compact->outfile == NULL);
  if (snapshots_.empty()) {
    compact->smallest_snapshot = versions_->LastSequence();
  } else {
    compact->smallest_snapshot = snapshots_.oldest()->number_;
  }

  // Release mutex while we're actually doing the compaction work
  Iterator* input = versions_->MakeInputIterator(compact->compaction);
  input->SeekToFirst();
  ParsedInternalKey ikey;
  std::string current_user_key;
  bool has_current_user_key = false;
  SequenceNumber last_sequence_for_key = kMaxSequenceNumber;
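  // kMaxSequenceNumber is a sentinel meaning "no entry for the current user
  // key has been seen yet", so the first occurrence of each user key is
  // always kept.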
  for (; input->Valid() && !shutting_down_.Acquire_Load(); ) {
    // Prioritize immutable compaction work
    if (has_imm_.NoBarrier_Load() != NULL) {
      const uint64_t imm_start = env_->NowMicros();
        bg_cv_.SignalAll();  // Wakeup MakeRoomForWrite() if necessary
      imm_micros += (env_->NowMicros() - imm_start);

    Slice key = input->key();
    if (compact->compaction->ShouldStopBefore(key) &&
        compact->builder != NULL) {
      status = FinishCompactionOutputFile(compact, input);

    // Handle key/value, add to state, etc.
    if (!ParseInternalKey(key, &ikey)) {
      // Do not hide error keys
      current_user_key.clear();
      has_current_user_key = false;
      last_sequence_for_key = kMaxSequenceNumber;
      if (!has_current_user_key ||
          user_comparator()->Compare(ikey.user_key,
                                     Slice(current_user_key)) != 0) {
        // First occurrence of this user key
        current_user_key.assign(ikey.user_key.data(), ikey.user_key.size());
        has_current_user_key = true;
        last_sequence_for_key = kMaxSequenceNumber;
      if (last_sequence_for_key <= compact->smallest_snapshot) {
        // Hidden by a newer entry for same user key
        drop = true;    // (A)
      } else if (ikey.type == kTypeDeletion &&
                 ikey.sequence <= compact->smallest_snapshot &&
                 compact->compaction->IsBaseLevelForKey(ikey.user_key)) {
        // For this user key:
        // (1) there is no data in higher levels
        // (2) data in lower levels will have larger sequence numbers
        // (3) data in layers that are being compacted here and have
        //     smaller sequence numbers will be dropped in the next
        //     few iterations of this loop (by rule (A) above).
        // Therefore this deletion marker is obsolete and can be dropped.

      last_sequence_for_key = ikey.sequence;

      Log(options_.info_log,
          " Compact: %s, seq %d, type: %d %d, drop: %d, is_base: %d, "
          "%d smallest_snapshot: %d",
          ikey.user_key.ToString().c_str(),
          (int)ikey.sequence, ikey.type, kTypeValue, drop,
          compact->compaction->IsBaseLevelForKey(ikey.user_key),
          (int)last_sequence_for_key, (int)compact->smallest_snapshot);

      // Open output file if necessary
      if (compact->builder == NULL) {
        status = OpenCompactionOutputFile(compact);
      if (compact->builder->NumEntries() == 0) {
        compact->current_output()->smallest.DecodeFrom(key);
      compact->current_output()->largest.DecodeFrom(key);
      compact->builder->Add(key, input->value());

      // Close output file if it is big enough
      if (compact->builder->FileSize() >=
          compact->compaction->MaxOutputFileSize()) {
        status = FinishCompactionOutputFile(compact, input);

  if (status.ok() && shutting_down_.Acquire_Load()) {
    status = Status::IOError("Deleting DB during compaction");
  if (status.ok() && compact->builder != NULL) {
    status = FinishCompactionOutputFile(compact, input);
    status = input->status();

  CompactionStats stats;
  stats.micros = env_->NowMicros() - start_micros - imm_micros;
  for (int which = 0; which < 2; which++) {
    for (int i = 0; i < compact->compaction->num_input_files(which); i++) {
      stats.bytes_read += compact->compaction->input(which, i)->file_size;
  for (size_t i = 0; i < compact->outputs.size(); i++) {
    stats.bytes_written += compact->outputs[i].file_size;

  stats_[compact->compaction->level() + 1].Add(stats);

    status = InstallCompactionResults(compact);
  VersionSet::LevelSummaryStorage tmp;
  Log(options_.info_log,
      "compacted to: %s", versions_->LevelSummary(&tmp));
static void CleanupIteratorState(void* arg1, void* arg2) {
  IterState* state = reinterpret_cast<IterState*>(arg1);
  if (state->imm != NULL) state->imm->Unref();
  state->version->Unref();

Iterator* DBImpl::NewInternalIterator(const ReadOptions& options,
                                      SequenceNumber* latest_snapshot) {
  IterState* cleanup = new IterState;
  *latest_snapshot = versions_->LastSequence();

  // Collect together all needed child iterators
  std::vector<Iterator*> list;
  list.push_back(mem_->NewIterator());
    list.push_back(imm_->NewIterator());
  versions_->current()->AddIterators(options, &list);
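  // Merge the memtable, immutable memtable, and per-level table iterators
  // into a single iterator over the internal key space.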
  Iterator* internal_iter =
      NewMergingIterator(&internal_comparator_, &list[0], list.size());
  versions_->current()->Ref();

  cleanup->mu = &mutex_;
  cleanup->mem = mem_;
  cleanup->imm = imm_;
  cleanup->version = versions_->current();
  internal_iter->RegisterCleanup(CleanupIteratorState, cleanup, NULL);

  return internal_iter;

Iterator* DBImpl::TEST_NewInternalIterator() {
  SequenceNumber ignored;
  return NewInternalIterator(ReadOptions(), &ignored);

int64_t DBImpl::TEST_MaxNextLevelOverlappingBytes() {
  MutexLock l(&mutex_);
  return versions_->MaxNextLevelOverlappingBytes();
Status DBImpl::Get(const ReadOptions& options,
                   std::string* value) {
  MutexLock l(&mutex_);
  SequenceNumber snapshot;
  if (options.snapshot != NULL) {
    snapshot = reinterpret_cast<const SnapshotImpl*>(options.snapshot)->number_;
  } else {
    snapshot = versions_->LastSequence();
  }

  MemTable* mem = mem_;
  MemTable* imm = imm_;
  Version* current = versions_->current();
  if (imm != NULL) imm->Ref();

  bool have_stat_update = false;
  Version::GetStats stats;

  // Unlock while reading from files and memtables
    // First look in the memtable, then in the immutable memtable (if any).
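    // LookupKey packs the user key together with the snapshot sequence
    // number so that entries newer than the snapshot are ignored during
    // the lookup.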
    LookupKey lkey(key, snapshot);
    if (mem->Get(lkey, value, &s)) {
      // Done
    } else if (imm != NULL && imm->Get(lkey, value, &s)) {
      // Done
    } else {
      s = current->Get(options, lkey, value, &stats);
      have_stat_update = true;
    }
  if (have_stat_update && current->UpdateStats(stats)) {
    MaybeScheduleCompaction();
  if (imm != NULL) imm->Unref();

Iterator* DBImpl::NewIterator(const ReadOptions& options) {
  SequenceNumber latest_snapshot;
  Iterator* internal_iter = NewInternalIterator(options, &latest_snapshot);
  return NewDBIterator(
      &dbname_, env_, user_comparator(), internal_iter,
      (options.snapshot != NULL
       ? reinterpret_cast<const SnapshotImpl*>(options.snapshot)->number_
       : latest_snapshot));

const Snapshot* DBImpl::GetSnapshot() {
  MutexLock l(&mutex_);
  return snapshots_.New(versions_->LastSequence());

void DBImpl::ReleaseSnapshot(const Snapshot* s) {
  MutexLock l(&mutex_);
  snapshots_.Delete(reinterpret_cast<const SnapshotImpl*>(s));

// Convenience methods
Status DBImpl::Put(const WriteOptions& o, const Slice& key, const Slice& val) {
  return DB::Put(o, key, val);

Status DBImpl::Delete(const WriteOptions& options, const Slice& key) {
  return DB::Delete(options, key);
// There is at most one thread that is the current logger. This call
// waits until preceding logger(s) have finished and becomes the
// current logger.
void DBImpl::AcquireLoggingResponsibility(LoggerId* self) {
  while (logger_ != NULL) {

void DBImpl::ReleaseLoggingResponsibility(LoggerId* self) {
  assert(logger_ == self);
  logger_cv_.SignalAll();
Status DBImpl::Write(const WriteOptions& options, WriteBatch* updates) {
  MutexLock l(&mutex_);
  AcquireLoggingResponsibility(&self);
  status = MakeRoomForWrite(false);  // May temporarily release lock and wait
  uint64_t last_sequence = versions_->LastSequence();
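  // The batch is assigned the next Count(updates) consecutive sequence
  // numbers, starting at last_sequence + 1.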
    WriteBatchInternal::SetSequence(updates, last_sequence + 1);
    last_sequence += WriteBatchInternal::Count(updates);

    // Add to log and apply to memtable. We can release the lock during
    // this phase since the "logger_" flag protects against concurrent
    // loggers and concurrent writes into mem_.
      assert(logger_ == &self);
      status = log_->AddRecord(WriteBatchInternal::Contents(updates));
      if (status.ok() && options.sync) {
        status = logfile_->Sync();
      status = WriteBatchInternal::InsertInto(updates, mem_);
      assert(logger_ == &self);

    versions_->SetLastSequence(last_sequence);

  ReleaseLoggingResponsibility(&self);
// REQUIRES: mutex_ is held
// REQUIRES: this thread is the current logger
Status DBImpl::MakeRoomForWrite(bool force) {
  mutex_.AssertHeld();
  assert(logger_ != NULL);
  bool allow_delay = !force;
    if (!bg_error_.ok()) {
      // Yield previous error
      s = bg_error_;
      break;
    } else if (allow_delay &&
               versions_->NumLevelFiles(0) >= config::kL0_SlowdownWritesTrigger) {
      // We are getting close to hitting a hard limit on the number of
      // L0 files. Rather than delaying a single write by several
      // seconds when we hit the hard limit, start delaying each
      // individual write by 1ms to reduce latency variance. Also,
      // this delay hands over some CPU to the compaction thread in
      // case it is sharing the same core as the writer.
      env_->SleepForMicroseconds(1000);
      allow_delay = false;  // Do not delay a single write more than once
    } else if (!force &&
               (mem_->ApproximateMemoryUsage() <= options_.write_buffer_size)) {
      // There is room in current memtable
    } else if (imm_ != NULL) {
      // We have filled up the current memtable, but the previous
      // one is still being compacted, so we wait.
    } else if (versions_->NumLevelFiles(0) >= config::kL0_StopWritesTrigger) {
      // There are too many level-0 files.
      Log(options_.info_log, "waiting...\n");
    } else {
      // Attempt to switch to a new memtable and trigger compaction of old
      assert(versions_->PrevLogNumber() == 0);
      uint64_t new_log_number = versions_->NewFileNumber();
      WritableFile* lfile = NULL;
      s = env_->NewWritableFile(LogFileName(dbname_, new_log_number), &lfile);
      logfile_number_ = new_log_number;
      log_ = new log::Writer(lfile);
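      // The filled memtable becomes the immutable memtable; publishing it via
      // has_imm_ lets the background thread (see DoCompactionWork) notice it
      // without acquiring the mutex.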
      imm_ = mem_;
      has_imm_.Release_Store(imm_);
      mem_ = new MemTable(internal_comparator_);
      force = false;  // Do not force another compaction if have room
      MaybeScheduleCompaction();
bool DBImpl::GetProperty(const Slice& property, std::string* value) {
  MutexLock l(&mutex_);
  Slice in = property;
  Slice prefix("leveldb.");
  if (!in.starts_with(prefix)) return false;
  in.remove_prefix(prefix.size());

  if (in.starts_with("num-files-at-level")) {
    in.remove_prefix(strlen("num-files-at-level"));
    bool ok = ConsumeDecimalNumber(&in, &level) && in.empty();
    if (!ok || level >= config::kNumLevels) {
      return false;
    } else {
      char buf[100];
      snprintf(buf, sizeof(buf), "%d",
               versions_->NumLevelFiles(static_cast<int>(level)));
  } else if (in == "stats") {
    snprintf(buf, sizeof(buf),
             "Level Files Size(MB) Time(sec) Read(MB) Write(MB)\n"
             "--------------------------------------------------\n"
    for (int level = 0; level < config::kNumLevels; level++) {
      int files = versions_->NumLevelFiles(level);
      if (stats_[level].micros > 0 || files > 0) {
                 "%3d %8d %8.0f %9.0f %8.0f %9.0f\n",
                 versions_->NumLevelBytes(level) / 1048576.0,
                 stats_[level].micros / 1e6,
                 stats_[level].bytes_read / 1048576.0,
                 stats_[level].bytes_written / 1048576.0);
  } else if (in == "sstables") {
    *value = versions_->current()->DebugString();
void DBImpl::GetApproximateSizes(
    const Range* range, int n,
  // TODO(opt): better implementation
    MutexLock l(&mutex_);
    versions_->current()->Ref();
    v = versions_->current();

  for (int i = 0; i < n; i++) {
    // Convert user_key into a corresponding internal key.
    InternalKey k1(range[i].start, kMaxSequenceNumber, kValueTypeForSeek);
    InternalKey k2(range[i].limit, kMaxSequenceNumber, kValueTypeForSeek);
    uint64_t start = versions_->ApproximateOffsetOf(v, k1);
    uint64_t limit = versions_->ApproximateOffsetOf(v, k2);
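    // The estimate is the difference between the approximate offsets of the
    // two internal keys within the current version's table files.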
    sizes[i] = (limit >= start ? limit - start : 0);

    MutexLock l(&mutex_);
// Default implementations of convenience methods that subclasses of DB
// can call if they wish
Status DB::Put(const WriteOptions& opt, const Slice& key, const Slice& value) {
  batch.Put(key, value);
  return Write(opt, &batch);

Status DB::Delete(const WriteOptions& opt, const Slice& key) {
  return Write(opt, &batch);

Status DB::Open(const Options& options, const std::string& dbname,
  DBImpl* impl = new DBImpl(options, dbname);
  impl->mutex_.Lock();
  Status s = impl->Recover(&edit);  // Handles create_if_missing, error_if_exists
    uint64_t new_log_number = impl->versions_->NewFileNumber();
    WritableFile* lfile;
    s = options.env->NewWritableFile(LogFileName(dbname, new_log_number),
      edit.SetLogNumber(new_log_number);
      impl->logfile_ = lfile;
      impl->logfile_number_ = new_log_number;
      impl->log_ = new log::Writer(lfile);
      s = impl->versions_->LogAndApply(&edit, &impl->mutex_);
      impl->DeleteObsoleteFiles();
      impl->MaybeScheduleCompaction();
  impl->mutex_.Unlock();
Snapshot::~Snapshot() {

Status DestroyDB(const std::string& dbname, const Options& options) {
  Env* env = options.env;
  std::vector<std::string> filenames;
  // Ignore error in case directory does not exist
  env->GetChildren(dbname, &filenames);
  if (filenames.empty()) {
    return Status::OK();

  const std::string lockname = LockFileName(dbname);
  Status result = env->LockFile(lockname, &lock);
    for (size_t i = 0; i < filenames.size(); i++) {
      if (ParseFileName(filenames[i], &number, &type) &&
          filenames[i] != lockname) {  // Lock file will be deleted at end
        Status del = env->DeleteFile(dbname + "/" + filenames[i]);
        if (result.ok() && !del.ok()) {

    env->UnlockFile(lock);  // Ignore error since state is already gone
    env->DeleteFile(lockname);
    env->DeleteDir(dbname);  // Ignore error in case dir contains other files

}  // namespace leveldb