// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "sync/syncable/directory.h"

#include <iterator>

#include "base/base64.h"
#include "base/debug/trace_event.h"
#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "sync/internal_api/public/base/attachment_id_proto.h"
#include "sync/internal_api/public/base/unique_position.h"
#include "sync/internal_api/public/util/unrecoverable_error_handler.h"
#include "sync/syncable/entry.h"
#include "sync/syncable/entry_kernel.h"
#include "sync/syncable/in_memory_directory_backing_store.h"
#include "sync/syncable/on_disk_directory_backing_store.h"
#include "sync/syncable/scoped_kernel_lock.h"
#include "sync/syncable/scoped_parent_child_index_updater.h"
#include "sync/syncable/syncable-inl.h"
#include "sync/syncable/syncable_base_transaction.h"
#include "sync/syncable/syncable_changes_version.h"
#include "sync/syncable/syncable_read_transaction.h"
#include "sync/syncable/syncable_util.h"
#include "sync/syncable/syncable_write_transaction.h"

using std::string;

namespace syncer {
namespace syncable {

// static
const base::FilePath::CharType Directory::kSyncDatabaseFilename[] =
    FILE_PATH_LITERAL("SyncData.sqlite3");

Directory::PersistedKernelInfo::PersistedKernelInfo()
    : next_id(0) {
  ModelTypeSet protocol_types = ProtocolTypes();
  for (ModelTypeSet::Iterator iter = protocol_types.First(); iter.Good();
       iter.Inc()) {
    ResetDownloadProgress(iter.Get());
    transaction_version[iter.Get()] = 0;
  }
}

Directory::PersistedKernelInfo::~PersistedKernelInfo() {}

void Directory::PersistedKernelInfo::ResetDownloadProgress(
    ModelType model_type) {
  // Clear everything except the data type id field.
  download_progress[model_type].Clear();
  download_progress[model_type].set_data_type_id(
      GetSpecificsFieldNumberFromModelType(model_type));

  // Explicitly set an empty token field to denote no progress.
  download_progress[model_type].set_token("");
}

Directory::SaveChangesSnapshot::SaveChangesSnapshot()
    : kernel_info_status(KERNEL_SHARE_INFO_INVALID) {
}

Directory::SaveChangesSnapshot::~SaveChangesSnapshot() {
  STLDeleteElements(&dirty_metas);
  STLDeleteElements(&delete_journals);
}

Directory::Kernel::Kernel(
    const std::string& name,
    const KernelLoadInfo& info, DirectoryChangeDelegate* delegate,
    const WeakHandle<TransactionObserver>& transaction_observer)
    : next_write_transaction_id(0),
      name(name),
      info_status(Directory::KERNEL_SHARE_INFO_VALID),
      persisted_info(info.kernel_info),
      cache_guid(info.cache_guid),
      next_metahandle(info.max_metahandle + 1),
      delegate(delegate),
      transaction_observer(transaction_observer) {
  DCHECK(delegate);
  DCHECK(transaction_observer.IsInitialized());
}

Directory::Kernel::~Kernel() {
  STLDeleteContainerPairSecondPointers(metahandles_map.begin(),
                                       metahandles_map.end());
}

Directory::Directory(
    DirectoryBackingStore* store,
    UnrecoverableErrorHandler* unrecoverable_error_handler,
    ReportUnrecoverableErrorFunction report_unrecoverable_error_function,
    NigoriHandler* nigori_handler,
    Cryptographer* cryptographer)
    : kernel_(NULL),
      store_(store),
      unrecoverable_error_handler_(unrecoverable_error_handler),
      report_unrecoverable_error_function_(
          report_unrecoverable_error_function),
      unrecoverable_error_set_(false),
      nigori_handler_(nigori_handler),
      cryptographer_(cryptographer),
      invariant_check_level_(VERIFY_CHANGES) {
}

Directory::~Directory() {
  Close();
}

DirOpenResult Directory::Open(
    const string& name,
    DirectoryChangeDelegate* delegate,
    const WeakHandle<TransactionObserver>& transaction_observer) {
  TRACE_EVENT0("sync", "SyncDatabaseOpen");

  const DirOpenResult result =
      OpenImpl(name, delegate, transaction_observer);

  if (OPENED != result)
    Close();
  return result;
}

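// Builds the in-memory indices (parent/child index, unsynced and
// unapplied-update sets, tag, ID, and attachment maps) from a freshly loaded
// metahandles map.  Takes ownership of the loaded entries by swapping
// |handles_map| into kernel_.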
void Directory::InitializeIndices(MetahandlesMap* handles_map) {
  ScopedKernelLock lock(this);
  kernel_->metahandles_map.swap(*handles_map);
  for (MetahandlesMap::const_iterator it = kernel_->metahandles_map.begin();
       it != kernel_->metahandles_map.end(); ++it) {
    EntryKernel* entry = it->second;
    if (ParentChildIndex::ShouldInclude(entry))
      kernel_->parent_child_index.Insert(entry);
    const int64 metahandle = entry->ref(META_HANDLE);
    if (entry->ref(IS_UNSYNCED))
      kernel_->unsynced_metahandles.insert(metahandle);
    if (entry->ref(IS_UNAPPLIED_UPDATE)) {
      const ModelType type = entry->GetServerModelType();
      kernel_->unapplied_update_metahandles[type].insert(metahandle);
    }
    if (!entry->ref(UNIQUE_SERVER_TAG).empty()) {
      DCHECK(kernel_->server_tags_map.find(entry->ref(UNIQUE_SERVER_TAG)) ==
             kernel_->server_tags_map.end())
          << "Unexpected duplicate use of server tag";
      kernel_->server_tags_map[entry->ref(UNIQUE_SERVER_TAG)] = entry;
    }
    if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) {
      DCHECK(kernel_->client_tags_map.find(entry->ref(UNIQUE_CLIENT_TAG)) ==
             kernel_->client_tags_map.end())
          << "Unexpected duplicate use of client tag";
      kernel_->client_tags_map[entry->ref(UNIQUE_CLIENT_TAG)] = entry;
    }
    DCHECK(kernel_->ids_map.find(entry->ref(ID).value()) ==
           kernel_->ids_map.end()) << "Unexpected duplicate use of ID";
    kernel_->ids_map[entry->ref(ID).value()] = entry;
    DCHECK(!entry->is_dirty());
    AddToAttachmentIndex(metahandle, entry->ref(ATTACHMENT_METADATA), lock);
  }
}

DirOpenResult Directory::OpenImpl(
    const string& name,
    DirectoryChangeDelegate* delegate,
    const WeakHandle<TransactionObserver>&
        transaction_observer) {
  KernelLoadInfo info;
  // Temporary indices, used before kernel_ is initialized in case Load fails.
  // We swap these into place (an O(1) operation) later.
  Directory::MetahandlesMap tmp_handles_map;

  // Avoids mem leaks on failure.  Harmlessly deletes the empty hash map after
  // the swap in the success case.
  STLValueDeleter<Directory::MetahandlesMap> deleter(&tmp_handles_map);

  JournalIndex delete_journals;

  DirOpenResult result =
      store_->Load(&tmp_handles_map, &delete_journals, &info);
  if (OPENED != result)
    return result;

  kernel_ = new Kernel(name, info, delegate, transaction_observer);
  delete_journal_.reset(new DeleteJournal(&delete_journals));
  InitializeIndices(&tmp_handles_map);

  // Write back the share info to reserve some space in 'next_id'.  This will
  // prevent local ID reuse in the case of an early crash.  See the comments in
  // TakeSnapshotForSaveChanges() or crbug.com/142987 for more information.
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
  if (!SaveChanges())
    return FAILED_INITIAL_WRITE;

  return OPENED;
}

DeleteJournal* Directory::delete_journal() {
  DCHECK(delete_journal_.get());
  return delete_journal_.get();
}

void Directory::Close() {
  store_.reset();
  if (kernel_) {
    delete kernel_;
    kernel_ = NULL;
  }
}

void Directory::OnUnrecoverableError(const BaseTransaction* trans,
                                     const tracked_objects::Location& location,
                                     const std::string& message) {
  DCHECK(trans != NULL);
  unrecoverable_error_set_ = true;
  unrecoverable_error_handler_->OnUnrecoverableError(location,
                                                     message);
}

EntryKernel* Directory::GetEntryById(const Id& id) {
  ScopedKernelLock lock(this);
  return GetEntryById(id, &lock);
}

EntryKernel* Directory::GetEntryById(const Id& id,
                                     ScopedKernelLock* const lock) {
  DCHECK(kernel_);
  // Find it in the in memory ID index.
  IdsMap::iterator id_found = kernel_->ids_map.find(id.value());
  if (id_found != kernel_->ids_map.end()) {
    return id_found->second;
  }
  return NULL;
}

EntryKernel* Directory::GetEntryByClientTag(const string& tag) {
  ScopedKernelLock lock(this);
  DCHECK(kernel_);

  TagsMap::iterator it = kernel_->client_tags_map.find(tag);
  if (it != kernel_->client_tags_map.end()) {
    return it->second;
  }
  return NULL;
}

EntryKernel* Directory::GetEntryByServerTag(const string& tag) {
  ScopedKernelLock lock(this);
  DCHECK(kernel_);
  TagsMap::iterator it = kernel_->server_tags_map.find(tag);
  if (it != kernel_->server_tags_map.end()) {
    return it->second;
  }
  return NULL;
}

EntryKernel* Directory::GetEntryByHandle(int64 metahandle) {
  ScopedKernelLock lock(this);
  return GetEntryByHandle(metahandle, &lock);
}

EntryKernel* Directory::GetEntryByHandle(int64 metahandle,
                                         ScopedKernelLock* lock) {
  // Look up in memory
  MetahandlesMap::iterator found =
      kernel_->metahandles_map.find(metahandle);
  if (found != kernel_->metahandles_map.end()) {
    // Found it in memory.  Easy.
    return found->second;
  }
  return NULL;
}

bool Directory::GetChildHandlesById(
    BaseTransaction* trans, const Id& parent_id,
    Directory::Metahandles* result) {
  if (!SyncAssert(this == trans->directory(), FROM_HERE,
                  "Directories don't match", trans))
    return false;
  result->clear();

  ScopedKernelLock lock(this);
  AppendChildHandles(lock, parent_id, result);
  return true;
}

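// Counts |kernel| itself plus all of its descendants, using a breadth-first
// traversal over the parent-child index.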
int Directory::GetTotalNodeCount(
    BaseTransaction* trans,
    EntryKernel* kernel) const {
  if (!SyncAssert(this == trans->directory(), FROM_HERE,
                  "Directories don't match", trans))
    return 0;

  int count = 1;
  std::deque<const OrderedChildSet*> child_sets;

  GetChildSetForKernel(trans, kernel, &child_sets);
  while (!child_sets.empty()) {
    const OrderedChildSet* set = child_sets.front();
    child_sets.pop_front();
    for (OrderedChildSet::const_iterator it = set->begin();
         it != set->end(); ++it) {
      count++;
      GetChildSetForKernel(trans, *it, &child_sets);
    }
  }

  return count;
}

void Directory::GetChildSetForKernel(
    BaseTransaction* trans,
    EntryKernel* kernel,
    std::deque<const OrderedChildSet*>* child_sets) const {
  if (!kernel->ref(IS_DIR))
    return;  // Not a directory => no children.

  const OrderedChildSet* descendants =
      kernel_->parent_child_index.GetChildren(kernel->ref(ID));
  if (!descendants)
    return;  // This directory has no children.

  // Add our children to the list of items to be traversed.
  child_sets->push_back(descendants);
}

int Directory::GetPositionIndex(
    BaseTransaction* trans,
    EntryKernel* kernel) const {
  const OrderedChildSet* siblings =
      kernel_->parent_child_index.GetChildren(kernel->ref(PARENT_ID));

  OrderedChildSet::const_iterator it = siblings->find(kernel);
  return std::distance(siblings->begin(), it);
}

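// Registers a newly created entry in every in-memory index.  Each insertion
// is guarded by a SyncAssert so a duplicate entry fails loudly rather than
// silently corrupting an index.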
bool Directory::InsertEntry(BaseWriteTransaction* trans, EntryKernel* entry) {
  ScopedKernelLock lock(this);
  return InsertEntry(trans, entry, &lock);
}

bool Directory::InsertEntry(BaseWriteTransaction* trans,
                            EntryKernel* entry,
                            ScopedKernelLock* lock) {
  DCHECK(NULL != lock);
  if (!SyncAssert(NULL != entry, FROM_HERE, "Entry is null", trans))
    return false;

  static const char error[] = "Entry already in memory index.";

  if (!SyncAssert(
          kernel_->metahandles_map.insert(
              std::make_pair(entry->ref(META_HANDLE), entry)).second,
          FROM_HERE,
          error,
          trans)) {
    return false;
  }
  if (!SyncAssert(
          kernel_->ids_map.insert(
              std::make_pair(entry->ref(ID).value(), entry)).second,
          FROM_HERE,
          error,
          trans)) {
    return false;
  }
  if (ParentChildIndex::ShouldInclude(entry)) {
    if (!SyncAssert(kernel_->parent_child_index.Insert(entry),
                    FROM_HERE,
                    error,
                    trans)) {
      return false;
    }
  }
  AddToAttachmentIndex(
      entry->ref(META_HANDLE), entry->ref(ATTACHMENT_METADATA), *lock);

  // Should NEVER be created with a client tag or server tag.
  if (!SyncAssert(entry->ref(UNIQUE_SERVER_TAG).empty(), FROM_HERE,
                  "Server tag should be empty", trans)) {
    return false;
  }
  if (!SyncAssert(entry->ref(UNIQUE_CLIENT_TAG).empty(), FROM_HERE,
                  "Client tag should be empty", trans))
    return false;

  return true;
}

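// Rewrites the entry's ID and updates the ID-dependent indices.  Returns
// false if |new_id| is already in use.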
bool Directory::ReindexId(BaseWriteTransaction* trans,
                          EntryKernel* const entry,
                          const Id& new_id) {
  ScopedKernelLock lock(this);
  if (NULL != GetEntryById(new_id, &lock))
    return false;

  {
    // Update the indices that depend on the ID field.
    ScopedParentChildIndexUpdater updater_b(lock, entry,
        &kernel_->parent_child_index);
    size_t num_erased = kernel_->ids_map.erase(entry->ref(ID).value());
    DCHECK_EQ(1U, num_erased);
    entry->put(ID, new_id);
    kernel_->ids_map[entry->ref(ID).value()] = entry;
  }
  return true;
}

bool Directory::ReindexParentId(BaseWriteTransaction* trans,
                                EntryKernel* const entry,
                                const Id& new_parent_id) {
  ScopedKernelLock lock(this);

  {
    // Update the indices that depend on the PARENT_ID field.
    ScopedParentChildIndexUpdater index_updater(lock, entry,
        &kernel_->parent_child_index);
    entry->put(PARENT_ID, new_parent_id);
  }
  return true;
}

void Directory::RemoveFromAttachmentIndex(
    const int64 metahandle,
    const sync_pb::AttachmentMetadata& attachment_metadata,
    const ScopedKernelLock& lock) {
  for (int i = 0; i < attachment_metadata.record_size(); ++i) {
    AttachmentIdUniqueId unique_id =
        attachment_metadata.record(i).id().unique_id();
    IndexByAttachmentId::iterator iter =
        kernel_->index_by_attachment_id.find(unique_id);
    if (iter != kernel_->index_by_attachment_id.end()) {
      iter->second.erase(metahandle);
      if (iter->second.empty()) {
        kernel_->index_by_attachment_id.erase(iter);
      }
    }
  }
}

void Directory::AddToAttachmentIndex(
    const int64 metahandle,
    const sync_pb::AttachmentMetadata& attachment_metadata,
    const ScopedKernelLock& lock) {
  for (int i = 0; i < attachment_metadata.record_size(); ++i) {
    AttachmentIdUniqueId unique_id =
        attachment_metadata.record(i).id().unique_id();
    IndexByAttachmentId::iterator iter =
        kernel_->index_by_attachment_id.find(unique_id);
    if (iter == kernel_->index_by_attachment_id.end()) {
      iter = kernel_->index_by_attachment_id.insert(std::make_pair(
                                                        unique_id,
                                                        MetahandleSet())).first;
    }
    iter->second.insert(metahandle);
  }
}

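// Replaces the attachment index entries for |metahandle|: references from
// |old_metadata| are dropped and those from |new_metadata| are added.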
void Directory::UpdateAttachmentIndex(
    const int64 metahandle,
    const sync_pb::AttachmentMetadata& old_metadata,
    const sync_pb::AttachmentMetadata& new_metadata) {
  ScopedKernelLock lock(this);
  RemoveFromAttachmentIndex(metahandle, old_metadata, lock);
  AddToAttachmentIndex(metahandle, new_metadata, lock);
}

void Directory::GetMetahandlesByAttachmentId(
    BaseTransaction* trans,
    const sync_pb::AttachmentIdProto& attachment_id_proto,
    Metahandles* result) {
  DCHECK(result);
  result->clear();
  ScopedKernelLock lock(this);
  IndexByAttachmentId::const_iterator index_iter =
      kernel_->index_by_attachment_id.find(attachment_id_proto.unique_id());
  if (index_iter == kernel_->index_by_attachment_id.end())
    return;
  const MetahandleSet& metahandle_set = index_iter->second;
  std::copy(
      metahandle_set.begin(), metahandle_set.end(), back_inserter(*result));
}

bool Directory::unrecoverable_error_set(const BaseTransaction* trans) const {
  DCHECK(trans != NULL);
  return unrecoverable_error_set_;
}

void Directory::ClearDirtyMetahandles() {
  kernel_->transaction_mutex.AssertAcquired();
  kernel_->dirty_metahandles.clear();
}

bool Directory::SafeToPurgeFromMemory(WriteTransaction* trans,
                                      const EntryKernel* const entry) const {
  bool safe = entry->ref(IS_DEL) && !entry->is_dirty() &&
      !entry->ref(SYNCING) && !entry->ref(IS_UNAPPLIED_UPDATE) &&
      !entry->ref(IS_UNSYNCED);

  if (safe) {
    int64 handle = entry->ref(META_HANDLE);
    const ModelType type = entry->GetServerModelType();
    if (!SyncAssert(kernel_->dirty_metahandles.count(handle) == 0U,
                    FROM_HERE,
                    "Dirty metahandles should be empty", trans))
      return false;
    // TODO(tim): Bug 49278.
    if (!SyncAssert(!kernel_->unsynced_metahandles.count(handle),
                    FROM_HERE,
                    "Unsynced handles should be empty",
                    trans))
      return false;
    if (!SyncAssert(!kernel_->unapplied_update_metahandles[type].count(handle),
                    FROM_HERE,
                    "Unapplied metahandles should be empty",
                    trans))
      return false;
  }

  return safe;
}

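// Deep-copies every dirty entry into |snapshot| and optimistically clears the
// in-memory dirty bits.  If the subsequent write fails,
// HandleSaveChangesFailure() restores the bits so the entries are saved again
// on the next attempt.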
void Directory::TakeSnapshotForSaveChanges(SaveChangesSnapshot* snapshot) {
  ReadTransaction trans(FROM_HERE, this);
  ScopedKernelLock lock(this);

  // If there is an unrecoverable error then just bail out.
  if (unrecoverable_error_set(&trans))
    return;

  // Deep copy dirty entries from kernel_->metahandles_index into snapshot and
  // clear dirty flags.
  for (MetahandleSet::const_iterator i = kernel_->dirty_metahandles.begin();
       i != kernel_->dirty_metahandles.end(); ++i) {
    EntryKernel* entry = GetEntryByHandle(*i, &lock);
    if (!entry)
      continue;
    // Skip over false positives; it happens relatively infrequently.
    if (!entry->is_dirty())
      continue;
    snapshot->dirty_metas.insert(snapshot->dirty_metas.end(),
                                 new EntryKernel(*entry));
    DCHECK_EQ(1U, kernel_->dirty_metahandles.count(*i));
    // We don't bother removing from the index here as we blow the entire thing
    // in a moment, and it unnecessarily complicates iteration.
    entry->clear_dirty(NULL);
  }
  ClearDirtyMetahandles();

  // Set purged handles.
  DCHECK(snapshot->metahandles_to_purge.empty());
  snapshot->metahandles_to_purge.swap(kernel_->metahandles_to_purge);

  // Fill kernel_info_status and kernel_info.
  snapshot->kernel_info = kernel_->persisted_info;
  // To avoid duplicates when the process crashes, we record a next_id of
  // greater magnitude than could possibly be reached before the next save
  // changes.  In other words, it's effectively impossible for the user to
  // generate 65536 new bookmarks in 3 seconds.
  snapshot->kernel_info.next_id -= 65536;
  snapshot->kernel_info_status = kernel_->info_status;
  // This one we reset on failure.
  kernel_->info_status = KERNEL_SHARE_INFO_VALID;

  delete_journal_->TakeSnapshotAndClear(
      &trans, &snapshot->delete_journals, &snapshot->delete_journals_to_purge);
}

bool Directory::SaveChanges() {
  bool success = false;

  base::AutoLock scoped_lock(kernel_->save_changes_mutex);

  // Snapshot and save.
  SaveChangesSnapshot snapshot;
  TakeSnapshotForSaveChanges(&snapshot);
  success = store_->SaveChanges(snapshot);

  // Handle success or failure.
  if (success)
    success = VacuumAfterSaveChanges(snapshot);
  else
    HandleSaveChangesFailure(snapshot);
  return success;
}

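// After a successful save, drops entries that are deleted and fully synced on
// both the client and the server from all in-memory indices.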
bool Directory::VacuumAfterSaveChanges(const SaveChangesSnapshot& snapshot) {
  if (snapshot.dirty_metas.empty())
    return true;

  // Need a write transaction as we are about to permanently purge entries.
  WriteTransaction trans(FROM_HERE, VACUUM_AFTER_SAVE, this);
  ScopedKernelLock lock(this);
  // Now drop everything we can out of memory.
  for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
       i != snapshot.dirty_metas.end(); ++i) {
    MetahandlesMap::iterator found =
        kernel_->metahandles_map.find((*i)->ref(META_HANDLE));
    EntryKernel* entry = (found == kernel_->metahandles_map.end() ?
                          NULL : found->second);
    if (entry && SafeToPurgeFromMemory(&trans, entry)) {
      // We now drop deleted metahandles that are up to date on both the client
      // and the server.
      size_t num_erased = 0;
      num_erased = kernel_->metahandles_map.erase(entry->ref(META_HANDLE));
      DCHECK_EQ(1u, num_erased);
      num_erased = kernel_->ids_map.erase(entry->ref(ID).value());
      DCHECK_EQ(1u, num_erased);
      if (!entry->ref(UNIQUE_SERVER_TAG).empty()) {
        num_erased =
            kernel_->server_tags_map.erase(entry->ref(UNIQUE_SERVER_TAG));
        DCHECK_EQ(1u, num_erased);
      }
      if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) {
        num_erased =
            kernel_->client_tags_map.erase(entry->ref(UNIQUE_CLIENT_TAG));
        DCHECK_EQ(1u, num_erased);
      }
      if (!SyncAssert(!kernel_->parent_child_index.Contains(entry),
                      FROM_HERE,
                      "Deleted entry still present",
                      (&trans)))
        return false;
      RemoveFromAttachmentIndex(
          entry->ref(META_HANDLE), entry->ref(ATTACHMENT_METADATA), lock);

      delete entry;
    }
    if (trans.unrecoverable_error_set())
      return false;
  }
  return true;
}

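// Reverts |entry| to the state of a never-applied server update, so the next
// sync cycle will re-download and re-apply the server's copy.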
void Directory::UnapplyEntry(EntryKernel* entry) {
  int64 handle = entry->ref(META_HANDLE);
  ModelType server_type = GetModelTypeFromSpecifics(
      entry->ref(SERVER_SPECIFICS));

  // Clear enough so that on the next sync cycle all local data will
  // be overwritten.
  // Note: do not modify the root node in order to preserve the
  // initial sync ended bit for this type (else on the next restart
  // this type will be treated as disabled and therefore fully purged).
  if (IsRealDataType(server_type) &&
      ModelTypeToRootTag(server_type) == entry->ref(UNIQUE_SERVER_TAG)) {
    return;
  }

  // Set the unapplied bit if this item has server data.
  if (IsRealDataType(server_type) && !entry->ref(IS_UNAPPLIED_UPDATE)) {
    entry->put(IS_UNAPPLIED_UPDATE, true);
    kernel_->unapplied_update_metahandles[server_type].insert(handle);
    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  // Unset the unsynced bit.
  if (entry->ref(IS_UNSYNCED)) {
    kernel_->unsynced_metahandles.erase(handle);
    entry->put(IS_UNSYNCED, false);
    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  // Mark the item as locally deleted. No deleted items are allowed in the
  // parent child index.
  if (!entry->ref(IS_DEL)) {
    kernel_->parent_child_index.Remove(entry);
    entry->put(IS_DEL, true);
    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  // Set the version to the "newly created" version.
  if (entry->ref(BASE_VERSION) != CHANGES_VERSION) {
    entry->put(BASE_VERSION, CHANGES_VERSION);
    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  // At this point locally created items that aren't synced will become locally
  // deleted items, and purged on the next snapshot. All other items will match
  // the state they would have had if they were just created via a server
  // update. See MutableEntry::MutableEntry(.., CreateNewUpdateItem, ..).
}

void Directory::DeleteEntry(bool save_to_journal,
                            EntryKernel* entry,
                            EntryKernelSet* entries_to_journal,
                            const ScopedKernelLock& lock) {
  int64 handle = entry->ref(META_HANDLE);
  ModelType server_type = GetModelTypeFromSpecifics(
      entry->ref(SERVER_SPECIFICS));

  kernel_->metahandles_to_purge.insert(handle);

  size_t num_erased = 0;
  num_erased = kernel_->metahandles_map.erase(entry->ref(META_HANDLE));
  DCHECK_EQ(1u, num_erased);
  num_erased = kernel_->ids_map.erase(entry->ref(ID).value());
  DCHECK_EQ(1u, num_erased);
  num_erased = kernel_->unsynced_metahandles.erase(handle);
  DCHECK_EQ(entry->ref(IS_UNSYNCED), num_erased > 0);
  num_erased =
      kernel_->unapplied_update_metahandles[server_type].erase(handle);
  DCHECK_EQ(entry->ref(IS_UNAPPLIED_UPDATE), num_erased > 0);
  if (kernel_->parent_child_index.Contains(entry))
    kernel_->parent_child_index.Remove(entry);

  if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) {
    num_erased =
        kernel_->client_tags_map.erase(entry->ref(UNIQUE_CLIENT_TAG));
    DCHECK_EQ(1u, num_erased);
  }
  if (!entry->ref(UNIQUE_SERVER_TAG).empty()) {
    num_erased =
        kernel_->server_tags_map.erase(entry->ref(UNIQUE_SERVER_TAG));
    DCHECK_EQ(1u, num_erased);
  }
  RemoveFromAttachmentIndex(handle, entry->ref(ATTACHMENT_METADATA), lock);

  if (save_to_journal) {
    entries_to_journal->insert(entry);
  } else {
    delete entry;
  }
}

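// Purges every entry whose local or server type is in |disabled_types|.
// Types in |types_to_unapply| are unapplied (kept for later re-application)
// instead of deleted; deletions of types in |types_to_journal| are saved to
// the delete journal when it is enabled for that type.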
bool Directory::PurgeEntriesWithTypeIn(ModelTypeSet disabled_types,
                                       ModelTypeSet types_to_journal,
                                       ModelTypeSet types_to_unapply) {
  disabled_types.RemoveAll(ProxyTypes());

  if (disabled_types.Empty())
    return true;

  {
    WriteTransaction trans(FROM_HERE, PURGE_ENTRIES, this);

    EntryKernelSet entries_to_journal;
    STLElementDeleter<EntryKernelSet> journal_deleter(&entries_to_journal);

    {
      ScopedKernelLock lock(this);

      // We iterate in two passes to avoid a bug in STLport (which is used in
      // the Android build).  There are some versions of that library where a
      // hash_map's iterators can be invalidated when an item is erased from the
      // hash_map.
      // See http://sourceforge.net/p/stlport/bugs/239/.

      std::set<EntryKernel*> to_purge;
      for (MetahandlesMap::iterator it = kernel_->metahandles_map.begin();
           it != kernel_->metahandles_map.end(); ++it) {
        const sync_pb::EntitySpecifics& local_specifics =
            it->second->ref(SPECIFICS);
        const sync_pb::EntitySpecifics& server_specifics =
            it->second->ref(SERVER_SPECIFICS);
        ModelType local_type = GetModelTypeFromSpecifics(local_specifics);
        ModelType server_type = GetModelTypeFromSpecifics(server_specifics);

        if ((IsRealDataType(local_type) && disabled_types.Has(local_type)) ||
            (IsRealDataType(server_type) && disabled_types.Has(server_type))) {
          to_purge.insert(it->second);
        }
      }

      for (std::set<EntryKernel*>::iterator it = to_purge.begin();
           it != to_purge.end(); ++it) {
        EntryKernel* entry = *it;

        const sync_pb::EntitySpecifics& local_specifics =
            (*it)->ref(SPECIFICS);
        const sync_pb::EntitySpecifics& server_specifics =
            (*it)->ref(SERVER_SPECIFICS);
        ModelType local_type = GetModelTypeFromSpecifics(local_specifics);
        ModelType server_type = GetModelTypeFromSpecifics(server_specifics);

        if (types_to_unapply.Has(local_type) ||
            types_to_unapply.Has(server_type)) {
          UnapplyEntry(entry);
        } else {
          bool save_to_journal =
              (types_to_journal.Has(local_type) ||
               types_to_journal.Has(server_type)) &&
              (delete_journal_->IsDeleteJournalEnabled(local_type) ||
               delete_journal_->IsDeleteJournalEnabled(server_type));
          DeleteEntry(save_to_journal, entry, &entries_to_journal, lock);
        }
      }

      delete_journal_->AddJournalBatch(&trans, entries_to_journal);

      // Ensure meta tracking for these data types reflects the purged state.
      for (ModelTypeSet::Iterator it = disabled_types.First();
           it.Good(); it.Inc()) {
        kernel_->persisted_info.transaction_version[it.Get()] = 0;

        // Don't discard progress markers or context for unapplied types.
        if (!types_to_unapply.Has(it.Get())) {
          kernel_->persisted_info.ResetDownloadProgress(it.Get());
          kernel_->persisted_info.datatype_context[it.Get()].Clear();
        }
      }

      kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
    }
  }
  return true;
}

bool Directory::ResetVersionsForType(BaseWriteTransaction* trans,
                                     ModelType type) {
  if (!ProtocolTypes().Has(type))
    return false;
  DCHECK_NE(type, BOOKMARKS) << "Only non-hierarchical types are supported";

  EntryKernel* type_root = GetEntryByServerTag(ModelTypeToRootTag(type));
  if (!type_root)
    return false;

  ScopedKernelLock lock(this);
  const Id& type_root_id = type_root->ref(ID);
  Directory::Metahandles children;
  AppendChildHandles(lock, type_root_id, &children);

  for (Metahandles::iterator it = children.begin(); it != children.end();
       ++it) {
    EntryKernel* entry = GetEntryByHandle(*it, &lock);
    if (!entry)
      continue;
    if (entry->ref(BASE_VERSION) > 1)
      entry->put(BASE_VERSION, 1);
    if (entry->ref(SERVER_VERSION) > 1)
      entry->put(SERVER_VERSION, 1);

    // Note that we do not unset IS_UNSYNCED or IS_UNAPPLIED_UPDATE in order
    // to ensure no in-transit data is lost.

    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  return true;
}

bool Directory::IsAttachmentLinked(
    const sync_pb::AttachmentIdProto& attachment_id_proto) const {
  ScopedKernelLock lock(this);
  IndexByAttachmentId::const_iterator iter =
      kernel_->index_by_attachment_id.find(attachment_id_proto.unique_id());
  if (iter != kernel_->index_by_attachment_id.end() && !iter->second.empty()) {
    return true;
  }
  return false;
}

void Directory::HandleSaveChangesFailure(const SaveChangesSnapshot& snapshot) {
  WriteTransaction trans(FROM_HERE, HANDLE_SAVE_FAILURE, this);
  ScopedKernelLock lock(this);
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;

  // Because we optimistically cleared the dirty bit on the real entries when
  // taking the snapshot, we must restore it on failure.  Not doing this could
  // cause lost data, if no other changes are made to the in-memory entries
  // that would cause the dirty bit to get set again. Setting the bit ensures
  // that SaveChanges will at least try again later.
  for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
       i != snapshot.dirty_metas.end(); ++i) {
    MetahandlesMap::iterator found =
        kernel_->metahandles_map.find((*i)->ref(META_HANDLE));
    if (found != kernel_->metahandles_map.end()) {
      found->second->mark_dirty(&kernel_->dirty_metahandles);
    }
  }

  kernel_->metahandles_to_purge.insert(snapshot.metahandles_to_purge.begin(),
                                       snapshot.metahandles_to_purge.end());

  // Restore delete journals.
  delete_journal_->AddJournalBatch(&trans, snapshot.delete_journals);
  delete_journal_->PurgeDeleteJournals(&trans,
                                       snapshot.delete_journals_to_purge);
}

void Directory::GetDownloadProgress(
    ModelType model_type,
    sync_pb::DataTypeProgressMarker* value_out) const {
  ScopedKernelLock lock(this);
  value_out->CopyFrom(
      kernel_->persisted_info.download_progress[model_type]);
}

void Directory::GetDownloadProgressAsString(
    ModelType model_type,
    std::string* value_out) const {
  ScopedKernelLock lock(this);
  kernel_->persisted_info.download_progress[model_type].SerializeToString(
      value_out);
}

size_t Directory::GetEntriesCount() const {
  ScopedKernelLock lock(this);
  return kernel_->metahandles_map.size();
}

void Directory::SetDownloadProgress(
    ModelType model_type,
    const sync_pb::DataTypeProgressMarker& new_progress) {
  ScopedKernelLock lock(this);
  kernel_->persisted_info.download_progress[model_type].CopyFrom(new_progress);
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
}

int64 Directory::GetTransactionVersion(ModelType type) const {
  kernel_->transaction_mutex.AssertAcquired();
  return kernel_->persisted_info.transaction_version[type];
}

void Directory::IncrementTransactionVersion(ModelType type) {
  kernel_->transaction_mutex.AssertAcquired();
  kernel_->persisted_info.transaction_version[type]++;
}

void Directory::GetDataTypeContext(BaseTransaction* trans,
                                   ModelType type,
                                   sync_pb::DataTypeContext* context) const {
  ScopedKernelLock lock(this);
  context->CopyFrom(kernel_->persisted_info.datatype_context[type]);
}

void Directory::SetDataTypeContext(
    BaseWriteTransaction* trans,
    ModelType type,
    const sync_pb::DataTypeContext& context) {
  ScopedKernelLock lock(this);
  kernel_->persisted_info.datatype_context[type].CopyFrom(context);
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
}

ModelTypeSet Directory::InitialSyncEndedTypes() {
  syncable::ReadTransaction trans(FROM_HERE, this);
  ModelTypeSet protocol_types = ProtocolTypes();
  ModelTypeSet initial_sync_ended_types;
  for (ModelTypeSet::Iterator i = protocol_types.First(); i.Good(); i.Inc()) {
    if (InitialSyncEndedForType(&trans, i.Get())) {
      initial_sync_ended_types.Put(i.Get());
    }
  }
  return initial_sync_ended_types;
}

bool Directory::InitialSyncEndedForType(ModelType type) {
  syncable::ReadTransaction trans(FROM_HERE, this);
  return InitialSyncEndedForType(&trans, type);
}

bool Directory::InitialSyncEndedForType(
    BaseTransaction* trans, ModelType type) {
  // True iff the type's root node has been received and applied.
  syncable::Entry entry(trans, syncable::GET_TYPE_ROOT, type);
  return entry.good() && entry.GetBaseVersion() != CHANGES_VERSION;
}

string Directory::store_birthday() const {
  ScopedKernelLock lock(this);
  return kernel_->persisted_info.store_birthday;
}

void Directory::set_store_birthday(const string& store_birthday) {
  ScopedKernelLock lock(this);
  if (kernel_->persisted_info.store_birthday == store_birthday)
    return;
  kernel_->persisted_info.store_birthday = store_birthday;
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
}

string Directory::bag_of_chips() const {
  ScopedKernelLock lock(this);
  return kernel_->persisted_info.bag_of_chips;
}

void Directory::set_bag_of_chips(const string& bag_of_chips) {
  ScopedKernelLock lock(this);
  if (kernel_->persisted_info.bag_of_chips == bag_of_chips)
    return;
  kernel_->persisted_info.bag_of_chips = bag_of_chips;
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
}

string Directory::cache_guid() const {
  // No need to lock since nothing ever writes to it after load.
  return kernel_->cache_guid;
}

NigoriHandler* Directory::GetNigoriHandler() {
  return nigori_handler_;
}

Cryptographer* Directory::GetCryptographer(const BaseTransaction* trans) {
  DCHECK_EQ(this, trans->directory());
  return cryptographer_;
}

void Directory::GetAllMetaHandles(BaseTransaction* trans,
                                  MetahandleSet* result) {
  result->clear();
  ScopedKernelLock lock(this);
  for (MetahandlesMap::iterator i = kernel_->metahandles_map.begin();
       i != kernel_->metahandles_map.end(); ++i) {
    result->insert(i->first);
  }
}

void Directory::GetUnsyncedMetaHandles(BaseTransaction* trans,
                                       Metahandles* result) {
  result->clear();
  ScopedKernelLock lock(this);
  copy(kernel_->unsynced_metahandles.begin(),
       kernel_->unsynced_metahandles.end(), back_inserter(*result));
}

int64 Directory::unsynced_entity_count() const {
  ScopedKernelLock lock(this);
  return kernel_->unsynced_metahandles.size();
}

bool Directory::TypeHasUnappliedUpdates(ModelType type) {
  ScopedKernelLock lock(this);
  return !kernel_->unapplied_update_metahandles[type].empty();
}

void Directory::GetUnappliedUpdateMetaHandles(
    BaseTransaction* trans,
    FullModelTypeSet server_types,
    std::vector<int64>* result) {
  result->clear();
  ScopedKernelLock lock(this);
  for (int i = UNSPECIFIED; i < MODEL_TYPE_COUNT; ++i) {
    const ModelType type = ModelTypeFromInt(i);
    if (server_types.Has(type)) {
      std::copy(kernel_->unapplied_update_metahandles[type].begin(),
                kernel_->unapplied_update_metahandles[type].end(),
                back_inserter(*result));
    }
  }
}

void Directory::GetMetaHandlesOfType(BaseTransaction* trans,
                                     ModelType type,
                                     std::vector<int64>* result) {
  result->clear();
  ScopedKernelLock lock(this);
  for (MetahandlesMap::iterator it = kernel_->metahandles_map.begin();
       it != kernel_->metahandles_map.end(); ++it) {
    EntryKernel* entry = it->second;
    const ModelType entry_type =
        GetModelTypeFromSpecifics(entry->ref(SPECIFICS));
    if (entry_type == type)
      result->push_back(it->first);
  }
}

void Directory::CollectMetaHandleCounts(
    std::vector<int>* num_entries_by_type,
    std::vector<int>* num_to_delete_entries_by_type) {
  syncable::ReadTransaction trans(FROM_HERE, this);
  ScopedKernelLock lock(this);

  for (MetahandlesMap::iterator it = kernel_->metahandles_map.begin();
       it != kernel_->metahandles_map.end(); ++it) {
    EntryKernel* entry = it->second;
    const ModelType type = GetModelTypeFromSpecifics(entry->ref(SPECIFICS));
    (*num_entries_by_type)[type]++;
    if (entry->ref(IS_DEL))
      (*num_to_delete_entries_by_type)[type]++;
  }
}

scoped_ptr<base::ListValue> Directory::GetNodeDetailsForType(
    BaseTransaction* trans,
    ModelType type) {
  scoped_ptr<base::ListValue> nodes(new base::ListValue());

  ScopedKernelLock lock(this);
  for (MetahandlesMap::iterator it = kernel_->metahandles_map.begin();
       it != kernel_->metahandles_map.end(); ++it) {
    if (GetModelTypeFromSpecifics(it->second->ref(SPECIFICS)) != type) {
      continue;
    }

    EntryKernel* kernel = it->second;
    scoped_ptr<base::DictionaryValue> node(
        kernel->ToValue(GetCryptographer(trans)));

    // Add the position index if appropriate.  This must be done here (and not
    // in EntryKernel) because the EntryKernel does not have access to its
    // siblings.
    if (kernel->ShouldMaintainPosition() && !kernel->ref(IS_DEL)) {
      node->SetInteger("positionIndex", GetPositionIndex(trans, kernel));
    }

    nodes->Append(node.release());
  }

  return nodes.Pass();
}

bool Directory::CheckInvariantsOnTransactionClose(
    syncable::BaseTransaction* trans,
    const MetahandleSet& modified_handles) {
  // NOTE: The trans may be in the process of being destructed.  Be careful if
  // you wish to call any of its virtual methods.
  switch (invariant_check_level_) {
    case FULL_DB_VERIFICATION: {
      MetahandleSet all_handles;
      GetAllMetaHandles(trans, &all_handles);
      return CheckTreeInvariants(trans, all_handles);
    }
    case VERIFY_CHANGES: {
      return CheckTreeInvariants(trans, modified_handles);
    }
    case OFF: {
      return true;
    }
  }
  NOTREACHED();
  return false;
}

bool Directory::FullyCheckTreeInvariants(syncable::BaseTransaction* trans) {
  MetahandleSet handles;
  GetAllMetaHandles(trans, &handles);
  return CheckTreeInvariants(trans, handles);
}

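// Verifies structural invariants over |handles|: the root must be a synced
// directory, live entries must have valid parent chains, and version fields
// must be consistent with the unsynced/unapplied flags.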
bool Directory::CheckTreeInvariants(syncable::BaseTransaction* trans,
                                    const MetahandleSet& handles) {
  MetahandleSet::const_iterator i;
  for (i = handles.begin(); i != handles.end(); ++i) {
    int64 metahandle = *i;
    Entry e(trans, GET_BY_HANDLE, metahandle);
    if (!SyncAssert(e.good(), FROM_HERE, "Entry is bad", trans))
      return false;
    syncable::Id id = e.GetId();
    syncable::Id parentid = e.GetParentId();

    if (id.IsRoot()) {
      if (!SyncAssert(e.GetIsDir(), FROM_HERE,
                      "Entry should be a directory",
                      trans))
        return false;
      if (!SyncAssert(parentid.IsRoot(), FROM_HERE,
                      "Entry should be root",
                      trans))
        return false;
      if (!SyncAssert(!e.GetIsUnsynced(), FROM_HERE,
                      "Entry should be synced",
                      trans))
        return false;
      continue;
    }

    if (!e.GetIsDel()) {
      if (!SyncAssert(id != parentid, FROM_HERE,
                      "Id should be different from parent id.",
                      trans))
        return false;
      if (!SyncAssert(!e.GetNonUniqueName().empty(), FROM_HERE,
                      "Non unique name should not be empty.",
                      trans))
        return false;
      int safety_count = handles.size() + 1;
      while (!parentid.IsRoot()) {
        Entry parent(trans, GET_BY_ID, parentid);
        if (!SyncAssert(parent.good(), FROM_HERE,
                        "Parent entry is not valid.",
                        trans))
          return false;
        if (handles.end() == handles.find(parent.GetMetahandle()))
          break;  // Skip further checking if parent was unmodified.
        if (!SyncAssert(parent.GetIsDir(), FROM_HERE,
                        "Parent should be a directory",
                        trans))
          return false;
        if (!SyncAssert(!parent.GetIsDel(), FROM_HERE,
                        "Parent should not have been marked for deletion.",
                        trans))
          return false;
        if (!SyncAssert(handles.end() != handles.find(parent.GetMetahandle()),
                        FROM_HERE,
                        "Parent should be in the index.",
                        trans))
          return false;
        parentid = parent.GetParentId();
        if (!SyncAssert(--safety_count > 0, FROM_HERE,
                        "Count should be greater than zero.",
                        trans))
          return false;
      }
    }
    int64 base_version = e.GetBaseVersion();
    int64 server_version = e.GetServerVersion();
    bool using_unique_client_tag = !e.GetUniqueClientTag().empty();
    if (CHANGES_VERSION == base_version || 0 == base_version) {
      if (e.GetIsUnappliedUpdate()) {
        // Must be a new item, or a de-duplicated unique client tag
        // that was created both locally and remotely.
        if (!using_unique_client_tag) {
          if (!SyncAssert(e.GetIsDel(), FROM_HERE,
                          "The entry should have been deleted.",
                          trans))
            return false;
        }
        // It came from the server, so it must have a server ID.
        if (!SyncAssert(id.ServerKnows(), FROM_HERE,
                        "The id should be from a server.",
                        trans))
          return false;
      } else {
        if (e.GetIsDir()) {
          // TODO(chron): Implement this mode if clients ever need it.
          // For now, you can't combine a client tag and a directory.
          if (!SyncAssert(!using_unique_client_tag, FROM_HERE,
                          "Directory cannot have a client tag.",
                          trans))
            return false;
        }
        // Should be an uncommitted item, or a successfully deleted one.
        if (!e.GetIsDel()) {
          if (!SyncAssert(e.GetIsUnsynced(), FROM_HERE,
                          "The item should be unsynced.",
                          trans))
            return false;
        }
        // If the next check failed, it would imply that an item exists
        // on the server, isn't waiting for application locally, but either
        // is an unsynced create or a successful delete in the local copy.
        // Either way, that's a mismatch.
        if (!SyncAssert(0 == server_version, FROM_HERE,
                        "Server version should be zero.",
                        trans))
          return false;
        // Items that aren't using the unique client tag should have a zero
        // base version only if they have a local ID.  Items with unique client
        // tags are allowed to use the zero base version for undeletion and
        // de-duplication; the unique client tag trumps the server ID.
        if (!using_unique_client_tag) {
          if (!SyncAssert(!id.ServerKnows(), FROM_HERE,
                          "Should be a client only id.",
                          trans))
            return false;
        }
      }
    } else {
      if (!SyncAssert(id.ServerKnows(),
                      FROM_HERE,
                      "Should be a server id.",
                      trans))
        return false;
    }
    // Server-unknown items that are locally deleted should not be sent up to
    // the server.  They must be !IS_UNSYNCED.
    if (!SyncAssert(!(!id.ServerKnows() && e.GetIsDel() && e.GetIsUnsynced()),
                    FROM_HERE,
                    "Locally deleted item must not be unsynced.",
                    trans)) {
      return false;
    }
  }
  return true;
}

void Directory::SetInvariantCheckLevel(InvariantCheckLevel check_level) {
  invariant_check_level_ = check_level;
}

int64 Directory::NextMetahandle() {
  ScopedKernelLock lock(this);
  int64 metahandle = (kernel_->next_metahandle)++;
  return metahandle;
}

// Always returns a client ID that is the string representation of a negative
// number.
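// For example, if persisted_info.next_id currently holds -65537, this call
// returns the client ID built from "-65537" and leaves -65538 as the next
// value.  (The string form of the ID is up to Id::CreateFromClientString.)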
Id Directory::NextId() {
  int64 result;
  {
    ScopedKernelLock lock(this);
    result = (kernel_->persisted_info.next_id)--;
    kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
  }
  DCHECK_LT(result, 0);
  return Id::CreateFromClientString(base::Int64ToString(result));
}

bool Directory::HasChildren(BaseTransaction* trans, const Id& id) {
  ScopedKernelLock lock(this);
  return kernel_->parent_child_index.GetChildren(id) != NULL;
}

Id Directory::GetFirstChildId(BaseTransaction* trans,
                              const EntryKernel* parent) {
  DCHECK(parent);
  DCHECK(parent->ref(IS_DIR));

  ScopedKernelLock lock(this);
  const OrderedChildSet* children =
      kernel_->parent_child_index.GetChildren(parent->ref(ID));

  // We're expected to return root if there are no children.
  if (!children)
    return Id();

  return (*children->begin())->ref(ID);
}

syncable::Id Directory::GetPredecessorId(EntryKernel* e) {
  ScopedKernelLock lock(this);

  DCHECK(ParentChildIndex::ShouldInclude(e));
  const OrderedChildSet* children =
      kernel_->parent_child_index.GetChildren(e->ref(PARENT_ID));
  DCHECK(children && !children->empty());
  OrderedChildSet::const_iterator i = children->find(e);
  DCHECK(i != children->end());

  if (i == children->begin()) {
    return Id();
  } else {
    i--;
    return (*i)->ref(ID);
  }
}

syncable::Id Directory::GetSuccessorId(EntryKernel* e) {
  ScopedKernelLock lock(this);

  DCHECK(ParentChildIndex::ShouldInclude(e));
  const OrderedChildSet* children =
      kernel_->parent_child_index.GetChildren(e->ref(PARENT_ID));
  DCHECK(children && !children->empty());
  OrderedChildSet::const_iterator i = children->find(e);
  DCHECK(i != children->end());

  i++;
  if (i == children->end()) {
    return Id();
  } else {
    return (*i)->ref(ID);
  }
}

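// Assigns e's UNIQUE_POSITION so that it sorts immediately after
// |predecessor|.  Four cases are handled below: no siblings at all, inserting
// at the front (root predecessor), inserting at the end, and inserting
// between two positioned siblings.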
// TODO(rlarocque): Remove all support for placing ShouldMaintainPosition()
// items as siblings of items that do not maintain positions.  It is required
// only for tests.  See crbug.com/178282.
void Directory::PutPredecessor(EntryKernel* e, EntryKernel* predecessor) {
  DCHECK(!e->ref(IS_DEL));
  if (!e->ShouldMaintainPosition()) {
    DCHECK(!e->ref(UNIQUE_POSITION).IsValid());
    return;
  }
  std::string suffix = e->ref(UNIQUE_BOOKMARK_TAG);
  DCHECK(!suffix.empty());

  // Remove our item from the ParentChildIndex and remember to re-add it later.
  ScopedKernelLock lock(this);
  ScopedParentChildIndexUpdater updater(lock, e, &kernel_->parent_child_index);

  // Note: The ScopedParentChildIndexUpdater will update this set for us as we
  // leave this function.
  const OrderedChildSet* siblings =
      kernel_->parent_child_index.GetChildren(e->ref(PARENT_ID));

  if (!siblings) {
    // This parent currently has no other children.
    DCHECK(predecessor->ref(ID).IsRoot());
    UniquePosition pos = UniquePosition::InitialPosition(suffix);
    e->put(UNIQUE_POSITION, pos);
    return;
  }

  if (predecessor->ref(ID).IsRoot()) {
    // We have at least one sibling, and we're inserting to the left of them.
    UniquePosition successor_pos = (*siblings->begin())->ref(UNIQUE_POSITION);

    UniquePosition pos;
    if (!successor_pos.IsValid()) {
      // If all our successors are of non-positionable types, just create an
      // initial position.  We arbitrarily choose to sort invalid positions to
      // the right of the valid positions.
      //
      // We really shouldn't need to support this.  See TODO above.
      pos = UniquePosition::InitialPosition(suffix);
    } else {
      DCHECK(!siblings->empty());
      pos = UniquePosition::Before(successor_pos, suffix);
    }

    e->put(UNIQUE_POSITION, pos);
    return;
  }

  // We can't support placing an item after an invalid position.  Fortunately,
  // the tests don't exercise this particular case.  We should not support
  // siblings with invalid positions at all.  See TODO above.
  DCHECK(predecessor->ref(UNIQUE_POSITION).IsValid());

  OrderedChildSet::const_iterator neighbour = siblings->find(predecessor);
  DCHECK(neighbour != siblings->end());

  ++neighbour;
  if (neighbour == siblings->end()) {
    // Inserting at the end of the list.
    UniquePosition pos = UniquePosition::After(
        predecessor->ref(UNIQUE_POSITION),
        suffix);
    e->put(UNIQUE_POSITION, pos);
    return;
  }

  EntryKernel* successor = *neighbour;

  // Another mixed valid and invalid position case.  This one could be
  // supported in theory, but we're trying to deprecate support for siblings
  // with and without valid positions.  See TODO above.
  DCHECK(successor->ref(UNIQUE_POSITION).IsValid());

  // Finally, the normal case: inserting between two elements.
  UniquePosition pos = UniquePosition::Between(
      predecessor->ref(UNIQUE_POSITION),
      successor->ref(UNIQUE_POSITION),
      suffix);
  e->put(UNIQUE_POSITION, pos);
  return;
}

// TODO(rlarocque): Avoid this indirection.  Just return the set.
void Directory::AppendChildHandles(const ScopedKernelLock& lock,
                                   const Id& parent_id,
                                   Directory::Metahandles* result) {
  const OrderedChildSet* children =
      kernel_->parent_child_index.GetChildren(parent_id);
  if (!children)
    return;

  for (OrderedChildSet::const_iterator i = children->begin();
       i != children->end(); ++i) {
    DCHECK_EQ(parent_id, (*i)->ref(PARENT_ID));
    result->push_back((*i)->ref(META_HANDLE));
  }
}

void Directory::UnmarkDirtyEntry(WriteTransaction* trans, Entry* entry) {
  CHECK(trans);
  entry->kernel_->clear_dirty(&kernel_->dirty_metahandles);
}

}  // namespace syncable
}  // namespace syncer