// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "sync/syncable/directory.h"

#include <iterator>

#include "base/base64.h"
#include "base/debug/trace_event.h"
#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "sync/internal_api/public/base/unique_position.h"
#include "sync/internal_api/public/util/unrecoverable_error_handler.h"
#include "sync/syncable/entry.h"
#include "sync/syncable/entry_kernel.h"
#include "sync/syncable/in_memory_directory_backing_store.h"
#include "sync/syncable/on_disk_directory_backing_store.h"
#include "sync/syncable/scoped_kernel_lock.h"
#include "sync/syncable/scoped_parent_child_index_updater.h"
#include "sync/syncable/syncable-inl.h"
#include "sync/syncable/syncable_base_transaction.h"
#include "sync/syncable/syncable_changes_version.h"
#include "sync/syncable/syncable_read_transaction.h"
#include "sync/syncable/syncable_util.h"
#include "sync/syncable/syncable_write_transaction.h"

using std::string;

namespace syncer {
namespace syncable {

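// Directory is the in-memory image of the sync database.  It owns the
// EntryKernel lookup indices (metahandle, ID, server tag, client tag and
// parent/child), tracks dirty entries, and persists changes through the
// DirectoryBackingStore supplied to its constructor.
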
// static
const base::FilePath::CharType Directory::kSyncDatabaseFilename[] =
    FILE_PATH_LITERAL("SyncData.sqlite3");

Directory::PersistedKernelInfo::PersistedKernelInfo()
    : next_id(0) {
  ModelTypeSet protocol_types = ProtocolTypes();
  for (ModelTypeSet::Iterator iter = protocol_types.First(); iter.Good();
       iter.Inc()) {
    reset_download_progress(iter.Get());
    transaction_version[iter.Get()] = 0;
  }
}

Directory::PersistedKernelInfo::~PersistedKernelInfo() {}

void Directory::PersistedKernelInfo::reset_download_progress(
    ModelType model_type) {
  download_progress[model_type].set_data_type_id(
      GetSpecificsFieldNumberFromModelType(model_type));
  // An empty-string token indicates no prior knowledge.
  download_progress[model_type].set_token(std::string());
}

Directory::SaveChangesSnapshot::SaveChangesSnapshot()
    : kernel_info_status(KERNEL_SHARE_INFO_INVALID) {
}

Directory::SaveChangesSnapshot::~SaveChangesSnapshot() {
  STLDeleteElements(&dirty_metas);
  STLDeleteElements(&delete_journals);
}

Directory::Kernel::Kernel(
    const std::string& name,
    const KernelLoadInfo& info, DirectoryChangeDelegate* delegate,
    const WeakHandle<TransactionObserver>& transaction_observer)
    : next_write_transaction_id(0),
      name(name),
      info_status(Directory::KERNEL_SHARE_INFO_VALID),
      persisted_info(info.kernel_info),
      cache_guid(info.cache_guid),
      next_metahandle(info.max_metahandle + 1),
      delegate(delegate),
      transaction_observer(transaction_observer) {
  DCHECK(delegate);
  DCHECK(transaction_observer.IsInitialized());
}

Directory::Kernel::~Kernel() {
  STLDeleteContainerPairSecondPointers(metahandles_map.begin(),
                                       metahandles_map.end());
}

Directory::Directory(
    DirectoryBackingStore* store,
    UnrecoverableErrorHandler* unrecoverable_error_handler,
    ReportUnrecoverableErrorFunction report_unrecoverable_error_function,
    NigoriHandler* nigori_handler,
    Cryptographer* cryptographer)
    : kernel_(NULL),
      store_(store),
      unrecoverable_error_handler_(unrecoverable_error_handler),
      report_unrecoverable_error_function_(
          report_unrecoverable_error_function),
      unrecoverable_error_set_(false),
      nigori_handler_(nigori_handler),
      cryptographer_(cryptographer),
      invariant_check_level_(VERIFY_CHANGES) {
}

Directory::~Directory() {
  Close();
}

DirOpenResult Directory::Open(
    const string& name,
    DirectoryChangeDelegate* delegate,
    const WeakHandle<TransactionObserver>& transaction_observer) {
  TRACE_EVENT0("sync", "SyncDatabaseOpen");

  const DirOpenResult result =
      OpenImpl(name, delegate, transaction_observer);

  if (OPENED != result)
    Close();
  return result;
}

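// Builds the in-memory lookup indices from a freshly loaded handles map.
// Ownership of the EntryKernels transfers to kernel_->metahandles_map via
// the swap below.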
void Directory::InitializeIndices(MetahandlesMap* handles_map) {
  kernel_->metahandles_map.swap(*handles_map);
  for (MetahandlesMap::const_iterator it = kernel_->metahandles_map.begin();
       it != kernel_->metahandles_map.end(); ++it) {
    EntryKernel* entry = it->second;
    if (ParentChildIndex::ShouldInclude(entry))
      kernel_->parent_child_index.Insert(entry);
    const int64 metahandle = entry->ref(META_HANDLE);
    if (entry->ref(IS_UNSYNCED))
      kernel_->unsynced_metahandles.insert(metahandle);
    if (entry->ref(IS_UNAPPLIED_UPDATE)) {
      const ModelType type = entry->GetServerModelType();
      kernel_->unapplied_update_metahandles[type].insert(metahandle);
    }
    if (!entry->ref(UNIQUE_SERVER_TAG).empty()) {
      DCHECK(kernel_->server_tags_map.find(entry->ref(UNIQUE_SERVER_TAG)) ==
             kernel_->server_tags_map.end())
          << "Unexpected duplicate use of server tag";
      kernel_->server_tags_map[entry->ref(UNIQUE_SERVER_TAG)] = entry;
    }
    if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) {
      DCHECK(kernel_->client_tags_map.find(entry->ref(UNIQUE_CLIENT_TAG)) ==
             kernel_->client_tags_map.end())
          << "Unexpected duplicate use of client tag";
      kernel_->client_tags_map[entry->ref(UNIQUE_CLIENT_TAG)] = entry;
    }
    DCHECK(kernel_->ids_map.find(entry->ref(ID).value()) ==
           kernel_->ids_map.end()) << "Unexpected duplicate use of ID";
    kernel_->ids_map[entry->ref(ID).value()] = entry;
    DCHECK(!entry->is_dirty());
  }
}

DirOpenResult Directory::OpenImpl(
    const string& name,
    DirectoryChangeDelegate* delegate,
    const WeakHandle<TransactionObserver>&
        transaction_observer) {
  KernelLoadInfo info;
  // Temporary indices, used in case Load fails before kernel_ is
  // initialized.  On success we swap them into the kernel in O(1).
  Directory::MetahandlesMap tmp_handles_map;
  JournalIndex delete_journals;

  DirOpenResult result =
      store_->Load(&tmp_handles_map, &delete_journals, &info);
  if (OPENED != result)
    return result;

  kernel_ = new Kernel(name, info, delegate, transaction_observer);
  delete_journal_.reset(new DeleteJournal(&delete_journals));
  InitializeIndices(&tmp_handles_map);

  // Write back the share info to reserve some space in 'next_id'.  This will
  // prevent local ID reuse in the case of an early crash.  See the comments in
  // TakeSnapshotForSaveChanges() or crbug.com/142987 for more information.
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
  if (!SaveChanges())
    return FAILED_INITIAL_WRITE;

  return OPENED;
}

DeleteJournal* Directory::delete_journal() {
  DCHECK(delete_journal_.get());
  return delete_journal_.get();
}

void Directory::Close() {
  store_.reset();
  if (kernel_) {
    delete kernel_;
    kernel_ = NULL;
  }
}

void Directory::OnUnrecoverableError(const BaseTransaction* trans,
                                     const tracked_objects::Location& location,
                                     const std::string& message) {
  DCHECK(trans != NULL);
  unrecoverable_error_set_ = true;
  unrecoverable_error_handler_->OnUnrecoverableError(location,
                                                     message);
}

EntryKernel* Directory::GetEntryById(const Id& id) {
  ScopedKernelLock lock(this);
  return GetEntryById(id, &lock);
}

EntryKernel* Directory::GetEntryById(const Id& id,
                                     ScopedKernelLock* const lock) {
  DCHECK(kernel_);
  // Find it in the in-memory ID index.
  IdsMap::iterator id_found = kernel_->ids_map.find(id.value());
  if (id_found != kernel_->ids_map.end()) {
    return id_found->second;
  }
  return NULL;
}

EntryKernel* Directory::GetEntryByClientTag(const string& tag) {
  ScopedKernelLock lock(this);
  DCHECK(kernel_);

  TagsMap::iterator it = kernel_->client_tags_map.find(tag);
  if (it != kernel_->client_tags_map.end()) {
    return it->second;
  }
  return NULL;
}

EntryKernel* Directory::GetEntryByServerTag(const string& tag) {
  ScopedKernelLock lock(this);
  DCHECK(kernel_);
  TagsMap::iterator it = kernel_->server_tags_map.find(tag);
  if (it != kernel_->server_tags_map.end()) {
    return it->second;
  }
  return NULL;
}

EntryKernel* Directory::GetEntryByHandle(int64 metahandle) {
  ScopedKernelLock lock(this);
  return GetEntryByHandle(metahandle, &lock);
}

EntryKernel* Directory::GetEntryByHandle(int64 metahandle,
                                         ScopedKernelLock* lock) {
  // Look up in memory.
  MetahandlesMap::iterator found =
      kernel_->metahandles_map.find(metahandle);
  if (found != kernel_->metahandles_map.end()) {
    // Found it in memory.  Easy.
    return found->second;
  }
  return NULL;
}

bool Directory::GetChildHandlesById(
    BaseTransaction* trans, const Id& parent_id,
    Directory::Metahandles* result) {
  if (!SyncAssert(this == trans->directory(), FROM_HERE,
                  "Directories don't match", trans))
    return false;
  result->clear();

  ScopedKernelLock lock(this);
  AppendChildHandles(lock, parent_id, result);
  return true;
}

bool Directory::GetChildHandlesByHandle(
    BaseTransaction* trans, int64 handle,
    Directory::Metahandles* result) {
  if (!SyncAssert(this == trans->directory(), FROM_HERE,
                  "Directories don't match", trans))
    return false;

  result->clear();

  ScopedKernelLock lock(this);
  EntryKernel* kernel = GetEntryByHandle(handle, &lock);
  if (!kernel)
    return true;

  AppendChildHandles(lock, kernel->ref(ID), result);
  return true;
}

int Directory::GetTotalNodeCount(
    BaseTransaction* trans,
    EntryKernel* kernel) const {
  if (!SyncAssert(this == trans->directory(), FROM_HERE,
                  "Directories don't match", trans))
    return 0;

  int count = 1;
  std::deque<const OrderedChildSet*> child_sets;

  GetChildSetForKernel(trans, kernel, &child_sets);
  while (!child_sets.empty()) {
    const OrderedChildSet* set = child_sets.front();
    child_sets.pop_front();
    for (OrderedChildSet::const_iterator it = set->begin();
         it != set->end(); ++it) {
      count++;
      GetChildSetForKernel(trans, *it, &child_sets);
    }
  }

  return count;
}

void Directory::GetChildSetForKernel(
    BaseTransaction* trans,
    EntryKernel* kernel,
    std::deque<const OrderedChildSet*>* child_sets) const {
  if (!kernel->ref(IS_DIR))
    return;  // Not a directory => no children.

  const OrderedChildSet* descendants =
      kernel_->parent_child_index.GetChildren(kernel->ref(ID));
  if (!descendants)
    return;  // This directory has no children.

  // Add our children to the list of items to be traversed.
  child_sets->push_back(descendants);
}

int Directory::GetPositionIndex(
    BaseTransaction* trans,
    EntryKernel* kernel) const {
  const OrderedChildSet* siblings =
      kernel_->parent_child_index.GetChildren(kernel->ref(PARENT_ID));

  OrderedChildSet::const_iterator it = siblings->find(kernel);
  return std::distance(siblings->begin(), it);
}

EntryKernel* Directory::GetRootEntry() {
  return GetEntryById(Id());
}

bool Directory::InsertEntry(BaseWriteTransaction* trans, EntryKernel* entry) {
  ScopedKernelLock lock(this);
  return InsertEntry(trans, entry, &lock);
}

bool Directory::InsertEntry(BaseWriteTransaction* trans,
                            EntryKernel* entry,
                            ScopedKernelLock* lock) {
  DCHECK(NULL != lock);
  if (!SyncAssert(NULL != entry, FROM_HERE, "Entry is null", trans))
    return false;

  static const char error[] = "Entry already in memory index.";

  if (!SyncAssert(
          kernel_->metahandles_map.insert(
              std::make_pair(entry->ref(META_HANDLE), entry)).second,
          FROM_HERE,
          error,
          trans)) {
    return false;
  }
  if (!SyncAssert(
          kernel_->ids_map.insert(
              std::make_pair(entry->ref(ID).value(), entry)).second,
          FROM_HERE,
          error,
          trans)) {
    return false;
  }
  if (ParentChildIndex::ShouldInclude(entry)) {
    if (!SyncAssert(kernel_->parent_child_index.Insert(entry),
                    FROM_HERE,
                    error,
                    trans)) {
      return false;
    }
  }

  // Should NEVER be created with a client tag or server tag.
  if (!SyncAssert(entry->ref(UNIQUE_SERVER_TAG).empty(), FROM_HERE,
                  "Server tag should be empty", trans)) {
    return false;
  }
  if (!SyncAssert(entry->ref(UNIQUE_CLIENT_TAG).empty(), FROM_HERE,
                  "Client tag should be empty", trans))
    return false;

  return true;
}

bool Directory::ReindexId(BaseWriteTransaction* trans,
                          EntryKernel* const entry,
                          const Id& new_id) {
  ScopedKernelLock lock(this);
  if (NULL != GetEntryById(new_id, &lock))
    return false;

  {
    // Update the indices that depend on the ID field.
    ScopedParentChildIndexUpdater updater_b(lock, entry,
        &kernel_->parent_child_index);
    size_t num_erased = kernel_->ids_map.erase(entry->ref(ID).value());
    DCHECK_EQ(1U, num_erased);
    entry->put(ID, new_id);
    kernel_->ids_map[entry->ref(ID).value()] = entry;
  }
  return true;
}

bool Directory::ReindexParentId(BaseWriteTransaction* trans,
                                EntryKernel* const entry,
                                const Id& new_parent_id) {
  ScopedKernelLock lock(this);

  {
    // Update the indices that depend on the PARENT_ID field.
    ScopedParentChildIndexUpdater index_updater(lock, entry,
        &kernel_->parent_child_index);
    entry->put(PARENT_ID, new_parent_id);
  }
  return true;
}

bool Directory::unrecoverable_error_set(const BaseTransaction* trans) const {
  DCHECK(trans != NULL);
  return unrecoverable_error_set_;
}

void Directory::ClearDirtyMetahandles() {
  kernel_->transaction_mutex.AssertAcquired();
  kernel_->dirty_metahandles.clear();
}

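// An entry may be dropped from memory only once it is deleted, clean, and
// fully acknowledged by the server: not syncing, not unsynced, and with no
// unapplied server update.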
bool Directory::SafeToPurgeFromMemory(WriteTransaction* trans,
                                      const EntryKernel* const entry) const {
  bool safe = entry->ref(IS_DEL) && !entry->is_dirty() &&
      !entry->ref(SYNCING) && !entry->ref(IS_UNAPPLIED_UPDATE) &&
      !entry->ref(IS_UNSYNCED);

  if (safe) {
    int64 handle = entry->ref(META_HANDLE);
    const ModelType type = entry->GetServerModelType();
    if (!SyncAssert(kernel_->dirty_metahandles.count(handle) == 0U,
                    FROM_HERE,
                    "Dirty metahandles should be empty", trans))
      return false;
    // TODO(tim): Bug 49278.
    if (!SyncAssert(!kernel_->unsynced_metahandles.count(handle),
                    FROM_HERE,
                    "Unsynced handles should be empty",
                    trans))
      return false;
    if (!SyncAssert(!kernel_->unapplied_update_metahandles[type].count(handle),
                    FROM_HERE,
                    "Unapplied metahandles should be empty",
                    trans))
      return false;
  }

  return safe;
}

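// Copies all dirty state into |snapshot| and optimistically clears the dirty
// bits on the live entries.  HandleSaveChangesFailure() restores the bits if
// the subsequent write fails.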
void Directory::TakeSnapshotForSaveChanges(SaveChangesSnapshot* snapshot) {
  ReadTransaction trans(FROM_HERE, this);
  ScopedKernelLock lock(this);

  // If there is an unrecoverable error then just bail out.
  if (unrecoverable_error_set(&trans))
    return;

  // Deep copy dirty entries from kernel_->metahandles_map into the snapshot
  // and clear their dirty flags.
  for (MetahandleSet::const_iterator i = kernel_->dirty_metahandles.begin();
       i != kernel_->dirty_metahandles.end(); ++i) {
    EntryKernel* entry = GetEntryByHandle(*i, &lock);
    if (!entry)
      continue;
    // Skip over false positives; it happens relatively infrequently.
    if (!entry->is_dirty())
      continue;
    snapshot->dirty_metas.insert(snapshot->dirty_metas.end(),
                                 new EntryKernel(*entry));
    DCHECK_EQ(1U, kernel_->dirty_metahandles.count(*i));
    // We don't bother removing from the index here as we blow the entire thing
    // in a moment, and it unnecessarily complicates iteration.
    entry->clear_dirty(NULL);
  }
  ClearDirtyMetahandles();

  // Set purged handles.
  DCHECK(snapshot->metahandles_to_purge.empty());
  snapshot->metahandles_to_purge.swap(kernel_->metahandles_to_purge);

  // Fill kernel_info_status and kernel_info.
  snapshot->kernel_info = kernel_->persisted_info;
  // To avoid duplicates when the process crashes, we record a next_id of
  // greater magnitude than could possibly be reached before the next save
  // changes.  In other words, it's effectively impossible for the user to
  // generate 65536 new bookmarks in 3 seconds.
  snapshot->kernel_info.next_id -= 65536;
  snapshot->kernel_info_status = kernel_->info_status;
  // This one we reset on failure.
  kernel_->info_status = KERNEL_SHARE_INFO_VALID;

  delete_journal_->TakeSnapshotAndClear(
      &trans, &snapshot->delete_journals, &snapshot->delete_journals_to_purge);
}

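// Flushes a snapshot of all dirty state to the backing store under
// save_changes_mutex, then either vacuums purgeable entries (on success) or
// restores the snapshot so a later call can retry (on failure).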
bool Directory::SaveChanges() {
  bool success = false;

  base::AutoLock scoped_lock(kernel_->save_changes_mutex);

  // Snapshot and save.
  SaveChangesSnapshot snapshot;
  TakeSnapshotForSaveChanges(&snapshot);
  success = store_->SaveChanges(snapshot);

  // Handle success or failure.
  if (success)
    success = VacuumAfterSaveChanges(snapshot);
  else
    HandleSaveChangesFailure(snapshot);
  return success;
}

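// After a successful save, drops from memory any snapshotted entries that
// SafeToPurgeFromMemory() says are no longer needed.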
bool Directory::VacuumAfterSaveChanges(const SaveChangesSnapshot& snapshot) {
  if (snapshot.dirty_metas.empty())
    return true;

  // Need a write transaction as we are about to permanently purge entries.
  WriteTransaction trans(FROM_HERE, VACUUM_AFTER_SAVE, this);
  ScopedKernelLock lock(this);
  // Now drop everything we can out of memory.
  for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
       i != snapshot.dirty_metas.end(); ++i) {
    MetahandlesMap::iterator found =
        kernel_->metahandles_map.find((*i)->ref(META_HANDLE));
    EntryKernel* entry = (found == kernel_->metahandles_map.end() ?
                          NULL : found->second);
    if (entry && SafeToPurgeFromMemory(&trans, entry)) {
      // We now drop deleted metahandles that are up to date on both the client
      // and the server.
      size_t num_erased = 0;
      num_erased = kernel_->metahandles_map.erase(entry->ref(META_HANDLE));
      DCHECK_EQ(1u, num_erased);
      num_erased = kernel_->ids_map.erase(entry->ref(ID).value());
      DCHECK_EQ(1u, num_erased);
      if (!entry->ref(UNIQUE_SERVER_TAG).empty()) {
        num_erased =
            kernel_->server_tags_map.erase(entry->ref(UNIQUE_SERVER_TAG));
        DCHECK_EQ(1u, num_erased);
      }
      if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) {
        num_erased =
            kernel_->client_tags_map.erase(entry->ref(UNIQUE_CLIENT_TAG));
        DCHECK_EQ(1u, num_erased);
      }
      if (!SyncAssert(!kernel_->parent_child_index.Contains(entry),
                      FROM_HERE,
                      "Deleted entry still present",
                      (&trans)))
        return false;
      delete entry;
    }
    if (trans.unrecoverable_error_set())
      return false;
  }
  return true;
}

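// Resets |entry| to the state it would have if it had just arrived as a
// server update, so that the server copy is re-applied on the next sync
// cycle.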
void Directory::UnapplyEntry(EntryKernel* entry) {
  int64 handle = entry->ref(META_HANDLE);
  ModelType server_type = GetModelTypeFromSpecifics(
      entry->ref(SERVER_SPECIFICS));

  // Clear enough so that on the next sync cycle all local data will
  // be overwritten.
  // Note: do not modify the root node in order to preserve the
  // initial sync ended bit for this type (else on the next restart
  // this type will be treated as disabled and therefore fully purged).
  if (IsRealDataType(server_type) &&
      ModelTypeToRootTag(server_type) == entry->ref(UNIQUE_SERVER_TAG)) {
    return;
  }

  // Set the unapplied bit if this item has server data.
  if (IsRealDataType(server_type) && !entry->ref(IS_UNAPPLIED_UPDATE)) {
    entry->put(IS_UNAPPLIED_UPDATE, true);
    kernel_->unapplied_update_metahandles[server_type].insert(handle);
    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  // Unset the unsynced bit.
  if (entry->ref(IS_UNSYNCED)) {
    kernel_->unsynced_metahandles.erase(handle);
    entry->put(IS_UNSYNCED, false);
    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  // Mark the item as locally deleted. No deleted items are allowed in the
  // parent child index.
  if (!entry->ref(IS_DEL)) {
    kernel_->parent_child_index.Remove(entry);
    entry->put(IS_DEL, true);
    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  // Set the version to the "newly created" version.
  if (entry->ref(BASE_VERSION) != CHANGES_VERSION) {
    entry->put(BASE_VERSION, CHANGES_VERSION);
    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  // At this point locally created items that aren't synced will become locally
  // deleted items, and purged on the next snapshot. All other items will match
  // the state they would have had if they were just created via a server
  // update. See MutableEntry::MutableEntry(.., CreateNewUpdateItem, ..).
}

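// Removes |entry| from every in-memory index and schedules its metahandle
// for purging from the database.  If |save_to_journal| is set, ownership of
// the kernel moves to |entries_to_journal|; otherwise it is freed here.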
void Directory::DeleteEntry(bool save_to_journal,
                            EntryKernel* entry,
                            EntryKernelSet* entries_to_journal) {
  int64 handle = entry->ref(META_HANDLE);
  ModelType server_type = GetModelTypeFromSpecifics(
      entry->ref(SERVER_SPECIFICS));

  kernel_->metahandles_to_purge.insert(handle);

  size_t num_erased = 0;
  num_erased = kernel_->metahandles_map.erase(entry->ref(META_HANDLE));
  DCHECK_EQ(1u, num_erased);
  num_erased = kernel_->ids_map.erase(entry->ref(ID).value());
  DCHECK_EQ(1u, num_erased);
  num_erased = kernel_->unsynced_metahandles.erase(handle);
  DCHECK_EQ(entry->ref(IS_UNSYNCED), num_erased > 0);
  num_erased =
      kernel_->unapplied_update_metahandles[server_type].erase(handle);
  DCHECK_EQ(entry->ref(IS_UNAPPLIED_UPDATE), num_erased > 0);
  if (kernel_->parent_child_index.Contains(entry))
    kernel_->parent_child_index.Remove(entry);

  if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) {
    num_erased =
        kernel_->client_tags_map.erase(entry->ref(UNIQUE_CLIENT_TAG));
    DCHECK_EQ(1u, num_erased);
  }
  if (!entry->ref(UNIQUE_SERVER_TAG).empty()) {
    num_erased =
        kernel_->server_tags_map.erase(entry->ref(UNIQUE_SERVER_TAG));
    DCHECK_EQ(1u, num_erased);
  }

  if (save_to_journal) {
    entries_to_journal->insert(entry);
  } else {
    delete entry;
  }
}

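// Purges all entries whose local or server model type is in
// |disabled_types|.  Types in |types_to_unapply| are unapplied rather than
// deleted, and deletions of types in |types_to_journal| are recorded in the
// delete journal.  Proxy types are excluded up front.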
bool Directory::PurgeEntriesWithTypeIn(ModelTypeSet disabled_types,
                                       ModelTypeSet types_to_journal,
                                       ModelTypeSet types_to_unapply) {
  disabled_types.RemoveAll(ProxyTypes());

  if (disabled_types.Empty())
    return true;

  {
    WriteTransaction trans(FROM_HERE, PURGE_ENTRIES, this);

    EntryKernelSet entries_to_journal;
    STLElementDeleter<EntryKernelSet> journal_deleter(&entries_to_journal);

    {
      ScopedKernelLock lock(this);

      // We iterate in two passes to avoid a bug in STLport (which is used in
      // the Android build).  There are some versions of that library where a
      // hash_map's iterators can be invalidated when an item is erased from the
      // hash_map.
      // See http://sourceforge.net/p/stlport/bugs/239/.

      std::set<EntryKernel*> to_purge;
      for (MetahandlesMap::iterator it = kernel_->metahandles_map.begin();
           it != kernel_->metahandles_map.end(); ++it) {
        const sync_pb::EntitySpecifics& local_specifics =
            it->second->ref(SPECIFICS);
        const sync_pb::EntitySpecifics& server_specifics =
            it->second->ref(SERVER_SPECIFICS);
        ModelType local_type = GetModelTypeFromSpecifics(local_specifics);
        ModelType server_type = GetModelTypeFromSpecifics(server_specifics);

        if ((IsRealDataType(local_type) && disabled_types.Has(local_type)) ||
            (IsRealDataType(server_type) && disabled_types.Has(server_type))) {
          to_purge.insert(it->second);
        }
      }

      for (std::set<EntryKernel*>::iterator it = to_purge.begin();
           it != to_purge.end(); ++it) {
        EntryKernel* entry = *it;

        const sync_pb::EntitySpecifics& local_specifics =
            (*it)->ref(SPECIFICS);
        const sync_pb::EntitySpecifics& server_specifics =
            (*it)->ref(SERVER_SPECIFICS);
        ModelType local_type = GetModelTypeFromSpecifics(local_specifics);
        ModelType server_type = GetModelTypeFromSpecifics(server_specifics);

        if (types_to_unapply.Has(local_type) ||
            types_to_unapply.Has(server_type)) {
          UnapplyEntry(entry);
        } else {
          bool save_to_journal =
              (types_to_journal.Has(local_type) ||
               types_to_journal.Has(server_type)) &&
              (delete_journal_->IsDeleteJournalEnabled(local_type) ||
               delete_journal_->IsDeleteJournalEnabled(server_type));
          DeleteEntry(save_to_journal, entry, &entries_to_journal);
        }
      }

      delete_journal_->AddJournalBatch(&trans, entries_to_journal);

      // Ensure meta tracking for these data types reflects the purged state.
      for (ModelTypeSet::Iterator it = disabled_types.First();
           it.Good(); it.Inc()) {
        kernel_->persisted_info.transaction_version[it.Get()] = 0;

        // Don't discard progress markers for unapplied types.
        if (!types_to_unapply.Has(it.Get()))
          kernel_->persisted_info.reset_download_progress(it.Get());
      }
    }
  }
  return true;
}

void Directory::HandleSaveChangesFailure(const SaveChangesSnapshot& snapshot) {
  WriteTransaction trans(FROM_HERE, HANDLE_SAVE_FAILURE, this);
  ScopedKernelLock lock(this);
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;

  // Because we optimistically cleared the dirty bit on the real entries when
  // taking the snapshot, we must restore it on failure.  Not doing this could
  // cause lost data, if no other changes are made to the in-memory entries
  // that would cause the dirty bit to get set again. Setting the bit ensures
  // that SaveChanges will at least try again later.
  for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
       i != snapshot.dirty_metas.end(); ++i) {
    MetahandlesMap::iterator found =
        kernel_->metahandles_map.find((*i)->ref(META_HANDLE));
    if (found != kernel_->metahandles_map.end()) {
      found->second->mark_dirty(&kernel_->dirty_metahandles);
    }
  }

  kernel_->metahandles_to_purge.insert(snapshot.metahandles_to_purge.begin(),
                                       snapshot.metahandles_to_purge.end());

  // Restore delete journals.
  delete_journal_->AddJournalBatch(&trans, snapshot.delete_journals);
  delete_journal_->PurgeDeleteJournals(&trans,
                                       snapshot.delete_journals_to_purge);
}

void Directory::GetDownloadProgress(
    ModelType model_type,
    sync_pb::DataTypeProgressMarker* value_out) const {
  ScopedKernelLock lock(this);
  value_out->CopyFrom(
      kernel_->persisted_info.download_progress[model_type]);
}

void Directory::GetDownloadProgressAsString(
    ModelType model_type,
    std::string* value_out) const {
  ScopedKernelLock lock(this);
  kernel_->persisted_info.download_progress[model_type].SerializeToString(
      value_out);
}

size_t Directory::GetEntriesCount() const {
  ScopedKernelLock lock(this);
  return kernel_->metahandles_map.size();
}

void Directory::SetDownloadProgress(
    ModelType model_type,
    const sync_pb::DataTypeProgressMarker& new_progress) {
  ScopedKernelLock lock(this);
  kernel_->persisted_info.download_progress[model_type].CopyFrom(new_progress);
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
}

int64 Directory::GetTransactionVersion(ModelType type) const {
  kernel_->transaction_mutex.AssertAcquired();
  return kernel_->persisted_info.transaction_version[type];
}

void Directory::IncrementTransactionVersion(ModelType type) {
  kernel_->transaction_mutex.AssertAcquired();
  kernel_->persisted_info.transaction_version[type]++;
}

ModelTypeSet Directory::InitialSyncEndedTypes() {
  syncable::ReadTransaction trans(FROM_HERE, this);
  ModelTypeSet protocol_types = ProtocolTypes();
  ModelTypeSet initial_sync_ended_types;
  for (ModelTypeSet::Iterator i = protocol_types.First(); i.Good(); i.Inc()) {
    if (InitialSyncEndedForType(&trans, i.Get())) {
      initial_sync_ended_types.Put(i.Get());
    }
  }
  return initial_sync_ended_types;
}

bool Directory::InitialSyncEndedForType(ModelType type) {
  syncable::ReadTransaction trans(FROM_HERE, this);
  return InitialSyncEndedForType(&trans, type);
}

bool Directory::InitialSyncEndedForType(
    BaseTransaction* trans, ModelType type) {
  // True iff the type's root node has been received and applied.
  syncable::Entry entry(trans,
                        syncable::GET_BY_SERVER_TAG,
                        ModelTypeToRootTag(type));
  return entry.good() && entry.GetBaseVersion() != CHANGES_VERSION;
}

string Directory::store_birthday() const {
  ScopedKernelLock lock(this);
  return kernel_->persisted_info.store_birthday;
}

void Directory::set_store_birthday(const string& store_birthday) {
  ScopedKernelLock lock(this);
  if (kernel_->persisted_info.store_birthday == store_birthday)
    return;
  kernel_->persisted_info.store_birthday = store_birthday;
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
}

string Directory::bag_of_chips() const {
  ScopedKernelLock lock(this);
  return kernel_->persisted_info.bag_of_chips;
}

void Directory::set_bag_of_chips(const string& bag_of_chips) {
  ScopedKernelLock lock(this);
  if (kernel_->persisted_info.bag_of_chips == bag_of_chips)
    return;
  kernel_->persisted_info.bag_of_chips = bag_of_chips;
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
}

string Directory::cache_guid() const {
  // No need to lock since nothing ever writes to it after load.
  return kernel_->cache_guid;
}

NigoriHandler* Directory::GetNigoriHandler() {
  return nigori_handler_;
}

Cryptographer* Directory::GetCryptographer(const BaseTransaction* trans) {
  DCHECK_EQ(this, trans->directory());
  return cryptographer_;
}

void Directory::GetAllMetaHandles(BaseTransaction* trans,
                                  MetahandleSet* result) {
  result->clear();
  ScopedKernelLock lock(this);
  for (MetahandlesMap::iterator i = kernel_->metahandles_map.begin();
       i != kernel_->metahandles_map.end(); ++i) {
    result->insert(i->first);
  }
}

void Directory::GetAllEntryKernels(BaseTransaction* trans,
                                   std::vector<const EntryKernel*>* result) {
  result->clear();
  ScopedKernelLock lock(this);
  for (MetahandlesMap::iterator i = kernel_->metahandles_map.begin();
       i != kernel_->metahandles_map.end(); ++i) {
    result->push_back(i->second);
  }
}

void Directory::GetUnsyncedMetaHandles(BaseTransaction* trans,
                                       Metahandles* result) {
  result->clear();
  ScopedKernelLock lock(this);
  copy(kernel_->unsynced_metahandles.begin(),
       kernel_->unsynced_metahandles.end(), back_inserter(*result));
}

int64 Directory::unsynced_entity_count() const {
  ScopedKernelLock lock(this);
  return kernel_->unsynced_metahandles.size();
}

FullModelTypeSet Directory::GetServerTypesWithUnappliedUpdates(
    BaseTransaction* trans) const {
  FullModelTypeSet server_types;
  ScopedKernelLock lock(this);
  for (int i = UNSPECIFIED; i < MODEL_TYPE_COUNT; ++i) {
    const ModelType type = ModelTypeFromInt(i);
    if (!kernel_->unapplied_update_metahandles[type].empty()) {
      server_types.Put(type);
    }
  }
  return server_types;
}

void Directory::GetUnappliedUpdateMetaHandles(
    BaseTransaction* trans,
    FullModelTypeSet server_types,
    std::vector<int64>* result) {
  result->clear();
  ScopedKernelLock lock(this);
  for (int i = UNSPECIFIED; i < MODEL_TYPE_COUNT; ++i) {
    const ModelType type = ModelTypeFromInt(i);
    if (server_types.Has(type)) {
      std::copy(kernel_->unapplied_update_metahandles[type].begin(),
                kernel_->unapplied_update_metahandles[type].end(),
                back_inserter(*result));
    }
  }
}

void Directory::CollectMetaHandleCounts(
    std::vector<int>* num_entries_by_type,
    std::vector<int>* num_to_delete_entries_by_type) {
  syncable::ReadTransaction trans(FROM_HERE, this);
  ScopedKernelLock lock(this);

  for (MetahandlesMap::iterator it = kernel_->metahandles_map.begin();
       it != kernel_->metahandles_map.end(); ++it) {
    EntryKernel* entry = it->second;
    const ModelType type = GetModelTypeFromSpecifics(entry->ref(SPECIFICS));
    (*num_entries_by_type)[type]++;
    if (entry->ref(IS_DEL))
      (*num_to_delete_entries_by_type)[type]++;
  }
}

bool Directory::CheckInvariantsOnTransactionClose(
    syncable::BaseTransaction* trans,
    const MetahandleSet& modified_handles) {
  // NOTE: The trans may be in the process of being destructed.  Be careful if
  // you wish to call any of its virtual methods.
  switch (invariant_check_level_) {
    case FULL_DB_VERIFICATION: {
      MetahandleSet all_handles;
      GetAllMetaHandles(trans, &all_handles);
      return CheckTreeInvariants(trans, all_handles);
    }
    case VERIFY_CHANGES: {
      return CheckTreeInvariants(trans, modified_handles);
    }
    case OFF: {
      return true;
    }
  }
  NOTREACHED();
  return false;
}

bool Directory::FullyCheckTreeInvariants(syncable::BaseTransaction* trans) {
  MetahandleSet handles;
  GetAllMetaHandles(trans, &handles);
  return CheckTreeInvariants(trans, handles);
}

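// Verifies structural invariants (parent links, names, versions and tags)
// for every entry named in |handles|, reporting the first violation through
// SyncAssert and returning false.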
bool Directory::CheckTreeInvariants(syncable::BaseTransaction* trans,
                                    const MetahandleSet& handles) {
  MetahandleSet::const_iterator i;
  for (i = handles.begin(); i != handles.end(); ++i) {
    int64 metahandle = *i;
    Entry e(trans, GET_BY_HANDLE, metahandle);
    if (!SyncAssert(e.good(), FROM_HERE, "Entry is bad", trans))
      return false;
    syncable::Id id = e.GetId();
    syncable::Id parentid = e.GetParentId();

    if (id.IsRoot()) {
      if (!SyncAssert(e.GetIsDir(), FROM_HERE,
                      "Entry should be a directory",
                      trans))
        return false;
      if (!SyncAssert(parentid.IsRoot(), FROM_HERE,
                      "Entry should be root",
                      trans))
        return false;
      if (!SyncAssert(!e.GetIsUnsynced(), FROM_HERE,
                      "Entry should be synced",
                      trans))
        return false;
      continue;
    }

    if (!e.GetIsDel()) {
      if (!SyncAssert(id != parentid, FROM_HERE,
                      "Id should be different from parent id.",
                      trans))
        return false;
      if (!SyncAssert(!e.GetNonUniqueName().empty(), FROM_HERE,
                      "Non unique name should not be empty.",
                      trans))
        return false;
      int safety_count = handles.size() + 1;
      while (!parentid.IsRoot()) {
        Entry parent(trans, GET_BY_ID, parentid);
        if (!SyncAssert(parent.good(), FROM_HERE,
                        "Parent entry is not valid.",
                        trans))
          return false;
        if (handles.end() == handles.find(parent.GetMetahandle()))
          break;  // Skip further checking if parent was unmodified.
        if (!SyncAssert(parent.GetIsDir(), FROM_HERE,
                        "Parent should be a directory",
                        trans))
          return false;
        if (!SyncAssert(!parent.GetIsDel(), FROM_HERE,
                        "Parent should not have been marked for deletion.",
                        trans))
          return false;
        if (!SyncAssert(handles.end() != handles.find(parent.GetMetahandle()),
                        FROM_HERE,
                        "Parent should be in the index.",
                        trans))
          return false;
        parentid = parent.GetParentId();
        if (!SyncAssert(--safety_count > 0, FROM_HERE,
                        "Count should be greater than zero.",
                        trans))
          return false;
      }
    }
    int64 base_version = e.GetBaseVersion();
    int64 server_version = e.GetServerVersion();
    bool using_unique_client_tag = !e.GetUniqueClientTag().empty();
    if (CHANGES_VERSION == base_version || 0 == base_version) {
      if (e.GetIsUnappliedUpdate()) {
        // Must be a new item, or a de-duplicated unique client tag
        // that was created both locally and remotely.
        if (!using_unique_client_tag) {
          if (!SyncAssert(e.GetIsDel(), FROM_HERE,
                          "The entry should have been deleted.",
                          trans))
            return false;
        }
        // It came from the server, so it must have a server ID.
        if (!SyncAssert(id.ServerKnows(), FROM_HERE,
                        "The id should be from a server.",
                        trans))
          return false;
      } else {
        if (e.GetIsDir()) {
          // TODO(chron): Implement this mode if clients ever need it.
          // For now, you can't combine a client tag and a directory.
          if (!SyncAssert(!using_unique_client_tag, FROM_HERE,
                          "Directory cannot have a client tag.",
                          trans))
            return false;
        }
        // Should be an uncommitted item, or a successfully deleted one.
        if (!e.GetIsDel()) {
          if (!SyncAssert(e.GetIsUnsynced(), FROM_HERE,
                          "The item should be unsynced.",
                          trans))
            return false;
        }
        // If the next check failed, it would imply that an item exists
        // on the server, isn't waiting for application locally, but either
        // is an unsynced create or a successful delete in the local copy.
        // Either way, that's a mismatch.
        if (!SyncAssert(0 == server_version, FROM_HERE,
                        "Server version should be zero.",
                        trans))
          return false;
        // Items that aren't using the unique client tag should have a zero
        // base version only if they have a local ID.  Items with unique client
        // tags are allowed to use the zero base version for undeletion and
        // de-duplication; the unique client tag trumps the server ID.
        if (!using_unique_client_tag) {
          if (!SyncAssert(!id.ServerKnows(), FROM_HERE,
                          "Should be a client only id.",
                          trans))
            return false;
        }
      }
    } else {
      if (!SyncAssert(id.ServerKnows(),
                      FROM_HERE,
                      "Should be a server id.",
                      trans))
        return false;
    }
    // Server-unknown items that are locally deleted should not be sent up to
    // the server.  They must be !IS_UNSYNCED.
    if (!SyncAssert(!(!id.ServerKnows() && e.GetIsDel() && e.GetIsUnsynced()),
                    FROM_HERE,
                    "Locally deleted item must not be unsynced.",
                    trans)) {
      return false;
    }
  }
  return true;
}

void Directory::SetInvariantCheckLevel(InvariantCheckLevel check_level) {
  invariant_check_level_ = check_level;
}

int64 Directory::NextMetahandle() {
  ScopedKernelLock lock(this);
  int64 metahandle = (kernel_->next_metahandle)++;
  return metahandle;
}

// Always returns a client ID that is the string representation of a negative
// number.
Id Directory::NextId() {
  int64 result;
  {
    ScopedKernelLock lock(this);
    result = (kernel_->persisted_info.next_id)--;
    kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
  }
  DCHECK_LT(result, 0);
  return Id::CreateFromClientString(base::Int64ToString(result));
}

bool Directory::HasChildren(BaseTransaction* trans, const Id& id) {
  ScopedKernelLock lock(this);
  return kernel_->parent_child_index.GetChildren(id) != NULL;
}

Id Directory::GetFirstChildId(BaseTransaction* trans,
                              const EntryKernel* parent) {
  DCHECK(parent);
  DCHECK(parent->ref(IS_DIR));

  ScopedKernelLock lock(this);
  const OrderedChildSet* children =
      kernel_->parent_child_index.GetChildren(parent->ref(ID));

  // We're expected to return root if there are no children.
  if (!children)
    return Id();

  return (*children->begin())->ref(ID);
}

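// Returns the ID of the sibling ordered immediately before |e|, or the root
// ID if |e| is the first child of its parent.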
syncable::Id Directory::GetPredecessorId(EntryKernel* e) {
  ScopedKernelLock lock(this);

  DCHECK(ParentChildIndex::ShouldInclude(e));
  const OrderedChildSet* children =
      kernel_->parent_child_index.GetChildren(e->ref(PARENT_ID));
  DCHECK(children && !children->empty());
  OrderedChildSet::const_iterator i = children->find(e);
  DCHECK(i != children->end());

  if (i == children->begin()) {
    return Id();
  } else {
    i--;
    return (*i)->ref(ID);
  }
}

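// Returns the ID of the sibling ordered immediately after |e|, or the root
// ID if |e| is the last child of its parent.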
syncable::Id Directory::GetSuccessorId(EntryKernel* e) {
  ScopedKernelLock lock(this);

  DCHECK(ParentChildIndex::ShouldInclude(e));
  const OrderedChildSet* children =
      kernel_->parent_child_index.GetChildren(e->ref(PARENT_ID));
  DCHECK(children && !children->empty());
  OrderedChildSet::const_iterator i = children->find(e);
  DCHECK(i != children->end());

  i++;
  if (i == children->end()) {
    return Id();
  } else {
    return (*i)->ref(ID);
  }
}

// TODO(rlarocque): Remove all support for placing ShouldMaintainPosition()
// items as siblings of items that do not maintain positions.  It is required
// only for tests.  See crbug.com/178282.
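// Assigns |e| a UniquePosition that sorts it immediately after
// |predecessor| among its siblings.  A predecessor with the root ID means
// "insert first".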
void Directory::PutPredecessor(EntryKernel* e, EntryKernel* predecessor) {
  DCHECK(!e->ref(IS_DEL));
  if (!e->ShouldMaintainPosition()) {
    DCHECK(!e->ref(UNIQUE_POSITION).IsValid());
    return;
  }
  std::string suffix = e->ref(UNIQUE_BOOKMARK_TAG);
  DCHECK(!suffix.empty());

  // Remove our item from the ParentChildIndex and remember to re-add it later.
  ScopedKernelLock lock(this);
  ScopedParentChildIndexUpdater updater(lock, e, &kernel_->parent_child_index);

  // Note: The ScopedParentChildIndexUpdater will update this set for us as we
  // leave this function.
  const OrderedChildSet* siblings =
      kernel_->parent_child_index.GetChildren(e->ref(PARENT_ID));

  if (!siblings) {
    // This parent currently has no other children.
    DCHECK(predecessor->ref(ID).IsRoot());
    UniquePosition pos = UniquePosition::InitialPosition(suffix);
    e->put(UNIQUE_POSITION, pos);
    return;
  }

  if (predecessor->ref(ID).IsRoot()) {
    // We have at least one sibling, and we're inserting to the left of them.
    UniquePosition successor_pos = (*siblings->begin())->ref(UNIQUE_POSITION);

    UniquePosition pos;
    if (!successor_pos.IsValid()) {
      // If all our successors are of non-positionable types, just create an
      // initial position.  We arbitrarily choose to sort invalid positions to
      // the right of the valid positions.
      //
      // We really shouldn't need to support this.  See TODO above.
      pos = UniquePosition::InitialPosition(suffix);
    } else {
      DCHECK(!siblings->empty());
      pos = UniquePosition::Before(successor_pos, suffix);
    }

    e->put(UNIQUE_POSITION, pos);
    return;
  }

  // We can't support placing an item after an invalid position.  Fortunately,
  // the tests don't exercise this particular case.  We should not support
  // siblings with invalid positions at all.  See TODO above.
  DCHECK(predecessor->ref(UNIQUE_POSITION).IsValid());

  OrderedChildSet::const_iterator neighbour = siblings->find(predecessor);
  DCHECK(neighbour != siblings->end());

  ++neighbour;
  if (neighbour == siblings->end()) {
    // Inserting at the end of the list.
    UniquePosition pos = UniquePosition::After(
        predecessor->ref(UNIQUE_POSITION),
        suffix);
    e->put(UNIQUE_POSITION, pos);
    return;
  }

  EntryKernel* successor = *neighbour;

  // Another mixed valid and invalid position case.  This one could be
  // supported in theory, but we're trying to deprecate support for siblings
  // with and without valid positions.  See TODO above.
  DCHECK(successor->ref(UNIQUE_POSITION).IsValid());

  // Finally, the normal case: inserting between two elements.
  UniquePosition pos = UniquePosition::Between(
      predecessor->ref(UNIQUE_POSITION),
      successor->ref(UNIQUE_POSITION),
      suffix);
  e->put(UNIQUE_POSITION, pos);
}

// TODO(rlarocque): Avoid this indirection.  Just return the set.
void Directory::AppendChildHandles(const ScopedKernelLock& lock,
                                   const Id& parent_id,
                                   Directory::Metahandles* result) {
  const OrderedChildSet* children =
      kernel_->parent_child_index.GetChildren(parent_id);
  if (!children)
    return;

  for (OrderedChildSet::const_iterator i = children->begin();
       i != children->end(); ++i) {
    DCHECK_EQ(parent_id, (*i)->ref(PARENT_ID));
    result->push_back((*i)->ref(META_HANDLE));
  }
}

}  // namespace syncable
}  // namespace syncer