recyclable_chunks_queue_[i] = i;
}
- virtual scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) OVERRIDE {
+ scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
// Because the number of threads is much less than the number of chunks,
// the queue should never be empty.
DCHECK(!QueueIsEmpty());
return scoped_ptr<TraceBufferChunk>(chunk);
}
- virtual void ReturnChunk(size_t index,
- scoped_ptr<TraceBufferChunk> chunk) OVERRIDE {
+ void ReturnChunk(size_t index, scoped_ptr<TraceBufferChunk> chunk) override {
// When this method is called, the queue should not be full because it
// can contain all chunks including the one to be returned.
DCHECK(!QueueIsFull());
queue_tail_ = NextQueueIndex(queue_tail_);
}
- virtual bool IsFull() const OVERRIDE {
- return false;
- }
+ bool IsFull() const override { return false; }
- virtual size_t Size() const OVERRIDE {
+ size_t Size() const override {
// This is approximate because not all of the chunks are full.
return chunks_.size() * kTraceBufferChunkSize;
}
- virtual size_t Capacity() const OVERRIDE {
+ size_t Capacity() const override {
return max_chunks_ * kTraceBufferChunkSize;
}
- virtual TraceEvent* GetEventByHandle(TraceEventHandle handle) OVERRIDE {
+ TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
if (handle.chunk_index >= chunks_.size())
return NULL;
TraceBufferChunk* chunk = chunks_[handle.chunk_index];
return chunk->GetEventAt(handle.event_index);
}
- virtual const TraceBufferChunk* NextChunk() OVERRIDE {
+ const TraceBufferChunk* NextChunk() override {
if (chunks_.empty())
return NULL;
return NULL;
}
- virtual scoped_ptr<TraceBuffer> CloneForIteration() const OVERRIDE {
+ scoped_ptr<TraceBuffer> CloneForIteration() const override {
scoped_ptr<ClonedTraceBuffer> cloned_buffer(new ClonedTraceBuffer());
for (size_t queue_index = queue_head_; queue_index != queue_tail_;
queue_index = NextQueueIndex(queue_index)) {
TraceBufferChunk* chunk = chunks_[chunk_index];
cloned_buffer->chunks_.push_back(chunk ? chunk->Clone().release() : NULL);
}
- return cloned_buffer.PassAs<TraceBuffer>();
+ return cloned_buffer.Pass();
}
private:
ClonedTraceBuffer() : current_iteration_index_(0) {}
// The only implemented method.
- virtual const TraceBufferChunk* NextChunk() OVERRIDE {
+ const TraceBufferChunk* NextChunk() override {
return current_iteration_index_ < chunks_.size() ?
chunks_[current_iteration_index_++] : NULL;
}
- virtual scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) OVERRIDE {
+ scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
NOTIMPLEMENTED();
return scoped_ptr<TraceBufferChunk>();
}
- virtual void ReturnChunk(size_t index,
- scoped_ptr<TraceBufferChunk>) OVERRIDE {
+ void ReturnChunk(size_t index, scoped_ptr<TraceBufferChunk>) override {
NOTIMPLEMENTED();
}
- virtual bool IsFull() const OVERRIDE { return false; }
- virtual size_t Size() const OVERRIDE { return 0; }
- virtual size_t Capacity() const OVERRIDE { return 0; }
- virtual TraceEvent* GetEventByHandle(TraceEventHandle handle) OVERRIDE {
+ bool IsFull() const override { return false; }
+ size_t Size() const override { return 0; }
+ size_t Capacity() const override { return 0; }
+ TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
return NULL;
}
- virtual scoped_ptr<TraceBuffer> CloneForIteration() const OVERRIDE {
+ scoped_ptr<TraceBuffer> CloneForIteration() const override {
NOTIMPLEMENTED();
return scoped_ptr<TraceBuffer>();
}
chunks_.reserve(max_chunks_);
}
- virtual scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) OVERRIDE {
+ scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
// This function may be called when adding normal events or indirectly from
// AddMetadataEventsWhileLocked(). We can not DCHECK(!IsFull()) because we
// have to add the metadata events and flush thread-local buffers even if
new TraceBufferChunk(static_cast<uint32>(*index) + 1));
}
- virtual void ReturnChunk(size_t index,
- scoped_ptr<TraceBufferChunk> chunk) OVERRIDE {
+ void ReturnChunk(size_t index, scoped_ptr<TraceBufferChunk> chunk) override {
DCHECK_GT(in_flight_chunk_count_, 0u);
DCHECK_LT(index, chunks_.size());
DCHECK(!chunks_[index]);
chunks_[index] = chunk.release();
}
- virtual bool IsFull() const OVERRIDE {
- return chunks_.size() >= max_chunks_;
- }
+ bool IsFull() const override { return chunks_.size() >= max_chunks_; }
- virtual size_t Size() const OVERRIDE {
+ size_t Size() const override {
// This is approximate because not all of the chunks are full.
return chunks_.size() * kTraceBufferChunkSize;
}
- virtual size_t Capacity() const OVERRIDE {
+ size_t Capacity() const override {
return max_chunks_ * kTraceBufferChunkSize;
}
- virtual TraceEvent* GetEventByHandle(TraceEventHandle handle) OVERRIDE {
+ TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
if (handle.chunk_index >= chunks_.size())
return NULL;
TraceBufferChunk* chunk = chunks_[handle.chunk_index];
return chunk->GetEventAt(handle.event_index);
}
- virtual const TraceBufferChunk* NextChunk() OVERRIDE {
+ const TraceBufferChunk* NextChunk() override {
while (current_iteration_index_ < chunks_.size()) {
// Skip in-flight chunks.
const TraceBufferChunk* chunk = chunks_[current_iteration_index_++];
return NULL;
}
- virtual scoped_ptr<TraceBuffer> CloneForIteration() const OVERRIDE {
+ scoped_ptr<TraceBuffer> CloneForIteration() const override {
NOTIMPLEMENTED();
return scoped_ptr<TraceBuffer>();
}
class TraceSamplingThread : public PlatformThread::Delegate {
public:
TraceSamplingThread();
- virtual ~TraceSamplingThread();
+ ~TraceSamplingThread() override;
// Implementation of PlatformThread::Delegate:
- virtual void ThreadMain() OVERRIDE;
+ void ThreadMain() override;
static void DefaultSamplingCallback(TraceBucketData* bucket_data);
: public MessageLoop::DestructionObserver {
public:
ThreadLocalEventBuffer(TraceLog* trace_log);
- virtual ~ThreadLocalEventBuffer();
+ ~ThreadLocalEventBuffer() override;
TraceEvent* AddTraceEvent(TraceEventHandle* handle);
private:
// MessageLoop::DestructionObserver
- virtual void WillDestroyCurrentMessageLoop() OVERRIDE;
+ void WillDestroyCurrentMessageLoop() override;
void FlushWhileLocked();
char* duration_end;
double target_duration = strtod(token.c_str(), &duration_end);
if (duration_end != token.c_str()) {
- delay->SetTargetDuration(
- TimeDelta::FromMicroseconds(target_duration * 1e6));
+ delay->SetTargetDuration(TimeDelta::FromMicroseconds(
+ static_cast<int64>(target_duration * 1e6)));
} else if (token == "static") {
delay->SetMode(TraceEventSyntheticDelay::STATIC);
} else if (token == "oneshot") {