// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file contains the tests for the FencedAllocator class.

#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/memory/aligned_memory.h"
#include "base/message_loop/message_loop.h"
#include "gpu/command_buffer/client/cmd_buffer_helper.h"
#include "gpu/command_buffer/client/fenced_allocator.h"
#include "gpu/command_buffer/service/cmd_buffer_engine.h"
#include "gpu/command_buffer/service/command_buffer_service.h"
#include "gpu/command_buffer/service/gpu_scheduler.h"
#include "gpu/command_buffer/service/mocks.h"
#include "gpu/command_buffer/service/transfer_buffer_manager.h"
#include "testing/gtest/include/gtest/gtest.h"

#if defined(OS_MACOSX)
#include "base/mac/scoped_nsautorelease_pool.h"
#endif

namespace gpu {

using testing::Return;
using testing::Mock;
using testing::Truly;
using testing::Sequence;
using testing::DoAll;
using testing::Invoke;
using testing::InvokeWithoutArgs;
using testing::_;

class BaseFencedAllocatorTest : public testing::Test {
 protected:
  static const unsigned int kBufferSize = 1024;
  static const int kAllocAlignment = 16;

  virtual void SetUp() {
    api_mock_.reset(new AsyncAPIMock);
    // Ignore noops in the mock - we don't want to inspect the internals of the
    // helper.
    EXPECT_CALL(*api_mock_, DoCommand(cmd::kNoop, 0, _))
        .WillRepeatedly(Return(error::kNoError));
    // Forward the SetToken calls to the engine.
    EXPECT_CALL(*api_mock_, DoCommand(cmd::kSetToken, 1, _))
        .WillRepeatedly(DoAll(Invoke(api_mock_.get(), &AsyncAPIMock::SetToken),
                              Return(error::kNoError)));

    {
      TransferBufferManager* manager = new TransferBufferManager();
      transfer_buffer_manager_.reset(manager);
      EXPECT_TRUE(manager->Initialize());
    }
    command_buffer_.reset(
        new CommandBufferService(transfer_buffer_manager_.get()));
    EXPECT_TRUE(command_buffer_->Initialize());

    gpu_scheduler_.reset(new GpuScheduler(
        command_buffer_.get(), api_mock_.get(), NULL));
    command_buffer_->SetPutOffsetChangeCallback(base::Bind(
        &GpuScheduler::PutChanged, base::Unretained(gpu_scheduler_.get())));
    command_buffer_->SetGetBufferChangeCallback(base::Bind(
        &GpuScheduler::SetGetBuffer, base::Unretained(gpu_scheduler_.get())));
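    // With this wiring the scheduler runs on the test thread: flushing the
    // command buffer synchronously parses the pending commands and dispatches
    // them to the mock API. The helper only flushes when it has to wait for
    // something, which the tests below rely on.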

    api_mock_->set_engine(gpu_scheduler_.get());

    helper_.reset(new CommandBufferHelper(command_buffer_.get()));
    helper_->Initialize(kBufferSize);
  }

  int32 GetToken() {
    return command_buffer_->GetLastState().token;
  }

#if defined(OS_MACOSX)
  base::mac::ScopedNSAutoreleasePool autorelease_pool_;
#endif
  base::MessageLoop message_loop_;
  scoped_ptr<AsyncAPIMock> api_mock_;
  scoped_ptr<TransferBufferManagerInterface> transfer_buffer_manager_;
  scoped_ptr<CommandBufferService> command_buffer_;
  scoped_ptr<GpuScheduler> gpu_scheduler_;
  scoped_ptr<CommandBufferHelper> helper_;
};

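// An out-of-class definition is required for the ODR-used static constant;
// MSVC, however, rejects it as a duplicate definition, hence the guard.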
#ifndef _MSC_VER
const unsigned int BaseFencedAllocatorTest::kBufferSize;
#endif

namespace {
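// A no-op poll callback for tests that don't exercise polling.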
void EmptyPoll() {
}
}  // namespace

// Test fixture for FencedAllocator test - Creates a FencedAllocator, using a
// CommandBufferHelper with a mock AsyncAPIInterface for its interface (calling
// it directly, not through the RPC mechanism), making sure Noops are ignored
// and SetToken calls are properly forwarded to the engine.
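//
// The allocation pattern these tests exercise is, in sketch form:
//   FencedAllocator::Offset offset = allocator->Alloc(size);
//   // ... use the range [offset, offset + size) ...
//   allocator->FreePendingToken(offset, helper->InsertToken());
// A block freed pending a token only becomes reusable once that token has
// passed on the service side.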
class FencedAllocatorTest : public BaseFencedAllocatorTest {
 protected:
  virtual void SetUp() {
    BaseFencedAllocatorTest::SetUp();
    allocator_.reset(new FencedAllocator(kBufferSize,
                                         helper_.get(),
                                         base::Bind(&EmptyPoll)));
  }

  virtual void TearDown() {
    // If the GpuScheduler posts any tasks, this forces them to run.
    base::MessageLoop::current()->RunUntilIdle();

    EXPECT_TRUE(allocator_->CheckConsistency());

    BaseFencedAllocatorTest::TearDown();
  }

  scoped_ptr<FencedAllocator> allocator_;
};

// Checks basic alloc and free.
TEST_F(FencedAllocatorTest, TestBasic) {
  allocator_->CheckConsistency();
  EXPECT_FALSE(allocator_->InUse());

  const unsigned int kSize = 16;
  FencedAllocator::Offset offset = allocator_->Alloc(kSize);
  EXPECT_TRUE(allocator_->InUse());
  EXPECT_NE(FencedAllocator::kInvalidOffset, offset);
  EXPECT_GE(kBufferSize, offset+kSize);
  EXPECT_TRUE(allocator_->CheckConsistency());

  allocator_->Free(offset);
  EXPECT_FALSE(allocator_->InUse());
  EXPECT_TRUE(allocator_->CheckConsistency());
}

// Checks that allocating zero bytes fails.
TEST_F(FencedAllocatorTest, TestAllocZero) {
  FencedAllocator::Offset offset = allocator_->Alloc(0);
  EXPECT_EQ(FencedAllocator::kInvalidOffset, offset);
  EXPECT_FALSE(allocator_->InUse());
  EXPECT_TRUE(allocator_->CheckConsistency());
}

// Checks out-of-memory condition.
TEST_F(FencedAllocatorTest, TestOutOfMemory) {
  EXPECT_TRUE(allocator_->CheckConsistency());

  const unsigned int kSize = 16;
  const unsigned int kAllocCount = kBufferSize / kSize;
  CHECK(kAllocCount * kSize == kBufferSize);

  // Allocate several buffers to fill in the memory.
  FencedAllocator::Offset offsets[kAllocCount];
  for (unsigned int i = 0; i < kAllocCount; ++i) {
    offsets[i] = allocator_->Alloc(kSize);
    EXPECT_NE(FencedAllocator::kInvalidOffset, offsets[i]);
    EXPECT_GE(kBufferSize, offsets[i]+kSize);
    EXPECT_TRUE(allocator_->CheckConsistency());
  }

  // This allocation should fail.
  FencedAllocator::Offset offset_failed = allocator_->Alloc(kSize);
  EXPECT_EQ(FencedAllocator::kInvalidOffset, offset_failed);
  EXPECT_TRUE(allocator_->CheckConsistency());

  // Free one successful allocation, reallocate with half the size.
  allocator_->Free(offsets[0]);
  EXPECT_TRUE(allocator_->CheckConsistency());
  offsets[0] = allocator_->Alloc(kSize/2);
  EXPECT_NE(FencedAllocator::kInvalidOffset, offsets[0]);
  EXPECT_GE(kBufferSize, offsets[0]+kSize/2);
  EXPECT_TRUE(allocator_->CheckConsistency());

  // This allocation should fail as well.
  offset_failed = allocator_->Alloc(kSize);
  EXPECT_EQ(FencedAllocator::kInvalidOffset, offset_failed);
  EXPECT_TRUE(allocator_->CheckConsistency());

  // Free up everything.
  for (unsigned int i = 0; i < kAllocCount; ++i) {
    allocator_->Free(offsets[i]);
    EXPECT_TRUE(allocator_->CheckConsistency());
  }
}

// Checks the free-pending-token mechanism.
TEST_F(FencedAllocatorTest, TestFreePendingToken) {
  EXPECT_TRUE(allocator_->CheckConsistency());

  const unsigned int kSize = 16;
  const unsigned int kAllocCount = kBufferSize / kSize;
  CHECK(kAllocCount * kSize == kBufferSize);

  // Allocate several buffers to fill in the memory.
  FencedAllocator::Offset offsets[kAllocCount];
  for (unsigned int i = 0; i < kAllocCount; ++i) {
    offsets[i] = allocator_->Alloc(kSize);
    EXPECT_NE(FencedAllocator::kInvalidOffset, offsets[i]);
    EXPECT_GE(kBufferSize, offsets[i]+kSize);
    EXPECT_TRUE(allocator_->CheckConsistency());
  }

  // This allocation should fail.
  FencedAllocator::Offset offset_failed = allocator_->Alloc(kSize);
  EXPECT_EQ(FencedAllocator::kInvalidOffset, offset_failed);
  EXPECT_TRUE(allocator_->CheckConsistency());

  // Free one successful allocation, pending fence.
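  // InsertToken() queues a SetToken command; the mock forwards it to the
  // engine when the command is eventually processed (see SetUp), which is
  // what makes GetToken() advance.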
  int32 token = helper_->InsertToken();
  allocator_->FreePendingToken(offsets[0], token);
  EXPECT_TRUE(allocator_->CheckConsistency());

  // The way the helper and engine are hooked up, commands are not processed
  // until the helper has to wait for something, which means the token should
  // not have passed yet at this point.
  EXPECT_GT(token, GetToken());

  // This allocation will need to reclaim the space freed above, so that should
  // process the commands until the token is passed.
  offsets[0] = allocator_->Alloc(kSize);
  EXPECT_NE(FencedAllocator::kInvalidOffset, offsets[0]);
  EXPECT_GE(kBufferSize, offsets[0]+kSize);
  EXPECT_TRUE(allocator_->CheckConsistency());
  // Check that the token has indeed passed.
  EXPECT_LE(token, GetToken());

  // Free up everything.
  for (unsigned int i = 0; i < kAllocCount; ++i) {
    allocator_->Free(offsets[i]);
    EXPECT_TRUE(allocator_->CheckConsistency());
  }
}

// Checks the free-pending-token mechanism using FreeUnused.
TEST_F(FencedAllocatorTest, FreeUnused) {
  EXPECT_TRUE(allocator_->CheckConsistency());

  const unsigned int kSize = 16;
  const unsigned int kAllocCount = kBufferSize / kSize;
  CHECK(kAllocCount * kSize == kBufferSize);

  // Allocate several buffers to fill in the memory.
  FencedAllocator::Offset offsets[kAllocCount];
  for (unsigned int i = 0; i < kAllocCount; ++i) {
    offsets[i] = allocator_->Alloc(kSize);
    EXPECT_NE(FencedAllocator::kInvalidOffset, offsets[i]);
    EXPECT_GE(kBufferSize, offsets[i]+kSize);
    EXPECT_TRUE(allocator_->CheckConsistency());
  }
  EXPECT_TRUE(allocator_->InUse());

  // No memory should be available.
  EXPECT_EQ(0u, allocator_->GetLargestFreeSize());

  // Free one successful allocation, pending fence.
  int32 token = helper_->InsertToken();
  allocator_->FreePendingToken(offsets[0], token);
  EXPECT_TRUE(allocator_->CheckConsistency());

  // Force the command buffer to process the token.
  helper_->Finish();

  // Tell the allocator to update what's available based on the current token.
  allocator_->FreeUnused();

  // Check that the new largest free size takes into account the unused block.
  EXPECT_EQ(kSize, allocator_->GetLargestFreeSize());

  // Free two more.
  token = helper_->InsertToken();
  allocator_->FreePendingToken(offsets[1], token);
  token = helper_->InsertToken();
  allocator_->FreePendingToken(offsets[2], token);
  EXPECT_TRUE(allocator_->CheckConsistency());

  // Check that nothing has changed: the tokens have not passed yet.
  EXPECT_EQ(kSize, allocator_->GetLargestFreeSize());

  // Force the command buffer to process the tokens.
  helper_->Finish();

  // Tell the allocator to update what's available based on the current token.
  allocator_->FreeUnused();

  // Check that the new largest free size takes into account the unused blocks.
  EXPECT_EQ(kSize * 3, allocator_->GetLargestFreeSize());
  EXPECT_TRUE(allocator_->InUse());

  // Free up everything else.
  for (unsigned int i = 3; i < kAllocCount; ++i) {
    allocator_->Free(offsets[i]);
    EXPECT_TRUE(allocator_->CheckConsistency());
  }
  EXPECT_FALSE(allocator_->InUse());
}

// Tests GetLargestFreeSize.
TEST_F(FencedAllocatorTest, TestGetLargestFreeSize) {
  EXPECT_TRUE(allocator_->CheckConsistency());
  EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeSize());

  FencedAllocator::Offset offset = allocator_->Alloc(kBufferSize);
  ASSERT_NE(FencedAllocator::kInvalidOffset, offset);
  EXPECT_EQ(0u, allocator_->GetLargestFreeSize());
  allocator_->Free(offset);
  EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeSize());

  const unsigned int kSize = 16;
  offset = allocator_->Alloc(kSize);
  ASSERT_NE(FencedAllocator::kInvalidOffset, offset);
  // The following checks that the buffer is allocated "smartly" - which is
  // implementation-dependent, but either a first-fit or a best-fit strategy
  // would ensure it.
  EXPECT_EQ(kBufferSize - kSize, allocator_->GetLargestFreeSize());

  // Allocate 2 more buffers (now 3), and then free the first two. This is to
  // ensure a hole. Note that this depends on the current first-fit
  // implementation.
  FencedAllocator::Offset offset1 = allocator_->Alloc(kSize);
  ASSERT_NE(FencedAllocator::kInvalidOffset, offset1);
  FencedAllocator::Offset offset2 = allocator_->Alloc(kSize);
  ASSERT_NE(FencedAllocator::kInvalidOffset, offset2);
  allocator_->Free(offset);
  allocator_->Free(offset1);
  EXPECT_EQ(kBufferSize - 3 * kSize, allocator_->GetLargestFreeSize());

  offset = allocator_->Alloc(kBufferSize - 3 * kSize);
  ASSERT_NE(FencedAllocator::kInvalidOffset, offset);
  EXPECT_EQ(2 * kSize, allocator_->GetLargestFreeSize());

  offset1 = allocator_->Alloc(2 * kSize);
  ASSERT_NE(FencedAllocator::kInvalidOffset, offset1);
  EXPECT_EQ(0u, allocator_->GetLargestFreeSize());

  allocator_->Free(offset);
  allocator_->Free(offset1);
  allocator_->Free(offset2);
}

// Tests GetLargestFreeOrPendingSize.
TEST_F(FencedAllocatorTest, TestGetLargestFreeOrPendingSize) {
  EXPECT_TRUE(allocator_->CheckConsistency());
  EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeOrPendingSize());

  FencedAllocator::Offset offset = allocator_->Alloc(kBufferSize);
  ASSERT_NE(FencedAllocator::kInvalidOffset, offset);
  EXPECT_EQ(0u, allocator_->GetLargestFreeOrPendingSize());
  allocator_->Free(offset);
  EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeOrPendingSize());

  const unsigned int kSize = 16;
  offset = allocator_->Alloc(kSize);
  ASSERT_NE(FencedAllocator::kInvalidOffset, offset);
  // The following checks that the buffer is allocated "smartly" - which is
  // implementation-dependent, but either a first-fit or a best-fit strategy
  // would ensure it.
  EXPECT_EQ(kBufferSize - kSize, allocator_->GetLargestFreeOrPendingSize());

  // Allocate 2 more buffers (now 3), and then free the first two. This is to
  // ensure a hole. Note that this depends on the current first-fit
  // implementation.
  FencedAllocator::Offset offset1 = allocator_->Alloc(kSize);
  ASSERT_NE(FencedAllocator::kInvalidOffset, offset1);
  FencedAllocator::Offset offset2 = allocator_->Alloc(kSize);
  ASSERT_NE(FencedAllocator::kInvalidOffset, offset2);
  allocator_->Free(offset);
  allocator_->Free(offset1);
  EXPECT_EQ(kBufferSize - 3 * kSize,
            allocator_->GetLargestFreeOrPendingSize());

  // Free the last one, pending a token.
  int32 token = helper_->InsertToken();
  allocator_->FreePendingToken(offset2, token);

  // Now all the buffers have been freed...
  EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeOrPendingSize());
  // ... but one is still waiting for the token.
  EXPECT_EQ(kBufferSize - 3 * kSize,
            allocator_->GetLargestFreeSize());

  // The way the helper and engine are hooked up, commands are not processed
  // until the helper has to wait for something, which means the token should
  // not have passed yet at this point.
  EXPECT_GT(token, GetToken());
  // This allocation will need to reclaim the space freed above, so it should
  // process the commands until the token is passed, and it will succeed.
  offset = allocator_->Alloc(kBufferSize);
  ASSERT_NE(FencedAllocator::kInvalidOffset, offset);
  // Check that the token has indeed passed.
  EXPECT_LE(token, GetToken());
  allocator_->Free(offset);

  // Everything has now been freed...
  EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeOrPendingSize());
  // ... for real.
  EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeSize());
}

class FencedAllocatorPollTest : public BaseFencedAllocatorTest {
 public:
  static const unsigned int kAllocSize = 128;

  MOCK_METHOD0(MockedPoll, void());

 protected:
  virtual void TearDown() {
    // If the GpuScheduler posts any tasks, this forces them to run.
    base::MessageLoop::current()->RunUntilIdle();

    BaseFencedAllocatorTest::TearDown();
  }
};

TEST_F(FencedAllocatorPollTest, TestPoll) {
  scoped_ptr<FencedAllocator> allocator(
      new FencedAllocator(kBufferSize,
                          helper_.get(),
                          base::Bind(&FencedAllocatorPollTest::MockedPoll,
                                     base::Unretained(this))));

  FencedAllocator::Offset mem1 = allocator->Alloc(kAllocSize);
  FencedAllocator::Offset mem2 = allocator->Alloc(kAllocSize);
  EXPECT_NE(mem1, FencedAllocator::kInvalidOffset);
  EXPECT_NE(mem2, FencedAllocator::kInvalidOffset);
  EXPECT_TRUE(allocator->CheckConsistency());
  EXPECT_EQ(allocator->bytes_in_use(), kAllocSize * 2);

  // Check that a no-op poll callback doesn't affect the state.
  EXPECT_CALL(*this, MockedPoll()).RetiresOnSaturation();
  allocator->FreeUnused();
  EXPECT_TRUE(allocator->CheckConsistency());
  EXPECT_EQ(allocator->bytes_in_use(), kAllocSize * 2);

  // Check that freeing from within the poll callback works.
  base::Closure free_mem1_closure =
      base::Bind(&FencedAllocator::Free,
                 base::Unretained(allocator.get()),
                 mem1);
  EXPECT_CALL(*this, MockedPoll())
      .WillOnce(InvokeWithoutArgs(&free_mem1_closure, &base::Closure::Run))
      .RetiresOnSaturation();
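  // FreeUnused invokes the poll callback, which re-enters the allocator to
  // free mem1; the allocator is expected to tolerate this reentrancy.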
  allocator->FreeUnused();
  EXPECT_TRUE(allocator->CheckConsistency());
  EXPECT_EQ(allocator->bytes_in_use(), kAllocSize * 1);

  // Check that freeing still works.
  EXPECT_CALL(*this, MockedPoll()).RetiresOnSaturation();
  allocator->Free(mem2);
  allocator->FreeUnused();
  EXPECT_TRUE(allocator->CheckConsistency());
  EXPECT_EQ(allocator->bytes_in_use(), 0u);

  allocator.reset();
}

// Test fixture for FencedAllocatorWrapper test - Creates a
// FencedAllocatorWrapper, using a CommandBufferHelper with a mock
// AsyncAPIInterface for its interface (calling it directly, not through the
// RPC mechanism), making sure Noops are ignored and SetToken calls are
// properly forwarded to the engine.
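//
// Unlike the raw FencedAllocator, the wrapper hands out pointers into a
// client-side buffer rather than offsets; in sketch form:
//   void* ptr = allocator->Alloc(size);  // or AllocTyped<T>(count)
//   allocator->FreePendingToken(ptr, helper->InsertToken());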
class FencedAllocatorWrapperTest : public BaseFencedAllocatorTest {
 protected:
  virtual void SetUp() {
    BaseFencedAllocatorTest::SetUp();

    // Though allocating this buffer isn't strictly necessary, it makes the
    // allocations point to valid addresses, so the returned pointers can
    // actually be dereferenced.
    buffer_.reset(static_cast<char*>(base::AlignedAlloc(
        kBufferSize, kAllocAlignment)));
    allocator_.reset(new FencedAllocatorWrapper(kBufferSize,
                                                helper_.get(),
                                                base::Bind(&EmptyPoll),
                                                buffer_.get()));
  }

  virtual void TearDown() {
    // If the GpuScheduler posts any tasks, this forces them to run.
    base::MessageLoop::current()->RunUntilIdle();

    EXPECT_TRUE(allocator_->CheckConsistency());

    BaseFencedAllocatorTest::TearDown();
  }

  scoped_ptr<FencedAllocatorWrapper> allocator_;
  scoped_ptr<char, base::AlignedFreeDeleter> buffer_;
};


// Checks basic alloc and free.
TEST_F(FencedAllocatorWrapperTest, TestBasic) {
  allocator_->CheckConsistency();

  const unsigned int kSize = 16;
  void *pointer = allocator_->Alloc(kSize);
  ASSERT_TRUE(pointer);
  EXPECT_LE(buffer_.get(), static_cast<char *>(pointer));
  EXPECT_GE(kBufferSize, static_cast<char *>(pointer) - buffer_.get() + kSize);
  EXPECT_TRUE(allocator_->CheckConsistency());

  allocator_->Free(pointer);
  EXPECT_TRUE(allocator_->CheckConsistency());

  char *pointer_char = allocator_->AllocTyped<char>(kSize);
  ASSERT_TRUE(pointer_char);
  EXPECT_LE(buffer_.get(), pointer_char);
  EXPECT_GE(buffer_.get() + kBufferSize, pointer_char + kSize);
  allocator_->Free(pointer_char);
  EXPECT_TRUE(allocator_->CheckConsistency());

  unsigned int *pointer_uint = allocator_->AllocTyped<unsigned int>(kSize);
  ASSERT_TRUE(pointer_uint);
  EXPECT_LE(buffer_.get(), reinterpret_cast<char *>(pointer_uint));
  EXPECT_GE(buffer_.get() + kBufferSize,
            reinterpret_cast<char *>(pointer_uint + kSize));

  // Check that it allocated kSize * sizeof(unsigned int). We can't tell
  // directly, but we can infer it from the remaining free size.
  EXPECT_EQ(kBufferSize - kSize * sizeof(*pointer_uint),
            allocator_->GetLargestFreeSize());
  allocator_->Free(pointer_uint);
}

// Checks that allocating zero bytes fails.
TEST_F(FencedAllocatorWrapperTest, TestAllocZero) {
  allocator_->CheckConsistency();

  void *pointer = allocator_->Alloc(0);
  ASSERT_FALSE(pointer);
  EXPECT_TRUE(allocator_->CheckConsistency());
}

// Checks that allocations are aligned to multiples of kAllocAlignment
// (16 bytes).
TEST_F(FencedAllocatorWrapperTest, TestAlignment) {
  allocator_->CheckConsistency();

  const unsigned int kSize1 = 75;
  void *pointer1 = allocator_->Alloc(kSize1);
  ASSERT_TRUE(pointer1);
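  // A pointer is kAllocAlignment-aligned iff the address bits selected by
  // (kAllocAlignment - 1) are all zero.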
  EXPECT_EQ(reinterpret_cast<intptr_t>(pointer1) & (kAllocAlignment - 1), 0);
  EXPECT_TRUE(allocator_->CheckConsistency());

  const unsigned int kSize2 = 43;
  void *pointer2 = allocator_->Alloc(kSize2);
  ASSERT_TRUE(pointer2);
  EXPECT_EQ(reinterpret_cast<intptr_t>(pointer2) & (kAllocAlignment - 1), 0);
  EXPECT_TRUE(allocator_->CheckConsistency());

  allocator_->Free(pointer2);
  EXPECT_TRUE(allocator_->CheckConsistency());

  allocator_->Free(pointer1);
  EXPECT_TRUE(allocator_->CheckConsistency());
}

// Checks out-of-memory condition.
TEST_F(FencedAllocatorWrapperTest, TestOutOfMemory) {
  allocator_->CheckConsistency();

  const unsigned int kSize = 16;
  const unsigned int kAllocCount = kBufferSize / kSize;
  CHECK(kAllocCount * kSize == kBufferSize);

  // Allocate several buffers to fill in the memory.
  void *pointers[kAllocCount];
  for (unsigned int i = 0; i < kAllocCount; ++i) {
    pointers[i] = allocator_->Alloc(kSize);
    EXPECT_TRUE(pointers[i]);
    EXPECT_TRUE(allocator_->CheckConsistency());
  }

  // This allocation should fail.
  void *pointer_failed = allocator_->Alloc(kSize);
  EXPECT_FALSE(pointer_failed);
  EXPECT_TRUE(allocator_->CheckConsistency());

  // Free one successful allocation, reallocate with half the size.
  allocator_->Free(pointers[0]);
  EXPECT_TRUE(allocator_->CheckConsistency());
  pointers[0] = allocator_->Alloc(kSize/2);
  EXPECT_TRUE(pointers[0]);
  EXPECT_TRUE(allocator_->CheckConsistency());

  // This allocation should fail as well.
  pointer_failed = allocator_->Alloc(kSize);
  EXPECT_FALSE(pointer_failed);
  EXPECT_TRUE(allocator_->CheckConsistency());

  // Free up everything.
  for (unsigned int i = 0; i < kAllocCount; ++i) {
    allocator_->Free(pointers[i]);
    EXPECT_TRUE(allocator_->CheckConsistency());
  }
}

// Checks the free-pending-token mechanism.
TEST_F(FencedAllocatorWrapperTest, TestFreePendingToken) {
  allocator_->CheckConsistency();

  const unsigned int kSize = 16;
  const unsigned int kAllocCount = kBufferSize / kSize;
  CHECK(kAllocCount * kSize == kBufferSize);

  // Allocate several buffers to fill in the memory.
  void *pointers[kAllocCount];
  for (unsigned int i = 0; i < kAllocCount; ++i) {
    pointers[i] = allocator_->Alloc(kSize);
    EXPECT_TRUE(pointers[i]);
    EXPECT_TRUE(allocator_->CheckConsistency());
  }

  // This allocation should fail.
  void *pointer_failed = allocator_->Alloc(kSize);
  EXPECT_FALSE(pointer_failed);
  EXPECT_TRUE(allocator_->CheckConsistency());

  // Free one successful allocation, pending fence.
  int32 token = helper_->InsertToken();
  allocator_->FreePendingToken(pointers[0], token);
  EXPECT_TRUE(allocator_->CheckConsistency());

  // The way the helper and engine are hooked up, commands are not processed
  // until the helper has to wait for something, which means the token should
  // not have passed yet at this point.
  EXPECT_GT(token, GetToken());

  // This allocation will need to reclaim the space freed above, so that should
  // process the commands until the token is passed.
  pointers[0] = allocator_->Alloc(kSize);
  EXPECT_TRUE(pointers[0]);
  EXPECT_TRUE(allocator_->CheckConsistency());
  // Check that the token has indeed passed.
  EXPECT_LE(token, GetToken());

  // Free up everything.
  for (unsigned int i = 0; i < kAllocCount; ++i) {
    allocator_->Free(pointers[i]);
    EXPECT_TRUE(allocator_->CheckConsistency());
  }
}

}  // namespace gpu