Upstream version 7.36.149.0
[platform/framework/web/crosswalk.git] / src / gpu / command_buffer / client / mapped_memory_unittest.cc
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "gpu/command_buffer/client/mapped_memory.h"
6
7 #include <list>
8 #include "base/bind.h"
9 #include "base/memory/scoped_ptr.h"
10 #include "base/message_loop/message_loop.h"
11 #include "gpu/command_buffer/client/cmd_buffer_helper.h"
12 #include "gpu/command_buffer/service/command_buffer_service.h"
13 #include "gpu/command_buffer/service/gpu_scheduler.h"
14 #include "gpu/command_buffer/service/mocks.h"
15 #include "gpu/command_buffer/service/transfer_buffer_manager.h"
16 #include "testing/gtest/include/gtest/gtest.h"
17
18 #if defined(OS_MACOSX)
19 #include "base/mac/scoped_nsautorelease_pool.h"
20 #endif
21
22 namespace gpu {
23
24 using testing::Return;
25 using testing::Mock;
26 using testing::Truly;
27 using testing::Sequence;
28 using testing::DoAll;
29 using testing::Invoke;
30 using testing::_;
31
// Common harness for the mapped-memory tests below.  It wires an
// AsyncAPIMock through a CommandBufferService and GpuScheduler so commands
// issued via the CommandBufferHelper are executed in-process (calling the
// mock directly, not through the real RPC mechanism).
class MappedMemoryTestBase : public testing::Test {
 protected:
  // Size of the command buffer the helper is initialized with.
  static const unsigned int kBufferSize = 1024;

  virtual void SetUp() {
    api_mock_.reset(new AsyncAPIMock);
    // ignore noops in the mock - we don't want to inspect the internals of the
    // helper.
    EXPECT_CALL(*api_mock_, DoCommand(cmd::kNoop, 0, _))
        .WillRepeatedly(Return(error::kNoError));
    // Forward the SetToken calls to the engine
    EXPECT_CALL(*api_mock_.get(), DoCommand(cmd::kSetToken, 1, _))
        .WillRepeatedly(DoAll(Invoke(api_mock_.get(), &AsyncAPIMock::SetToken),
                              Return(error::kNoError)));

    {
      TransferBufferManager* manager = new TransferBufferManager();
      transfer_buffer_manager_.reset(manager);
      EXPECT_TRUE(manager->Initialize());
    }

    command_buffer_.reset(
        new CommandBufferService(transfer_buffer_manager_.get()));
    EXPECT_TRUE(command_buffer_->Initialize());

    gpu_scheduler_.reset(new GpuScheduler(
        command_buffer_.get(), api_mock_.get(), NULL));
    // Run the scheduler whenever the put offset or get buffer changes so
    // commands are consumed synchronously with respect to the test.
    command_buffer_->SetPutOffsetChangeCallback(base::Bind(
        &GpuScheduler::PutChanged, base::Unretained(gpu_scheduler_.get())));
    command_buffer_->SetGetBufferChangeCallback(base::Bind(
        &GpuScheduler::SetGetBuffer, base::Unretained(gpu_scheduler_.get())));

    api_mock_->set_engine(gpu_scheduler_.get());

    helper_.reset(new CommandBufferHelper(command_buffer_.get()));
    helper_->Initialize(kBufferSize);
  }

  // Returns the last token value the command buffer has reported processed.
  int32 GetToken() {
    return command_buffer_->GetLastState().token;
  }

#if defined(OS_MACOSX)
  base::mac::ScopedNSAutoreleasePool autorelease_pool_;
#endif
  base::MessageLoop message_loop_;
  scoped_ptr<AsyncAPIMock> api_mock_;
  scoped_ptr<TransferBufferManagerInterface> transfer_buffer_manager_;
  scoped_ptr<CommandBufferService> command_buffer_;
  scoped_ptr<GpuScheduler> gpu_scheduler_;
  scoped_ptr<CommandBufferHelper> helper_;
};
84
#ifndef _MSC_VER
// Pre-C++17, an odr-used static const member needs an out-of-class
// definition; MSVC accepts the in-class initializer alone.
const unsigned int MappedMemoryTestBase::kBufferSize;
#endif
88
namespace {
// No-op poll callback for tests that don't exercise the polling path.
void EmptyPoll() {
}
}
93
94 // Test fixture for MemoryChunk test - Creates a MemoryChunk, using a
95 // CommandBufferHelper with a mock AsyncAPIInterface for its interface (calling
96 // it directly, not through the RPC mechanism), making sure Noops are ignored
97 // and SetToken are properly forwarded to the engine.
class MemoryChunkTest : public MappedMemoryTestBase {
 protected:
  // Arbitrary shared-memory id tagged onto the chunk; checked by the tests.
  static const int32 kShmId = 123;
  virtual void SetUp() {
    MappedMemoryTestBase::SetUp();
    // Back the chunk with an anonymous shared-memory buffer of kBufferSize.
    scoped_ptr<base::SharedMemory> shared_memory(new base::SharedMemory());
    shared_memory->CreateAndMapAnonymous(kBufferSize);
    buffer_ = MakeBufferFromSharedMemory(shared_memory.Pass(), kBufferSize);
    chunk_.reset(new MemoryChunk(kShmId,
                                 buffer_,
                                 helper_.get(),
                                 base::Bind(&EmptyPoll)));
  }

  virtual void TearDown() {
    // If the GpuScheduler posts any tasks, this forces them to run.
    base::MessageLoop::current()->RunUntilIdle();

    MappedMemoryTestBase::TearDown();
  }

  // Start of the chunk's backing store as a byte pointer, for range checks.
  uint8* buffer_memory() { return static_cast<uint8*>(buffer_->memory()); }

  scoped_ptr<MemoryChunk> chunk_;
  scoped_refptr<gpu::Buffer> buffer_;
};
124
#ifndef _MSC_VER
// Out-of-class definition for the odr-used static const member (pre-C++17).
const int32 MemoryChunkTest::kShmId;
#endif
128
129 TEST_F(MemoryChunkTest, Basic) {
130   const unsigned int kSize = 16;
131   EXPECT_EQ(kShmId, chunk_->shm_id());
132   EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithoutWaiting());
133   EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithWaiting());
134   EXPECT_EQ(kBufferSize, chunk_->GetSize());
135   void *pointer = chunk_->Alloc(kSize);
136   ASSERT_TRUE(pointer);
137   EXPECT_LE(buffer_->memory(), static_cast<uint8*>(pointer));
138   EXPECT_GE(kBufferSize,
139             static_cast<uint8*>(pointer) - buffer_memory() + kSize);
140   EXPECT_EQ(kBufferSize - kSize, chunk_->GetLargestFreeSizeWithoutWaiting());
141   EXPECT_EQ(kBufferSize - kSize, chunk_->GetLargestFreeSizeWithWaiting());
142   EXPECT_EQ(kBufferSize, chunk_->GetSize());
143
144   chunk_->Free(pointer);
145   EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithoutWaiting());
146   EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithWaiting());
147
148   uint8 *pointer_char = static_cast<uint8*>(chunk_->Alloc(kSize));
149   ASSERT_TRUE(pointer_char);
150   EXPECT_LE(buffer_memory(), pointer_char);
151   EXPECT_GE(buffer_memory() + kBufferSize, pointer_char + kSize);
152   EXPECT_EQ(kBufferSize - kSize, chunk_->GetLargestFreeSizeWithoutWaiting());
153   EXPECT_EQ(kBufferSize - kSize, chunk_->GetLargestFreeSizeWithWaiting());
154   chunk_->Free(pointer_char);
155   EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithoutWaiting());
156   EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithWaiting());
157 }
158
// Fixture for MappedMemoryManager tests; creates a manager with no memory
// limit by default (individual tests reset it with a limit when needed).
class MappedMemoryManagerTest : public MappedMemoryTestBase {
 public:
  // Accessor used by the free-function Poll() helper below, which runs
  // outside the fixture.
  MappedMemoryManager* manager() const {
    return manager_.get();
  }

 protected:
  virtual void SetUp() {
    MappedMemoryTestBase::SetUp();
    manager_.reset(new MappedMemoryManager(
        helper_.get(), base::Bind(&EmptyPoll), MappedMemoryManager::kNoLimit));
  }

  virtual void TearDown() {
    // If the GpuScheduler posts any tasks, this forces them to run.
    base::MessageLoop::current()->RunUntilIdle();
    // Destroy the manager before the base class tears down the helper it
    // holds a pointer to.
    manager_.reset();
    MappedMemoryTestBase::TearDown();
  }

  scoped_ptr<MappedMemoryManager> manager_;
};
181
182 TEST_F(MappedMemoryManagerTest, Basic) {
183   const unsigned int kSize = 1024;
184   // Check we can alloc.
185   int32 id1 = -1;
186   unsigned int offset1 = 0xFFFFFFFFU;
187   void* mem1 = manager_->Alloc(kSize, &id1, &offset1);
188   ASSERT_TRUE(mem1);
189   EXPECT_NE(-1, id1);
190   EXPECT_EQ(0u, offset1);
191   // Check if we free and realloc the same size we get the same memory
192   int32 id2 = -1;
193   unsigned int offset2 = 0xFFFFFFFFU;
194   manager_->Free(mem1);
195   void* mem2 = manager_->Alloc(kSize, &id2, &offset2);
196   EXPECT_EQ(mem1, mem2);
197   EXPECT_EQ(id1, id2);
198   EXPECT_EQ(offset1, offset2);
199   // Check if we allocate again we get different shared memory
200   int32 id3 = -1;
201   unsigned int offset3 = 0xFFFFFFFFU;
202   void* mem3 = manager_->Alloc(kSize, &id3, &offset3);
203   ASSERT_TRUE(mem3 != NULL);
204   EXPECT_NE(mem2, mem3);
205   EXPECT_NE(id2, id3);
206   EXPECT_EQ(0u, offset3);
207   // Free 3 and allocate 2 half size blocks.
208   manager_->Free(mem3);
209   int32 id4 = -1;
210   int32 id5 = -1;
211   unsigned int offset4 = 0xFFFFFFFFU;
212   unsigned int offset5 = 0xFFFFFFFFU;
213   void* mem4 = manager_->Alloc(kSize / 2, &id4, &offset4);
214   void* mem5 = manager_->Alloc(kSize / 2, &id5, &offset5);
215   ASSERT_TRUE(mem4 != NULL);
216   ASSERT_TRUE(mem5 != NULL);
217   EXPECT_EQ(id3, id4);
218   EXPECT_EQ(id4, id5);
219   EXPECT_EQ(0u, offset4);
220   EXPECT_EQ(kSize / 2u, offset5);
221   manager_->Free(mem4);
222   manager_->Free(mem2);
223   manager_->Free(mem5);
224 }
225
226 TEST_F(MappedMemoryManagerTest, FreePendingToken) {
227   const unsigned int kSize = 128;
228   const unsigned int kAllocCount = (kBufferSize / kSize) * 2;
229   CHECK(kAllocCount * kSize == kBufferSize * 2);
230
231   // Allocate several buffers across multiple chunks.
232   void *pointers[kAllocCount];
233   for (unsigned int i = 0; i < kAllocCount; ++i) {
234     int32 id = -1;
235     unsigned int offset = 0xFFFFFFFFu;
236     pointers[i] = manager_->Alloc(kSize, &id, &offset);
237     EXPECT_TRUE(pointers[i]);
238     EXPECT_NE(id, -1);
239     EXPECT_NE(offset, 0xFFFFFFFFu);
240   }
241
242   // Free one successful allocation, pending fence.
243   int32 token = helper_.get()->InsertToken();
244   manager_->FreePendingToken(pointers[0], token);
245
246   // The way we hooked up the helper and engine, it won't process commands
247   // until it has to wait for something. Which means the token shouldn't have
248   // passed yet at this point.
249   EXPECT_GT(token, GetToken());
250   // Force it to read up to the token
251   helper_->Finish();
252   // Check that the token has indeed passed.
253   EXPECT_LE(token, GetToken());
254
255   // This allocation should use the spot just freed above.
256   int32 new_id = -1;
257   unsigned int new_offset = 0xFFFFFFFFu;
258   void* new_ptr = manager_->Alloc(kSize, &new_id, &new_offset);
259   EXPECT_TRUE(new_ptr);
260   EXPECT_EQ(new_ptr, pointers[0]);
261   EXPECT_NE(new_id, -1);
262   EXPECT_NE(new_offset, 0xFFFFFFFFu);
263
264   // Free up everything.
265   manager_->Free(new_ptr);
266   for (unsigned int i = 1; i < kAllocCount; ++i) {
267     manager_->Free(pointers[i]);
268   }
269 }
270
271 // Check if we don't free we don't crash.
272 TEST_F(MappedMemoryManagerTest, DontFree) {
273   const unsigned int kSize = 1024;
274   // Check we can alloc.
275   int32 id1 = -1;
276   unsigned int offset1 = 0xFFFFFFFFU;
277   void* mem1 = manager_->Alloc(kSize, &id1, &offset1);
278   ASSERT_TRUE(mem1);
279 }
280
281 TEST_F(MappedMemoryManagerTest, FreeUnused) {
282   int32 id = -1;
283   unsigned int offset = 0xFFFFFFFFU;
284   void* m1 = manager_->Alloc(kBufferSize, &id, &offset);
285   void* m2 = manager_->Alloc(kBufferSize, &id, &offset);
286   ASSERT_TRUE(m1 != NULL);
287   ASSERT_TRUE(m2 != NULL);
288   EXPECT_EQ(2u, manager_->num_chunks());
289   manager_->FreeUnused();
290   EXPECT_EQ(2u, manager_->num_chunks());
291   manager_->Free(m2);
292   EXPECT_EQ(2u, manager_->num_chunks());
293   manager_->FreeUnused();
294   EXPECT_EQ(1u, manager_->num_chunks());
295   manager_->Free(m1);
296   EXPECT_EQ(1u, manager_->num_chunks());
297   manager_->FreeUnused();
298   EXPECT_EQ(0u, manager_->num_chunks());
299 }
300
301 TEST_F(MappedMemoryManagerTest, ChunkSizeMultiple) {
302   const unsigned int kSize = 1024;
303   manager_->set_chunk_size_multiple(kSize *  2);
304   // Check if we allocate less than the chunk size multiple we get
305   // chunks arounded up.
306   int32 id1 = -1;
307   unsigned int offset1 = 0xFFFFFFFFU;
308   void* mem1 = manager_->Alloc(kSize, &id1, &offset1);
309   int32 id2 = -1;
310   unsigned int offset2 = 0xFFFFFFFFU;
311   void* mem2 = manager_->Alloc(kSize, &id2, &offset2);
312   int32 id3 = -1;
313   unsigned int offset3 = 0xFFFFFFFFU;
314   void* mem3 = manager_->Alloc(kSize, &id3, &offset3);
315   ASSERT_TRUE(mem1);
316   ASSERT_TRUE(mem2);
317   ASSERT_TRUE(mem3);
318   EXPECT_NE(-1, id1);
319   EXPECT_EQ(id1, id2);
320   EXPECT_NE(id2, id3);
321   EXPECT_EQ(0u, offset1);
322   EXPECT_EQ(kSize, offset2);
323   EXPECT_EQ(0u, offset3);
324 }
325
// When every byte is still in use, the manager must grow past its memory
// limit rather than fail the allocation.
TEST_F(MappedMemoryManagerTest, UnusedMemoryLimit) {
  const unsigned int kChunkSize = 2048;
  // Reset the manager with a memory limit.
  manager_.reset(new MappedMemoryManager(
      helper_.get(), base::Bind(&EmptyPoll), kChunkSize));
  manager_->set_chunk_size_multiple(kChunkSize);

  // Allocate one chunk worth of memory.
  int32 id1 = -1;
  unsigned int offset1 = 0xFFFFFFFFU;
  void* mem1 = manager_->Alloc(kChunkSize, &id1, &offset1);
  ASSERT_TRUE(mem1);
  EXPECT_NE(-1, id1);
  EXPECT_EQ(0u, offset1);

  // Allocate another full chunk worth of memory while the first is still in
  // use; a second chunk must be created even though that exceeds the limit.
  int32 id2 = -1;
  unsigned int offset2 = 0xFFFFFFFFU;
  void* mem2 = manager_->Alloc(kChunkSize, &id2, &offset2);
  ASSERT_TRUE(mem2);
  EXPECT_NE(-1, id2);
  EXPECT_EQ(0u, offset2);

  // Expect two chunks to be allocated, exceeding the limit,
  // since all memory is in use.
  EXPECT_EQ(2 * kChunkSize, manager_->allocated_memory());
}
354
355 TEST_F(MappedMemoryManagerTest, MemoryLimitWithReuse) {
356   const unsigned int kSize = 1024;
357   // Reset the manager with a memory limit.
358   manager_.reset(new MappedMemoryManager(
359       helper_.get(), base::Bind(&EmptyPoll), kSize));
360   const unsigned int kChunkSize = 2 * 1024;
361   manager_->set_chunk_size_multiple(kChunkSize);
362
363   // Allocate half a chunk worth of memory.
364   int32 id1 = -1;
365   unsigned int offset1 = 0xFFFFFFFFU;
366   void* mem1 = manager_->Alloc(kSize, &id1, &offset1);
367   ASSERT_TRUE(mem1);
368   EXPECT_NE(-1, id1);
369   EXPECT_EQ(0u, offset1);
370
371   // Allocate half a chunk worth of memory again.
372   // The same chunk will be used.
373   int32 id2 = -1;
374   unsigned int offset2 = 0xFFFFFFFFU;
375   void* mem2 = manager_->Alloc(kSize, &id2, &offset2);
376   ASSERT_TRUE(mem2);
377   EXPECT_NE(-1, id2);
378   EXPECT_EQ(kSize, offset2);
379
380   // Free one successful allocation, pending fence.
381   int32 token = helper_.get()->InsertToken();
382   manager_->FreePendingToken(mem2, token);
383
384   // The way we hooked up the helper and engine, it won't process commands
385   // until it has to wait for something. Which means the token shouldn't have
386   // passed yet at this point.
387   EXPECT_GT(token, GetToken());
388
389   // Since we didn't call helper_.finish() the token did not pass.
390   // We won't be able to claim the free memory without waiting and
391   // as we've already met the memory limit we'll have to wait
392   // on the token.
393   int32 id3 = -1;
394   unsigned int offset3 = 0xFFFFFFFFU;
395   void* mem3 = manager_->Alloc(kSize, &id3, &offset3);
396   ASSERT_TRUE(mem3);
397   EXPECT_NE(-1, id3);
398   // It will reuse the space from the second allocation just freed.
399   EXPECT_EQ(kSize, offset3);
400
401   // Expect one chunk to be allocated
402   EXPECT_EQ(1 * kChunkSize, manager_->allocated_memory());
403 }
404
405 namespace {
406 void Poll(MappedMemoryManagerTest *test, std::list<void*>* list) {
407   std::list<void*>::iterator it = list->begin();
408   while (it != list->end()) {
409     void* address = *it;
410     test->manager()->Free(address);
411     it = list->erase(it);
412   }
413 }
414 }
415
416 TEST_F(MappedMemoryManagerTest, Poll) {
417   std::list<void*> unmanaged_memory_list;
418
419   const unsigned int kSize = 1024;
420   // Reset the manager with a memory limit.
421   manager_.reset(new MappedMemoryManager(
422       helper_.get(),
423       base::Bind(&Poll, this, &unmanaged_memory_list),
424       kSize));
425
426   // Allocate kSize bytes. Don't add the address to
427   // the unmanaged memory list, so that it won't be free:ed just yet.
428   int32 id1;
429   unsigned int offset1;
430   void* mem1 = manager_->Alloc(kSize, &id1, &offset1);
431   EXPECT_EQ(manager_->bytes_in_use(), kSize);
432
433   // Allocate kSize more bytes, and make sure we grew.
434   int32 id2;
435   unsigned int offset2;
436   void* mem2 = manager_->Alloc(kSize, &id2, &offset2);
437   EXPECT_EQ(manager_->bytes_in_use(), kSize * 2);
438
439   // Make the unmanaged buffer be released next time FreeUnused() is called
440   // in MappedMemoryManager/FencedAllocator. This happens for example when
441   // allocating new memory.
442   unmanaged_memory_list.push_back(mem1);
443
444   // Allocate kSize more bytes. This should poll unmanaged memory, which now
445   // should free the previously allocated unmanaged memory.
446   int32 id3;
447   unsigned int offset3;
448   void* mem3 = manager_->Alloc(kSize, &id3, &offset3);
449   EXPECT_EQ(manager_->bytes_in_use(), kSize * 2);
450
451   manager_->Free(mem2);
452   manager_->Free(mem3);
453   EXPECT_EQ(manager_->bytes_in_use(), static_cast<size_t>(0));
454 }
455
456 }  // namespace gpu