// File: src/gpu/command_buffer/client/mapped_memory_unittest.cc
// (from platform/framework/web/crosswalk.git; commit subject: "add sources.")
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "gpu/command_buffer/client/mapped_memory.h"
6
7 #include "base/bind.h"
8 #include "base/memory/scoped_ptr.h"
9 #include "base/message_loop/message_loop.h"
10 #include "gpu/command_buffer/client/cmd_buffer_helper.h"
11 #include "gpu/command_buffer/service/command_buffer_service.h"
12 #include "gpu/command_buffer/service/gpu_scheduler.h"
13 #include "gpu/command_buffer/service/mocks.h"
14 #include "gpu/command_buffer/service/transfer_buffer_manager.h"
15 #include "testing/gtest/include/gtest/gtest.h"
16
17 #if defined(OS_MACOSX)
18 #include "base/mac/scoped_nsautorelease_pool.h"
19 #endif
20
21 namespace gpu {
22
23 using testing::Return;
24 using testing::Mock;
25 using testing::Truly;
26 using testing::Sequence;
27 using testing::DoAll;
28 using testing::Invoke;
29 using testing::_;
30
31 class MappedMemoryTestBase : public testing::Test {
32  protected:
33   static const unsigned int kBufferSize = 1024;
34
35   virtual void SetUp() {
36     api_mock_.reset(new AsyncAPIMock);
37     // ignore noops in the mock - we don't want to inspect the internals of the
38     // helper.
39     EXPECT_CALL(*api_mock_, DoCommand(cmd::kNoop, 0, _))
40         .WillRepeatedly(Return(error::kNoError));
41     // Forward the SetToken calls to the engine
42     EXPECT_CALL(*api_mock_.get(), DoCommand(cmd::kSetToken, 1, _))
43         .WillRepeatedly(DoAll(Invoke(api_mock_.get(), &AsyncAPIMock::SetToken),
44                               Return(error::kNoError)));
45
46     {
47       TransferBufferManager* manager = new TransferBufferManager();
48       transfer_buffer_manager_.reset(manager);
49       EXPECT_TRUE(manager->Initialize());
50     }
51
52     command_buffer_.reset(
53         new CommandBufferService(transfer_buffer_manager_.get()));
54     EXPECT_TRUE(command_buffer_->Initialize());
55
56     gpu_scheduler_.reset(new GpuScheduler(
57         command_buffer_.get(), api_mock_.get(), NULL));
58     command_buffer_->SetPutOffsetChangeCallback(base::Bind(
59         &GpuScheduler::PutChanged, base::Unretained(gpu_scheduler_.get())));
60     command_buffer_->SetGetBufferChangeCallback(base::Bind(
61         &GpuScheduler::SetGetBuffer, base::Unretained(gpu_scheduler_.get())));
62
63     api_mock_->set_engine(gpu_scheduler_.get());
64
65     helper_.reset(new CommandBufferHelper(command_buffer_.get()));
66     helper_->Initialize(kBufferSize);
67   }
68
69   int32 GetToken() {
70     return command_buffer_->GetState().token;
71   }
72
73 #if defined(OS_MACOSX)
74   base::mac::ScopedNSAutoreleasePool autorelease_pool_;
75 #endif
76   base::MessageLoop message_loop_;
77   scoped_ptr<AsyncAPIMock> api_mock_;
78   scoped_ptr<TransferBufferManagerInterface> transfer_buffer_manager_;
79   scoped_ptr<CommandBufferService> command_buffer_;
80   scoped_ptr<GpuScheduler> gpu_scheduler_;
81   scoped_ptr<CommandBufferHelper> helper_;
82 };
83
84 #ifndef _MSC_VER
85 const unsigned int MappedMemoryTestBase::kBufferSize;
86 #endif
87
88 // Test fixture for MemoryChunk test - Creates a MemoryChunk, using a
89 // CommandBufferHelper with a mock AsyncAPIInterface for its interface (calling
90 // it directly, not through the RPC mechanism), making sure Noops are ignored
91 // and SetToken are properly forwarded to the engine.
92 class MemoryChunkTest : public MappedMemoryTestBase {
93  protected:
94   static const int32 kShmId = 123;
95   virtual void SetUp() {
96     MappedMemoryTestBase::SetUp();
97     buffer_.reset(new uint8[kBufferSize]);
98     gpu::Buffer buf;
99     buf.size = kBufferSize;
100     buf.ptr = buffer_.get();
101     chunk_.reset(new MemoryChunk(kShmId, buf, helper_.get()));
102   }
103
104   virtual void TearDown() {
105     // If the GpuScheduler posts any tasks, this forces them to run.
106     base::MessageLoop::current()->RunUntilIdle();
107
108     MappedMemoryTestBase::TearDown();
109   }
110
111   scoped_ptr<MemoryChunk> chunk_;
112   scoped_ptr<uint8[]> buffer_;
113 };
114
115 #ifndef _MSC_VER
116 const int32 MemoryChunkTest::kShmId;
117 #endif
118
119 TEST_F(MemoryChunkTest, Basic) {
120   const unsigned int kSize = 16;
121   EXPECT_EQ(kShmId, chunk_->shm_id());
122   EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithoutWaiting());
123   EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithWaiting());
124   EXPECT_EQ(kBufferSize, chunk_->GetSize());
125   void *pointer = chunk_->Alloc(kSize);
126   ASSERT_TRUE(pointer);
127   EXPECT_LE(buffer_.get(), static_cast<uint8 *>(pointer));
128   EXPECT_GE(kBufferSize, static_cast<uint8 *>(pointer) - buffer_.get() + kSize);
129   EXPECT_EQ(kBufferSize - kSize, chunk_->GetLargestFreeSizeWithoutWaiting());
130   EXPECT_EQ(kBufferSize - kSize, chunk_->GetLargestFreeSizeWithWaiting());
131   EXPECT_EQ(kBufferSize, chunk_->GetSize());
132
133   chunk_->Free(pointer);
134   EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithoutWaiting());
135   EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithWaiting());
136
137   uint8 *pointer_char = static_cast<uint8*>(chunk_->Alloc(kSize));
138   ASSERT_TRUE(pointer_char);
139   EXPECT_LE(buffer_.get(), pointer_char);
140   EXPECT_GE(buffer_.get() + kBufferSize, pointer_char + kSize);
141   EXPECT_EQ(kBufferSize - kSize, chunk_->GetLargestFreeSizeWithoutWaiting());
142   EXPECT_EQ(kBufferSize - kSize, chunk_->GetLargestFreeSizeWithWaiting());
143   chunk_->Free(pointer_char);
144   EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithoutWaiting());
145   EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithWaiting());
146 }
147
148 class MappedMemoryManagerTest : public MappedMemoryTestBase {
149  protected:
150   virtual void SetUp() {
151     MappedMemoryTestBase::SetUp();
152     manager_.reset(new MappedMemoryManager(
153         helper_.get(), MappedMemoryManager::kNoLimit));
154   }
155
156   virtual void TearDown() {
157     // If the GpuScheduler posts any tasks, this forces them to run.
158     base::MessageLoop::current()->RunUntilIdle();
159     manager_.reset();
160     MappedMemoryTestBase::TearDown();
161   }
162
163   scoped_ptr<MappedMemoryManager> manager_;
164 };
165
166 TEST_F(MappedMemoryManagerTest, Basic) {
167   const unsigned int kSize = 1024;
168   // Check we can alloc.
169   int32 id1 = -1;
170   unsigned int offset1 = 0xFFFFFFFFU;
171   void* mem1 = manager_->Alloc(kSize, &id1, &offset1);
172   ASSERT_TRUE(mem1);
173   EXPECT_NE(-1, id1);
174   EXPECT_EQ(0u, offset1);
175   // Check if we free and realloc the same size we get the same memory
176   int32 id2 = -1;
177   unsigned int offset2 = 0xFFFFFFFFU;
178   manager_->Free(mem1);
179   void* mem2 = manager_->Alloc(kSize, &id2, &offset2);
180   EXPECT_EQ(mem1, mem2);
181   EXPECT_EQ(id1, id2);
182   EXPECT_EQ(offset1, offset2);
183   // Check if we allocate again we get different shared memory
184   int32 id3 = -1;
185   unsigned int offset3 = 0xFFFFFFFFU;
186   void* mem3 = manager_->Alloc(kSize, &id3, &offset3);
187   ASSERT_TRUE(mem3 != NULL);
188   EXPECT_NE(mem2, mem3);
189   EXPECT_NE(id2, id3);
190   EXPECT_EQ(0u, offset3);
191   // Free 3 and allocate 2 half size blocks.
192   manager_->Free(mem3);
193   int32 id4 = -1;
194   int32 id5 = -1;
195   unsigned int offset4 = 0xFFFFFFFFU;
196   unsigned int offset5 = 0xFFFFFFFFU;
197   void* mem4 = manager_->Alloc(kSize / 2, &id4, &offset4);
198   void* mem5 = manager_->Alloc(kSize / 2, &id5, &offset5);
199   ASSERT_TRUE(mem4 != NULL);
200   ASSERT_TRUE(mem5 != NULL);
201   EXPECT_EQ(id3, id4);
202   EXPECT_EQ(id4, id5);
203   EXPECT_EQ(0u, offset4);
204   EXPECT_EQ(kSize / 2u, offset5);
205   manager_->Free(mem4);
206   manager_->Free(mem2);
207   manager_->Free(mem5);
208 }
209
210 TEST_F(MappedMemoryManagerTest, FreePendingToken) {
211   const unsigned int kSize = 128;
212   const unsigned int kAllocCount = (kBufferSize / kSize) * 2;
213   CHECK(kAllocCount * kSize == kBufferSize * 2);
214
215   // Allocate several buffers across multiple chunks.
216   void *pointers[kAllocCount];
217   for (unsigned int i = 0; i < kAllocCount; ++i) {
218     int32 id = -1;
219     unsigned int offset = 0xFFFFFFFFu;
220     pointers[i] = manager_->Alloc(kSize, &id, &offset);
221     EXPECT_TRUE(pointers[i]);
222     EXPECT_NE(id, -1);
223     EXPECT_NE(offset, 0xFFFFFFFFu);
224   }
225
226   // Free one successful allocation, pending fence.
227   int32 token = helper_.get()->InsertToken();
228   manager_->FreePendingToken(pointers[0], token);
229
230   // The way we hooked up the helper and engine, it won't process commands
231   // until it has to wait for something. Which means the token shouldn't have
232   // passed yet at this point.
233   EXPECT_GT(token, GetToken());
234   // Force it to read up to the token
235   helper_->Finish();
236   // Check that the token has indeed passed.
237   EXPECT_LE(token, GetToken());
238
239   // This allocation should use the spot just freed above.
240   int32 new_id = -1;
241   unsigned int new_offset = 0xFFFFFFFFu;
242   void* new_ptr = manager_->Alloc(kSize, &new_id, &new_offset);
243   EXPECT_TRUE(new_ptr);
244   EXPECT_EQ(new_ptr, pointers[0]);
245   EXPECT_NE(new_id, -1);
246   EXPECT_NE(new_offset, 0xFFFFFFFFu);
247
248   // Free up everything.
249   manager_->Free(new_ptr);
250   for (unsigned int i = 1; i < kAllocCount; ++i) {
251     manager_->Free(pointers[i]);
252   }
253 }
254
255 // Check if we don't free we don't crash.
256 TEST_F(MappedMemoryManagerTest, DontFree) {
257   const unsigned int kSize = 1024;
258   // Check we can alloc.
259   int32 id1 = -1;
260   unsigned int offset1 = 0xFFFFFFFFU;
261   void* mem1 = manager_->Alloc(kSize, &id1, &offset1);
262   ASSERT_TRUE(mem1);
263 }
264
265 TEST_F(MappedMemoryManagerTest, FreeUnused) {
266   int32 id = -1;
267   unsigned int offset = 0xFFFFFFFFU;
268   void* m1 = manager_->Alloc(kBufferSize, &id, &offset);
269   void* m2 = manager_->Alloc(kBufferSize, &id, &offset);
270   ASSERT_TRUE(m1 != NULL);
271   ASSERT_TRUE(m2 != NULL);
272   EXPECT_EQ(2u, manager_->num_chunks());
273   manager_->FreeUnused();
274   EXPECT_EQ(2u, manager_->num_chunks());
275   manager_->Free(m2);
276   EXPECT_EQ(2u, manager_->num_chunks());
277   manager_->FreeUnused();
278   EXPECT_EQ(1u, manager_->num_chunks());
279   manager_->Free(m1);
280   EXPECT_EQ(1u, manager_->num_chunks());
281   manager_->FreeUnused();
282   EXPECT_EQ(0u, manager_->num_chunks());
283 }
284
285 TEST_F(MappedMemoryManagerTest, ChunkSizeMultiple) {
286   const unsigned int kSize = 1024;
287   manager_->set_chunk_size_multiple(kSize *  2);
288   // Check if we allocate less than the chunk size multiple we get
289   // chunks arounded up.
290   int32 id1 = -1;
291   unsigned int offset1 = 0xFFFFFFFFU;
292   void* mem1 = manager_->Alloc(kSize, &id1, &offset1);
293   int32 id2 = -1;
294   unsigned int offset2 = 0xFFFFFFFFU;
295   void* mem2 = manager_->Alloc(kSize, &id2, &offset2);
296   int32 id3 = -1;
297   unsigned int offset3 = 0xFFFFFFFFU;
298   void* mem3 = manager_->Alloc(kSize, &id3, &offset3);
299   ASSERT_TRUE(mem1);
300   ASSERT_TRUE(mem2);
301   ASSERT_TRUE(mem3);
302   EXPECT_NE(-1, id1);
303   EXPECT_EQ(id1, id2);
304   EXPECT_NE(id2, id3);
305   EXPECT_EQ(0u, offset1);
306   EXPECT_EQ(kSize, offset2);
307   EXPECT_EQ(0u, offset3);
308 }
309
310 TEST_F(MappedMemoryManagerTest, UnusedMemoryLimit) {
311   const unsigned int kChunkSize = 2048;
312   // Reset the manager with a memory limit.
313   manager_.reset(new MappedMemoryManager(helper_.get(), kChunkSize));
314   manager_->set_chunk_size_multiple(kChunkSize);
315
316   // Allocate one chunk worth of memory.
317   int32 id1 = -1;
318   unsigned int offset1 = 0xFFFFFFFFU;
319   void* mem1 = manager_->Alloc(kChunkSize, &id1, &offset1);
320   ASSERT_TRUE(mem1);
321   EXPECT_NE(-1, id1);
322   EXPECT_EQ(0u, offset1);
323
324   // Allocate half a chunk worth of memory again.
325   // The same chunk will be used.
326   int32 id2 = -1;
327   unsigned int offset2 = 0xFFFFFFFFU;
328   void* mem2 = manager_->Alloc(kChunkSize, &id2, &offset2);
329   ASSERT_TRUE(mem2);
330   EXPECT_NE(-1, id2);
331   EXPECT_EQ(0u, offset2);
332
333   // Expect two chunks to be allocated, exceeding the limit,
334   // since all memory is in use.
335   EXPECT_EQ(2 * kChunkSize, manager_->allocated_memory());
336 }
337
338 TEST_F(MappedMemoryManagerTest, MemoryLimitWithReuse) {
339   const unsigned int kSize = 1024;
340   // Reset the manager with a memory limit.
341   manager_.reset(new MappedMemoryManager(helper_.get(), kSize));
342   const unsigned int kChunkSize = 2 * 1024;
343   manager_->set_chunk_size_multiple(kChunkSize);
344
345   // Allocate half a chunk worth of memory.
346   int32 id1 = -1;
347   unsigned int offset1 = 0xFFFFFFFFU;
348   void* mem1 = manager_->Alloc(kSize, &id1, &offset1);
349   ASSERT_TRUE(mem1);
350   EXPECT_NE(-1, id1);
351   EXPECT_EQ(0u, offset1);
352
353   // Allocate half a chunk worth of memory again.
354   // The same chunk will be used.
355   int32 id2 = -1;
356   unsigned int offset2 = 0xFFFFFFFFU;
357   void* mem2 = manager_->Alloc(kSize, &id2, &offset2);
358   ASSERT_TRUE(mem2);
359   EXPECT_NE(-1, id2);
360   EXPECT_EQ(kSize, offset2);
361
362   // Free one successful allocation, pending fence.
363   int32 token = helper_.get()->InsertToken();
364   manager_->FreePendingToken(mem2, token);
365
366   // The way we hooked up the helper and engine, it won't process commands
367   // until it has to wait for something. Which means the token shouldn't have
368   // passed yet at this point.
369   EXPECT_GT(token, GetToken());
370
371   // Since we didn't call helper_.finish() the token did not pass.
372   // We won't be able to claim the free memory without waiting and
373   // as we've already met the memory limit we'll have to wait
374   // on the token.
375   int32 id3 = -1;
376   unsigned int offset3 = 0xFFFFFFFFU;
377   void* mem3 = manager_->Alloc(kSize, &id3, &offset3);
378   ASSERT_TRUE(mem3);
379   EXPECT_NE(-1, id3);
380   // It will reuse the space from the second allocation just freed.
381   EXPECT_EQ(kSize, offset3);
382
383   // Expect one chunk to be allocated
384   EXPECT_EQ(1 * kChunkSize, manager_->allocated_memory());
385 }
386
387 }  // namespace gpu