//******************************************************************
// Copyright 2015 Intel Mobile Communications GmbH All Rights Reserved.
//-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
//*********************************************************************

// Defining _POSIX_C_SOURCE macro with 200809L (or greater) as value
// causes header files to expose definitions
// corresponding to the POSIX.1-2008 base
// specification (excluding the XSI extension).
// For POSIX.1-2008 base specification,
// Refer http://pubs.opengroup.org/stage7tc1/
//
// For this specific file, see use of usleep
32 #ifndef _POSIX_C_SOURCE
33 #define _POSIX_C_SOURCE 200809L
34 #endif // _POSIX_C_SOURCE
36 #include "iotivity_config.h"
37 #include "gtest/gtest.h"
40 #include <cathreadpool.h>
45 #ifdef HAVE_SYS_TIME_H
//#define DEBUG_VERBOSE 1

// The debug print lines are left in for now since the output can be
// helpful for developers trying to debug or extend the tests.
// However, by default they are #defined out so as not to get in
// the way of normal test runs.
//
// NOTE(review): the #ifdef/#else/#endif below was lost in extraction,
// leaving two conflicting definitions of DBG_printf; restored here so the
// macro is defined exactly once either way.
#ifdef DEBUG_VERBOSE
#define DBG_printf(...) printf(__VA_ARGS__)
#else
#define DBG_printf(...)
#endif

// Unit conversion factors, and the polling intervals (in milliseconds)
// used by the busy-wait loops in these tests.
static const uint64_t USECS_PER_SEC = 1000000;
static const uint64_t USECS_PER_MSEC = 1000;
static const int MINIMAL_LOOP_SLEEP = 20;
static const int MINIMAL_EXTRA_SLEEP = 25;
78 uint64_t currentTime=0;
81 clock_gettime(CLOCK_MONOTONIC, &ts);
82 currentTime = ts.tv_sec * USECS_PER_SEC + ts.tv_nsec / 1000;
85 ULARGE_INTEGER microseconds;
87 GetSystemTimeAsFileTime(&time);
89 // Time is in hundreds of nanoseconds, so we must convert to uS
90 microseconds.LowPart = time.dwLowDateTime;
91 microseconds.HighPart = time.dwHighDateTime;
92 microseconds.QuadPart /= HNS_PER_US;
94 currentTime = microseconds.QuadPart;
97 gettimeofday(&tv, NULL);
98 currentTime = tv.tv_sec * USECS_PER_SEC + tv.tv_usec;
103 TEST(MutexTests, TC_01_CREATE)
105 ca_mutex mymutex = ca_mutex_new();
107 EXPECT_TRUE(mymutex != NULL);
110 ca_mutex_free(mymutex);
114 typedef struct _tagFunc1
117 volatile bool thread_up;
118 volatile bool finished;
121 void mutexFunc(void *context)
123 _func1_struct* pData = (_func1_struct*) context;
125 DBG_printf("Thread: trying to lock\n");
127 // setting the flag must be done before lock attempt, as the test
128 // thread starts off with the mutex locked
129 pData->thread_up = true;
130 ca_mutex_lock(pData->mutex);
132 DBG_printf("Thread: got lock\n");
133 usleep(MINIMAL_LOOP_SLEEP * USECS_PER_MSEC);
134 DBG_printf("Thread: releasing\n");
136 pData->finished = true; // assignment guarded by lock
138 ca_mutex_unlock(pData->mutex);
141 TEST(MutexTests, TC_03_THREAD_LOCKING)
143 ca_thread_pool_t mythreadpool;
145 EXPECT_EQ(CA_STATUS_OK, ca_thread_pool_init(3, &mythreadpool));
147 _func1_struct pData = {0, false, false};
149 pData.mutex = ca_mutex_new();
151 EXPECT_TRUE(pData.mutex != NULL);
152 if (pData.mutex != NULL)
154 DBG_printf("test: Holding mutex in test\n");
155 ca_mutex_lock(pData.mutex);
157 DBG_printf("test: starting thread\n");
159 EXPECT_EQ(CA_STATUS_OK,
160 ca_thread_pool_add_task(mythreadpool, mutexFunc, &pData));
162 DBG_printf("test: waiting for thread to be up.\n");
164 while (!pData.thread_up)
166 usleep(MINIMAL_LOOP_SLEEP * USECS_PER_MSEC);
168 // At this point the thread is running and close to trying to lock.
169 // For test purposes only, use of condition variables is being avoided,
170 // so a minor sleep is used.
171 usleep(MINIMAL_EXTRA_SLEEP * USECS_PER_MSEC);
173 DBG_printf("test: unlocking\n");
175 ca_mutex_unlock(pData.mutex);
177 DBG_printf("test: waiting for thread to release\n");
178 while (!pData.finished)
180 usleep(MINIMAL_LOOP_SLEEP * USECS_PER_MSEC);
183 ca_mutex_lock(pData.mutex);
185 // Cleanup Everything
187 ca_mutex_unlock(pData.mutex);
188 ca_mutex_free(pData.mutex);
191 ca_thread_pool_free(mythreadpool);
194 TEST(ConditionTests, TC_01_CREATE)
196 ca_cond mycond = ca_cond_new();
198 EXPECT_TRUE(mycond != NULL);
201 ca_cond_free(mycond);
205 // Normally we would use one pair of mutex/cond-var communicating to the
206 // worker threads and one pair back to the main thread. However since
207 // testing the ca_cond itself is the point, only one pair is used here.
208 typedef struct _tagFunc2
213 volatile bool thread_up;
214 volatile bool finished;
217 void condFunc(void *context)
219 _func2_struct* pData = (_func2_struct*) context;
221 DBG_printf("Thread_%d: waiting on condition\n", pData->id);
223 ca_mutex_lock(pData->mutex);
225 pData->thread_up = true;
227 ca_cond_wait(pData->condition, pData->mutex);
229 pData->finished = true; // assignment guarded by lock
231 ca_mutex_unlock(pData->mutex);
233 DBG_printf("Thread_%d: completed.\n", pData->id);
237 /** @todo: Enable. Need to solve nanosleep issue */
238 TEST(ConditionTests, DISABLED_TC_02_SIGNAL)
240 TEST(ConditionTests, TC_02_SIGNAL)
243 const int MAX_WAIT_MS = 2000;
244 ca_thread_pool_t mythreadpool;
246 EXPECT_EQ(CA_STATUS_OK, ca_thread_pool_init(3, &mythreadpool));
248 ca_mutex sharedMutex = ca_mutex_new();
249 ca_cond sharedCond = ca_cond_new();
251 _func2_struct pData1 =
252 { 1, sharedMutex, sharedCond, false, false };
253 _func2_struct pData2 =
254 { 2, sharedMutex, sharedCond, false, false };
256 EXPECT_TRUE(pData1.mutex != NULL);
257 if (pData1.mutex != NULL)
259 DBG_printf("starting thread\n");
261 EXPECT_EQ(CA_STATUS_OK,
262 ca_thread_pool_add_task(mythreadpool, condFunc, &pData1));
263 EXPECT_EQ(CA_STATUS_OK,
264 ca_thread_pool_add_task(mythreadpool, condFunc, &pData2));
266 DBG_printf("test : sleeping\n");
268 while (!pData1.thread_up || !pData2.thread_up)
270 // For test purposes only, use of condition variables is being
271 // avoided, so a minor sleep is used.
272 usleep(MINIMAL_LOOP_SLEEP * USECS_PER_MSEC);
274 // At this point the threads are running and both have locked. One
275 // has already started waiting on the condition and the other is at
278 ca_mutex_lock(sharedMutex);
279 // once the lock is acquired it means both threads were waiting.
280 DBG_printf("test : signaling first thread\n");
281 ca_cond_signal(sharedCond);
282 ca_mutex_unlock(sharedMutex);
284 // At this point either of the child threads might lock the mutex in
285 // their cond_wait call, or this test thread might lock it again if
286 // mutex_lock gets executed before the child threads can react to
287 // the signaling. Thus we wait on their flag variables
288 int waitCount = 1; // start with 1 for minumum targetWait value.
289 while (!pData1.finished && !pData2.finished)
291 usleep(MINIMAL_LOOP_SLEEP * USECS_PER_MSEC);
295 // As a rough hueristic wait twice as long for the second to possibly
297 int targetWait = waitCount * 2;
299 (i < targetWait) && (!pData1.finished && !pData2.finished); i++)
301 usleep(MINIMAL_LOOP_SLEEP * USECS_PER_MSEC);
303 usleep(MINIMAL_EXTRA_SLEEP);
305 // only one should be finished
306 ca_mutex_lock(sharedMutex);
307 EXPECT_NE(pData1.finished, pData2.finished);
308 ca_mutex_unlock(sharedMutex);
310 DBG_printf("test : signaling another thread\n");
312 ca_mutex_lock(sharedMutex);
313 ca_cond_signal(sharedCond);
314 ca_mutex_unlock(sharedMutex);
317 while ((!pData1.finished || !pData2.finished)
318 && ((waitCount * MINIMAL_EXTRA_SLEEP) < MAX_WAIT_MS))
320 usleep(MINIMAL_LOOP_SLEEP * USECS_PER_MSEC);
324 // both should finally be finished
325 EXPECT_TRUE(pData1.finished);
326 EXPECT_TRUE(pData2.finished);
328 // Cleanup Everything
330 ca_mutex_free(pData1.mutex);
333 ca_cond_free(pData1.condition);
335 ca_thread_pool_free(mythreadpool);
338 TEST(ConditionTests, TC_03_BROADCAST)
340 const int MAX_WAIT_MS = 2000;
341 ca_thread_pool_t mythreadpool;
343 EXPECT_EQ(CA_STATUS_OK, ca_thread_pool_init(3, &mythreadpool));
345 ca_mutex sharedMutex = ca_mutex_new();
346 ca_cond sharedCond = ca_cond_new();
348 _func2_struct pData1 =
349 { 1, sharedMutex, sharedCond, false, false };
350 _func2_struct pData2 =
351 { 2, sharedMutex, sharedCond, false, false };
353 EXPECT_TRUE(pData1.mutex != NULL);
354 if (pData1.mutex != NULL)
356 DBG_printf("starting thread\n");
358 EXPECT_EQ(CA_STATUS_OK,
359 ca_thread_pool_add_task(mythreadpool, condFunc, &pData1));
360 EXPECT_EQ(CA_STATUS_OK,
361 ca_thread_pool_add_task(mythreadpool, condFunc, &pData2));
363 DBG_printf("test : sleeping\n");
365 while (!pData1.thread_up || !pData2.thread_up)
367 // For test purposes only, use of condition variables is being
368 // avoided, so a minor sleep is used.
369 usleep(MINIMAL_LOOP_SLEEP * USECS_PER_MSEC);
371 // At this point the threads are running and both have locked. One
372 // has already started waiting on the condition and the other is at
375 DBG_printf("test : signaling all threads\n");
377 ca_mutex_lock(sharedMutex);
378 // once the lock is acquired it means both threads were waiting.
379 ca_cond_broadcast(sharedCond);
380 ca_mutex_unlock(sharedMutex);
383 while ((!pData1.finished || !pData2.finished)
384 && ((waitCount * MINIMAL_EXTRA_SLEEP) < MAX_WAIT_MS))
386 usleep(MINIMAL_LOOP_SLEEP * USECS_PER_MSEC);
390 // both should finally be finished
391 EXPECT_TRUE(pData1.finished);
392 EXPECT_TRUE(pData2.finished);
394 // Cleanup Everything
396 ca_mutex_free(sharedMutex);
399 ca_cond_free(sharedCond);
401 ca_thread_pool_free(mythreadpool);
405 /** @todo: Enable. Need to solve nanosleep issue */
406 TEST(ConditionTests, DISABLED_TC_04_TIMECHECK)
408 TEST(ConditionTests, TC_04_TIMECHECK)
411 uint64_t begin = getAbsTime();
415 uint64_t end = getAbsTime();
417 EXPECT_LT(begin, end); // should never be the same value
420 void timedFunc(void *context)
422 _func2_struct* pData = (_func2_struct*) context;
424 DBG_printf("Thread_%d: waiting for timeout \n", pData->id);
426 ca_mutex_lock(pData->mutex);
428 uint64_t abs = USECS_PER_SEC / 2; // 1/2 seconds
431 CAWaitResult_t ret = ca_cond_wait_for(pData->condition,
433 EXPECT_EQ(CA_WAIT_TIMEDOUT, ret);
435 pData->thread_up = true;
437 DBG_printf("Thread_%d: waiting for signal \n", pData->id);
439 abs = 5 * USECS_PER_SEC; // 5 seconds
442 ret = ca_cond_wait_for(pData->condition, pData->mutex, abs);
443 EXPECT_EQ(CA_WAIT_SUCCESS, ret);
445 pData->finished = true; // assignment guarded by lock
447 ca_mutex_unlock(pData->mutex);
449 DBG_printf("Thread_%d: stopping\n", pData->id);
452 TEST(ConditionTests, TC_05_WAIT)
454 const int MAX_WAIT_MS = 5000;
455 ca_thread_pool_t mythreadpool;
457 EXPECT_EQ(CA_STATUS_OK, ca_thread_pool_init(3, &mythreadpool));
459 ca_mutex sharedMutex = ca_mutex_new();
460 ca_cond sharedCond = ca_cond_new();
462 _func2_struct pData1 =
463 { 1, sharedMutex, sharedCond, false, false };
465 EXPECT_TRUE(sharedMutex != NULL);
466 if (sharedMutex != NULL)
468 DBG_printf("test : starting thread\n");
470 EXPECT_EQ(CA_STATUS_OK,
471 ca_thread_pool_add_task(mythreadpool, timedFunc, &pData1));
473 DBG_printf("test : waiting for thread to timeout once.\n");
475 while (!pData1.thread_up)
477 // For test purposes only, use of condition variables is being
478 // avoided, so a minor sleep is used.
479 usleep(MINIMAL_LOOP_SLEEP * USECS_PER_MSEC);
483 DBG_printf("test : signaling first thread\n");
485 ca_mutex_lock(sharedMutex);
486 ca_cond_signal(sharedCond);
487 ca_mutex_unlock(sharedMutex);
490 while (!pData1.finished
491 && ((waitCount * MINIMAL_EXTRA_SLEEP) < MAX_WAIT_MS))
493 usleep(MINIMAL_LOOP_SLEEP * USECS_PER_MSEC);
497 EXPECT_TRUE(pData1.finished); // thread should finally be finished
499 // Cleanup Everything
501 ca_mutex_free(sharedMutex);
504 ca_cond_free(sharedCond);
506 ca_thread_pool_free(mythreadpool);
509 // Disabled because this should no longer be a valid test
510 TEST(ConditionTests, DISABLED_TC_06_INVALIDWAIT)
513 ca_mutex sharedMutex = ca_mutex_new();
514 ca_cond sharedCond = ca_cond_new();
516 ca_mutex_lock(sharedMutex);
518 int ret = ca_cond_wait_for(NULL, sharedMutex, 5000);
519 EXPECT_EQ(CA_WAIT_INVAL,ret);
521 ret = ca_cond_wait_for(sharedCond, NULL, 5000);
522 EXPECT_EQ(CA_WAIT_INVAL,ret);
524 ret = ca_cond_wait_for(NULL, NULL, 5000);
525 EXPECT_EQ(CA_WAIT_INVAL,ret);
527 ca_mutex_unlock(sharedMutex);
529 // Cleanup Everything
531 ca_mutex_free(sharedMutex);
533 ca_cond_free(sharedCond);
536 TEST(ConditionTests, TC_07_WAITDURATION)
538 const double TARGET_WAIT = 1.125;
540 ca_mutex sharedMutex = ca_mutex_new();
541 ca_cond sharedCond = ca_cond_new();
543 ca_mutex_lock(sharedMutex);
545 uint64_t beg = getAbsTime();
547 CAWaitResult_t ret = ca_cond_wait_for(sharedCond, sharedMutex,
548 TARGET_WAIT * USECS_PER_SEC);
549 EXPECT_EQ(CA_WAIT_TIMEDOUT,ret);
551 uint64_t end = getAbsTime();
553 double secondsDiff = (end - beg) / (double) USECS_PER_SEC;
556 // Windows does not guarantee that the thread will resume execution from a
557 // yield within any given time frame. We will assume that the threads
558 // should have resumed within one second of the requested timeout value.
559 EXPECT_NEAR(TARGET_WAIT, secondsDiff, 1.00);
561 EXPECT_NEAR(TARGET_WAIT, secondsDiff, 0.05);
564 ca_mutex_unlock(sharedMutex);
566 // Cleanup Everything
568 ca_mutex_free(sharedMutex);
570 ca_cond_free(sharedCond);