2 * Copyright (c) 2000 - 2015 Samsung Electronics Co., Ltd. All rights reserved.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
20 #include <secure_socket.h>
24 #include <sys/types.h>
25 #include <sys/timerfd.h>
30 #include <cynara-client.h>
31 #include <cynara-creds-socket.h>
32 #include <cynara-session.h>
34 #include "service_common.h"
37 #define EVT_END_CH 'x'
/*
 * One registered event source serviced by the server thread's select() loop.
 * Only SERVICE_EVENT_TIMER items appear in the visible code; the union-style
 * `info.timer.fd` member and `cbdata` (used by processing_timer_event) live
 * on lines not present in this sampled chunk.
 */
41 struct service_event_item {
/* Fired when the event triggers; per processing_timer_event, a return value < 0 removes and closes the item. */
52 int (*event_cb)(struct service_context *svc_cx, void *data);
58 * Server information and global (only in this file-scope) variables are defined
/*
 * Per-server state: one server thread accepts connections and dispatches
 * packets that client TCB threads push onto packet_list.
 */
60 struct service_context {
61 pthread_t server_thid; /*!< Server thread Id */
62 int fd; /*!< Server socket handle */
64 cynara *p_cynara; /*!< Cynara handle */
66 Eina_List *tcb_list; /*!< TCB list, list of every thread for client connections */
/* Producer: client_packet_pump_main; consumer: server_main.  Guarded by packet_list_lock. */
68 Eina_List *packet_list;
69 pthread_mutex_t packet_list_lock;
/* evt_pipe: "packet queued" wakeups; tcb_pipe: "TCB terminated, collect it" notifications. */
70 int evt_pipe[PIPE_MAX];
71 int tcb_pipe[PIPE_MAX];
/* User-supplied handler; also invoked with packet == NULL to announce TCB termination (see server_main). */
73 int (*service_thread_main)(struct tcb *tcb, struct packet *packet, void *data);
74 void *service_thread_data;
76 Eina_List *event_list;
/* NOTE(review): line below belongs to a separate struct (presumably packet_info, which also holds a tcb pointer) whose surrounding lines are not visible in this chunk — verify against the full file. */
81 struct packet *packet;
86 * Thread Control Block
87 * - The main server will create a thread for every client connections.
88 * When a new client is comming to us, this TCB block will be allocated and initialized.
90 struct tcb { /* Thread controll block */
91 struct service_context *svc_ctx;
92 pthread_t thid; /*!< Thread Id */
93 int fd; /*!< Connection handle */
/* The three strings below are filled by fill_creds() and consumed by check_cynara(). */
94 char *client; /*!< Client socket credential */
95 char *user; /*!< User socket credential */
96 char *session; /*!< Session context for cynara */
101 * Do services for clients
102 * Routing packets to destination processes.
/*
 * Per-client thread entry point.  Runs a select()/recv state machine
 * (RECV_INIT -> RECV_HEADER -> RECV_PAYLOAD -> RECV_DONE), queues completed
 * packets on svc_ctx->packet_list, and wakes the server thread via evt_pipe.
 * Many state-machine lines (error branches, buffer management) are not
 * visible in this sampled chunk.
 */
105 static void *client_packet_pump_main(void *data)
107 struct tcb *tcb = data;
108 struct service_context *svc_ctx = tcb->svc_ctx;
109 struct packet *packet;
117 char evt_ch = EVT_CH;
124 struct packet_info *packet_info;
128 recv_state = RECV_INIT;
131 * Fill connection credentials
/* Credentials are resolved once per connection, before entering the pump loop. */
133 if (!fill_creds(tcb)) {
138 * To escape from the switch statement, we use this ret value
/* Block until the connection fd is readable; NULL timeout means select() waits indefinitely. */
142 FD_SET(tcb->fd, &set);
143 ret = select(tcb->fd + 1, &set, NULL, NULL, NULL);
146 if (errno == EINTR) {
/* NOTE(review): with a NULL timeout this ret == 0 branch should be unreachable — confirm against the hidden lines. */
153 } else if (ret == 0) {
160 if (!FD_ISSET(tcb->fd, &set)) {
169 * Service!!! Receive packet & route packet
171 switch (recv_state) {
/* RECV_INIT: start by reading the fixed-size packet header. */
173 size = packet_header_size();
182 recv_state = RECV_HEADER;
183 /* Go through, don't break from here */
185 ret = secure_socket_recv(tcb->fd, ptr, size - recv_offset, &pid);
/* Header fully received: fold it into the packet under construction, then switch to payload. */
197 if (recv_offset == size) {
198 packet = packet_build(packet, packet_offset, ptr, size);
206 packet_offset += recv_offset;
208 size = packet_payload_size(packet);
/* Zero-length payload: the packet is already complete. */
210 recv_state = RECV_DONE;
215 recv_state = RECV_PAYLOAD;
225 ret = secure_socket_recv(tcb->fd, ptr, size - recv_offset, &pid);
237 if (recv_offset == size) {
238 packet = packet_build(packet, packet_offset, ptr, size);
246 packet_offset += recv_offset;
248 recv_state = RECV_DONE;
258 if (recv_state == RECV_DONE) {
260 * Push this packet to the packet list with TCB
261 * Then the service main function will get this.
263 packet_info = malloc(sizeof(*packet_info));
/* malloc failure path (condition on a hidden line): drop the packet. */
266 packet_destroy(packet);
270 packet_info->packet = packet;
271 packet_info->tcb = tcb;
273 CRITICAL_SECTION_BEGIN(&svc_ctx->packet_list_lock);
274 svc_ctx->packet_list = eina_list_append(svc_ctx->packet_list, packet_info);
275 CRITICAL_SECTION_END(&svc_ctx->packet_list_lock);
/* One wakeup byte per queued packet; server_main reads exactly one byte per packet it consumes. */
277 if (write(svc_ctx->evt_pipe[PIPE_WRITE], &evt_ch, sizeof(evt_ch)) != sizeof(evt_ch)) {
/* Wakeup failed: roll the packet back out of the queue so the server never sees an un-announced entry. */
279 CRITICAL_SECTION_BEGIN(&svc_ctx->packet_list_lock);
280 svc_ctx->packet_list = eina_list_remove(svc_ctx->packet_list, packet_info);
281 CRITICAL_SECTION_END(&svc_ctx->packet_list_lock);
283 packet_destroy(packet);
287 recv_state = RECV_INIT;
/* Thread teardown: detach this TCB from any packets still queued so the server does not touch a dead TCB. */
292 CRITICAL_SECTION_BEGIN(&svc_ctx->packet_list_lock);
293 EINA_LIST_FOREACH(svc_ctx->packet_list, l, packet_info) {
294 if (packet_info->tcb == tcb) {
295 packet_info->tcb = NULL;
298 CRITICAL_SECTION_END(&svc_ctx->packet_list_lock);
302 * Emit a signal to collect this TCB from the SERVER THREAD.
/* NOTE(review): BUG — the `!= sizeof(tcb)` comparison result is discarded; this is an expression
 * statement, so a short/failed write is silently ignored and the server thread would never collect
 * this TCB.  The comparison was presumably meant to guard an error-handling branch. */
304 write(svc_ctx->tcb_pipe[PIPE_WRITE], &tcb, sizeof(tcb)) != sizeof(tcb);
/*
 * Allocate and register a TCB for a freshly accepted client connection and
 * spawn its packet-pump thread.  Error-handling lines (malloc/pthread_create
 * failures) and the `tcb->fd = fd` assignment are on hidden lines — verify.
 */
313 static inline struct tcb *tcb_create(struct service_context *svc_ctx, int fd)
318 tcb = malloc(sizeof(*tcb));
324 tcb->svc_ctx = svc_ctx;
/* New connections start as plain application clients until reclassified via tcb_client_type_set(). */
325 tcb->type = TCB_CLIENT_TYPE_APP;
327 status = pthread_create(&tcb->thid, NULL, client_packet_pump_main, tcb);
/* Only the server thread touches tcb_list, so no lock is taken here. */
333 svc_ctx->tcb_list = eina_list_append(svc_ctx->tcb_list, tcb);
/*
 * Tear down every remaining client TCB.  Called only after the server thread
 * has stopped, hence the no-lock rationale below.  (Name typo "teminate" is
 * part of the existing interface and is kept.)
 */
341 static inline void tcb_teminate_all(struct service_context *svc_ctx)
348 * We don't need to make critical section on here.
349 * If we call this after terminate the server thread first.
350 * Then there is no other thread to access tcb_list.
352 EINA_LIST_FREE(svc_ctx->tcb_list, tcb) {
354 * ASSERT(tcb->fd >= 0);
/* Closing the socket makes the pump thread's select()/recv fail, which drives it to exit ... */
356 secure_socket_destroy_handle(tcb->fd);
/* ... so this join terminates. */
358 status = pthread_join(tcb->thid, &ret);
/*
 * Destroy a single TCB: unregister it, close its connection (forcing the pump
 * thread out of select()), then join the thread.  Runs on the server thread.
 */
368 static inline void tcb_destroy(struct service_context *svc_ctx, struct tcb *tcb)
373 svc_ctx->tcb_list = eina_list_remove(svc_ctx->tcb_list, tcb);
375 * ASSERT(tcb->fd >= 0);
376 * Close the connection, and then collecting the return value of thread
378 secure_socket_destroy_handle(tcb->fd);
383 status = pthread_join(tcb->thid, &ret);
/*
 * Compute the select() nfds bound over the server socket, both wakeup pipes,
 * and every registered timer fd.
 * NOTE(review): the visible lines only track the maximum fd; server_main
 * passes this result directly as select()'s first argument, which must be
 * max_fd + 1 — presumably a `fd += 1` (or `return fd + 1`) exists on one of
 * the hidden lines.  Verify against the full file.
 */
392 static inline int find_max_fd(struct service_context *svc_ctx)
396 struct service_event_item *item;
398 fd = svc_ctx->fd > svc_ctx->tcb_pipe[PIPE_READ] ? svc_ctx->fd : svc_ctx->tcb_pipe[PIPE_READ];
399 fd = fd > svc_ctx->evt_pipe[PIPE_READ] ? fd : svc_ctx->evt_pipe[PIPE_READ];
401 EINA_LIST_FOREACH(svc_ctx->event_list, l, item) {
402 if (item->type == SERVICE_EVENT_TIMER && fd < item->info.timer.fd)
403 fd = item->info.timer.fd;
/*
 * Rebuild the read fd_set watched by server_main: server socket, both wakeup
 * pipes, and all registered timer fds.  (FD_ZERO presumably happens on a
 * hidden line before the sets below.)
 */
414 static inline void update_fdset(struct service_context *svc_ctx, fd_set *set)
417 struct service_event_item *item;
420 FD_SET(svc_ctx->fd, set);
421 FD_SET(svc_ctx->tcb_pipe[PIPE_READ], set);
422 FD_SET(svc_ctx->evt_pipe[PIPE_READ], set);
424 EINA_LIST_FOREACH(svc_ctx->event_list, l, item) {
425 if (item->type == SERVICE_EVENT_TIMER)
426 FD_SET(item->info.timer.fd, set);
/*
 * Dispatch fired timers.  For each triggered timerfd: drain the expiration
 * count, invoke the callback, and if the callback signals removal (< 0 — the
 * continue for the >= 0 case sits on a hidden line) unregister and close the
 * timer.  The safe-foreach + eina_list_data_find re-check tolerates callbacks
 * that remove items from event_list themselves.
 */
434 static inline void processing_timer_event(struct service_context *svc_ctx, fd_set *set)
436 uint64_t expired_count;
439 struct service_event_item *item;
441 EINA_LIST_FOREACH_SAFE(svc_ctx->event_list, l, n, item) {
442 switch (item->type) {
443 case SERVICE_EVENT_TIMER:
444 if (!FD_ISSET(item->info.timer.fd, set))
/* read() consumes the expiration counter; without it the timerfd stays readable forever. */
447 if (read(item->info.timer.fd, &expired_count, sizeof(expired_count)) == sizeof(expired_count)) {
448 if (item->event_cb(svc_ctx, item->cbdata) >= 0)
/* Callback may already have deleted the item via service_common_del_timer — don't double-remove. */
452 if (!eina_list_data_find(svc_ctx->event_list, item))
455 svc_ctx->event_list = eina_list_remove(svc_ctx->event_list, item);
456 close(item->info.timer.fd);
467 * Accept new client connections
468 * And create a new thread for service.
470 * Create Client threads & Destroying them
/*
 * Server thread main loop: select() over the listening socket, the two
 * wakeup pipes, and timer fds; accept clients, collect dead TCBs, dispatch
 * queued packets, and service timers.  On loop exit, flush remaining packets
 * and terminate all TCBs.  Numerous error branches are on hidden lines.
 */
473 static void *server_main(void *data)
475 struct service_context *svc_ctx = data;
482 struct packet_info *packet_info;
/* NOTE(review): select()'s first argument must be max_fd + 1; this relies on find_max_fd returning max + 1 on a hidden line — verify. */
485 fd = find_max_fd(svc_ctx);
486 update_fdset(svc_ctx, &set);
488 ret = select(fd, &set, NULL, NULL, NULL);
491 if (errno == EINTR) {
495 } else if (ret == 0) {
/* New inbound connection on the listening socket. */
500 if (FD_ISSET(svc_ctx->fd, &set)) {
501 client_fd = secure_socket_get_connection_handle(svc_ctx->fd);
507 tcb = tcb_create(svc_ctx, client_fd);
/* tcb_create failed (check on a hidden line): close the orphaned connection. */
509 secure_socket_destroy_handle(client_fd);
/* A pump thread announced its own termination by writing its TCB pointer into tcb_pipe. */
512 if (FD_ISSET(svc_ctx->tcb_pipe[PIPE_READ], &set)) {
513 if (read(svc_ctx->tcb_pipe[PIPE_READ], &tcb, sizeof(tcb)) != sizeof(tcb)) {
520 * Invoke the service thread main, to notify the termination of a TCB
/* packet == NULL is the "client disconnected" notification contract. */
522 ret = svc_ctx->service_thread_main(tcb, NULL, svc_ctx->service_thread_data);
525 * at this time, the client thread can access this tcb.
526 * how can I protect this TCB from deletion without disturbing the server thread?
528 tcb_destroy(svc_ctx, tcb);
/* One queued packet per wakeup byte on evt_pipe (matching client_packet_pump_main's write). */
531 if (FD_ISSET(svc_ctx->evt_pipe[PIPE_READ], &set)) {
532 if (read(svc_ctx->evt_pipe[PIPE_READ], &evt_ch, sizeof(evt_ch)) != sizeof(evt_ch)) {
537 CRITICAL_SECTION_BEGIN(&svc_ctx->packet_list_lock);
538 packet_info = eina_list_nth(svc_ctx->packet_list, 0);
539 svc_ctx->packet_list = eina_list_remove(svc_ctx->packet_list, packet_info);
540 CRITICAL_SECTION_END(&svc_ctx->packet_list_lock);
544 * What happens if the client thread is terminated, so the packet_info->tcb is deleted
545 * while processing svc_ctx->service_thread_main?
547 ret = svc_ctx->service_thread_main(packet_info->tcb, packet_info->packet, svc_ctx->service_thread_data);
549 packet_destroy(packet_info->packet);
553 processing_timer_event(svc_ctx, &set);
554 /* If there is no such triggered FD? */
558 * Consuming all pended packets before terminates server thread.
560 * If the server thread is terminated, we should flush all pended packets.
561 * And we should services them.
562 * While processing this routine, the mutex is locked.
563 * So every other client thread will be slowed down, sequently, every clients can meet problems.
564 * But in case of termination of server thread, there could be systemetic problem.
565 * This only should be happenes while terminating the master daemon process.
567 CRITICAL_SECTION_BEGIN(&svc_ctx->packet_list_lock);
568 EINA_LIST_FREE(svc_ctx->packet_list, packet_info) {
/* Drain the matching wakeup byte so the pipe stays in sync with the list; result intentionally unused. */
569 ret = read(svc_ctx->evt_pipe[PIPE_READ], &evt_ch, sizeof(evt_ch));
570 ret = svc_ctx->service_thread_main(packet_info->tcb, packet_info->packet, svc_ctx->service_thread_data);
571 packet_destroy(packet_info->packet);
574 CRITICAL_SECTION_END(&svc_ctx->packet_list_lock);
576 tcb_teminate_all(svc_ctx);
/*
 * Create a service: listening secure socket, wakeup pipes, packet-list mutex,
 * cynara handle, and finally the server thread.  Each failure branch unwinds
 * everything initialized so far, in order (the free(svc_ctx)/return lines are
 * hidden in this sampled chunk).
 *
 * @param addr  address for secure_socket_create_server()
 * @param service_thread_main  user packet handler; also called with a NULL
 *        packet to signal client termination (see server_main)
 * @param data  opaque pointer handed back to service_thread_main
 * @return new context, or NULL on failure (returns on hidden lines)
 */
584 struct service_context *service_common_create(const char *addr, int (*service_thread_main)(struct tcb *tcb, struct packet *packet, void *data), void *data)
587 struct service_context *svc_ctx = NULL;
589 if (!service_thread_main || !addr) {
593 svc_ctx = calloc(1, sizeof(*svc_ctx));
598 svc_ctx->fd = secure_socket_create_server(addr);
599 if (svc_ctx->fd < 0) {
604 svc_ctx->service_thread_main = service_thread_main;
605 svc_ctx->service_thread_data = data;
/* Listening socket must not leak across exec and must not block the accept loop. */
607 fcntl(svc_ctx->fd, F_SETFD, FD_CLOEXEC);
609 fcntl(svc_ctx->fd, F_SETFL, O_NONBLOCK);
611 if (pipe2(svc_ctx->evt_pipe, O_NONBLOCK | O_CLOEXEC) < 0) {
612 secure_socket_destroy_handle(svc_ctx->fd);
617 if (pipe2(svc_ctx->tcb_pipe, O_NONBLOCK | O_CLOEXEC) < 0) {
618 CLOSE_PIPE(svc_ctx->evt_pipe);
619 secure_socket_destroy_handle(svc_ctx->fd);
624 status = pthread_mutex_init(&svc_ctx->packet_list_lock, NULL);
626 CLOSE_PIPE(svc_ctx->evt_pipe);
627 CLOSE_PIPE(svc_ctx->tcb_pipe);
628 secure_socket_destroy_handle(svc_ctx->fd);
633 ret = cynara_initialize(&svc_ctx->p_cynara, NULL);
634 if (ret != CYNARA_API_SUCCESS) {
/* NOTE(review): BUG — arguments are swapped here.  print_cynara_error is declared as
 * (int ret, char *msg) and every other call site passes (ret, "msg"); this call passes
 * the string where the int is expected and vice versa, which mismatches the "%s (%d)"
 * format inside print_cynara_error (undefined behavior).  Should be
 * print_cynara_error(ret, "Cynara initialize failed"). */
635 print_cynara_error("Cynara initialize failed", ret);
636 status = pthread_mutex_destroy(&svc_ctx->packet_list_lock);
637 CLOSE_PIPE(svc_ctx->evt_pipe);
638 CLOSE_PIPE(svc_ctx->tcb_pipe);
639 secure_socket_destroy_handle(svc_ctx->fd);
644 status = pthread_create(&svc_ctx->server_thid, NULL, server_main, svc_ctx);
646 status = pthread_mutex_destroy(&svc_ctx->packet_list_lock);
647 CLOSE_PIPE(svc_ctx->evt_pipe);
648 CLOSE_PIPE(svc_ctx->tcb_pipe);
649 secure_socket_destroy_handle(svc_ctx->fd);
650 cynara_finish(svc_ctx->p_cynara);
/*
 * Shut the service down: closing the listening socket drives server_main out
 * of select(), the join waits for its flush/teardown path, then the remaining
 * resources are released.  (NULL-check and return are on hidden lines.)
 */
662 int service_common_destroy(struct service_context *svc_ctx)
672 * Terminate server thread
674 secure_socket_destroy_handle(svc_ctx->fd);
676 status = pthread_join(svc_ctx->server_thid, &ret);
677 status = pthread_mutex_destroy(&svc_ctx->packet_list_lock);
679 (void)cynara_finish(svc_ctx->p_cynara);
681 CLOSE_PIPE(svc_ctx->evt_pipe);
682 CLOSE_PIPE(svc_ctx->tcb_pipe);
/* Trivial TCB accessors; their one-line bodies (and NULL-argument guards, if any) are on hidden lines. */
691 int tcb_fd(struct tcb *tcb)
703 int tcb_client_type(struct tcb *tcb)
715 int tcb_client_type_set(struct tcb *tcb, enum tcb_type type)
728 struct service_context *tcb_svc_ctx(struct tcb *tcb)
/* Send one packet to a single client; returns secure_socket_send's result directly. */
740 int service_common_unicast_packet(struct tcb *tcb, struct packet *packet)
745 return secure_socket_send(tcb->fd, (void *)packet_data(packet), packet_size(packet));
/*
 * Broadcast a packet to every connected client of the given type, skipping
 * the sender itself.  Per-send error handling and the return value are on
 * hidden lines.
 */
752 int service_common_multicast_packet(struct tcb *tcb, struct packet *packet, int type)
756 struct service_context *svc_ctx;
762 svc_ctx = tcb->svc_ctx;
764 EINA_LIST_FOREACH(svc_ctx->tcb_list, l, target) {
/* Do not echo to the originator; only deliver to clients of the requested type. */
765 if (target == tcb || target->type != type) {
769 ret = secure_socket_send(target->fd, (void *)packet_data(packet), packet_size(packet));
778 struct service_event_item *service_common_add_timer(struct service_context *svc_ctx, double timer, int (*timer_cb)(struct service_context *svc_cx, void *data), void *data)
780 struct service_event_item *item;
781 struct itimerspec spec;
783 item = calloc(1, sizeof(*item));
788 item->type = SERVICE_EVENT_TIMER;
789 item->info.timer.fd = timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK | TFD_CLOEXEC);
790 if (item->info.timer.fd < 0) {
795 spec.it_interval.tv_sec = (time_t)timer;
796 spec.it_interval.tv_nsec = (timer - spec.it_interval.tv_sec) * 1000000000;
797 spec.it_value.tv_sec = 0;
798 spec.it_value.tv_nsec = 0;
800 if (timerfd_settime(item->info.timer.fd, 0, &spec, NULL) < 0) {
801 close(item->info.timer.fd);
806 item->event_cb = timer_cb;
809 svc_ctx->event_list = eina_list_append(svc_ctx->event_list, item);
/*
 * Unregister a timer previously created with service_common_add_timer and
 * close its fd.  The error return for an unknown item, the free(item), and
 * the success return are on hidden lines.
 */
817 int service_common_del_timer(struct service_context *svc_ctx, struct service_event_item *item)
/* Guard against double-deletion / stale pointers before touching the item. */
819 if (!eina_list_data_find(svc_ctx->event_list, item)) {
823 svc_ctx->event_list = eina_list_remove(svc_ctx->event_list, item);
825 close(item->info.timer.fd);
/*
 * Log a cynara error code with its human-readable description to stderr.
 * Parameter order is (ret, msg) — note one caller in this file passes them
 * swapped; see the cynara_initialize failure path in service_common_create.
 * buff is zero-filled, so the final fprintf prints an empty string when
 * cynara_strerror fails rather than reading uninitialized memory.
 */
831 void print_cynara_error(int ret, char *msg) {
832 char buff[255] = {0};
833 if (cynara_strerror(ret, buff, sizeof(buff)) != CYNARA_API_SUCCESS) {
834 fprintf(stderr, "Cynara strerror failed\n");
836 fprintf(stderr, "%s (%d) : %s\n", msg, ret, buff);
/*
 * Resolve the peer's cynara credentials (client smack/label, user, session)
 * from the connection socket and stash them on the TCB for later
 * check_cynara() calls.  Early-return error paths (and whatever cleanup of
 * already-allocated creds they perform) are on hidden lines.
 * NOTE(review): the `tcb->user = user;` assignment is not visible in this
 * chunk — confirm it exists near the client/session assignments below.
 */
839 int fill_creds(struct tcb *tcb) {
843 char *session = NULL;
846 ret = cynara_creds_socket_get_client(tcb->fd, CLIENT_METHOD_DEFAULT, &client);
847 if (ret != CYNARA_API_SUCCESS) {
848 print_cynara_error(ret, "Cynara creds socket get client failed");
852 ret = cynara_creds_socket_get_user(tcb->fd, USER_METHOD_DEFAULT, &user);
853 if (ret != CYNARA_API_SUCCESS) {
855 print_cynara_error(ret, "Cynara creds socket get user failed");
/* The peer pid is only needed transiently, to derive the session string below. */
859 ret = cynara_creds_socket_get_pid(tcb->fd, &pid);
860 if (ret != CYNARA_API_SUCCESS) {
863 print_cynara_error(ret, "Cynara creds socket get pid failed");
867 session = cynara_session_from_pid(pid);
/* NOTE(review): message below lacks a trailing '\n', unlike the other stderr logs — likely unintended. */
871 fprintf(stderr, "Cynara session from pid failed");
/* Ownership of the cynara-allocated strings transfers to the TCB here. */
876 tcb->client = client;
877 tcb->session = session;
881 int check_cynara(struct tcb *tcb) {
884 static const char *privilege = "http://tizen.org/privilege/notification";
886 ret = cynara_check(tcb->svc_ctx->p_cynara, tcb->client, tcb->session, tcb->user, privilege);
888 if (ret == CYNARA_API_ACCESS_ALLOWED) {
891 if (ret != CYNARA_API_ACCESS_DENIED) {
892 print_cynara_error(ret, "Cynara check failed");