2 * Copyright (C) 2013-2015 Kay Sievers
3 * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
4 * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
5 * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
6 * Copyright (C) 2013-2015 Linux Foundation
7 * Copyright (C) 2014-2015 Djalal Harouni <tixxdz@opendz.org>
9 * kdbus is free software; you can redistribute it and/or modify it under
10 * the terms of the GNU Lesser General Public License as published by the
11 * Free Software Foundation; either version 2.1 of the License, or (at
12 * your option) any later version.
15 #include <linux/audit.h>
16 #include <linux/file.h>
18 #include <linux/fs_struct.h>
19 #include <linux/hashtable.h>
20 #include <linux/idr.h>
21 #include <linux/init.h>
22 #include <linux/math64.h>
24 #include <linux/module.h>
25 #include <linux/mutex.h>
26 #include <linux/path.h>
27 #include <linux/poll.h>
28 #include <linux/sched.h>
29 #include <linux/shmem_fs.h>
30 #include <linux/sizes.h>
31 #include <linux/slab.h>
32 #include <linux/syscalls.h>
33 #include <linux/uio.h>
36 #include "connection.h"
52 #define KDBUS_CONN_ACTIVE_BIAS (INT_MIN + 2)
53 #define KDBUS_CONN_ACTIVE_NEW (INT_MIN + 1)
55 /* Disable internal kdbus policy - possibilities of connections to own, see and
56  * talk to names are restricted by libdbuspolicy library and LSM hooks
 */
58 #define DISABLE_KDBUS_POLICY
/*
 * kdbus_conn_new() - allocate and initialize a new connection object
 *
 * NOTE(review): this excerpt is a fragmentary listing -- the opening brace,
 * several local declarations, the goto error-unwind labels and the final
 * return are missing. Comments below describe only what the visible lines
 * establish.
 *
 * Validates the HELLO parameters, allocates and initializes the connection,
 * collects (possibly faked) metadata, accounts the connection against a
 * kdbus_user, and publishes the bus's bloom parameter to the caller's pool.
 * Returns a connection pointer or an ERR_PTR() on failure.
 */
60 static struct kdbus_conn *kdbus_conn_new(struct kdbus_ep *ep,
62 struct kdbus_cmd_hello *hello,
64 const struct kdbus_creds *creds,
65 const struct kdbus_pids *pids,
67 const char *conn_description)
69 #ifdef CONFIG_DEBUG_LOCK_ALLOC
/* lockdep class key for the connection's active-reference dep_map */
70 static struct lock_class_key __key;
72 struct kdbus_pool_slice *slice = NULL;
73 struct kdbus_bus *bus = ep->bus;
74 struct kdbus_conn *conn;
75 u64 attach_flags_send;
76 u64 attach_flags_recv;
78 bool is_policy_holder;
89 struct kdbus_bloom_parameter bloom;
/* capability checks against the endpoint the caller opened */
92 privileged = kdbus_ep_is_privileged(ep, file);
93 owner = kdbus_ep_is_owner(ep, file);
/* decode the special connection roles requested via HELLO flags */
95 is_monitor = hello->flags & KDBUS_HELLO_MONITOR;
96 is_activator = hello->flags & KDBUS_HELLO_ACTIVATOR;
97 is_policy_holder = hello->flags & KDBUS_HELLO_POLICY_HOLDER;
/*
 * Parameter validation: pool must be page-aligned and non-empty; the
 * three special roles are mutually exclusive; a name is required iff
 * the connection is an activator or policy holder; monitors are not
 * allowed on custom endpoints; special roles and faked credentials
 * require endpoint ownership.
 */
99 if (!hello->pool_size || !IS_ALIGNED(hello->pool_size, PAGE_SIZE))
100 return ERR_PTR(-EINVAL);
101 if (is_monitor + is_activator + is_policy_holder > 1)
102 return ERR_PTR(-EINVAL);
103 if (name && !is_activator && !is_policy_holder)
104 return ERR_PTR(-EINVAL);
105 if (!name && (is_activator || is_policy_holder))
106 return ERR_PTR(-EINVAL);
107 if (name && !kdbus_name_is_valid(name, true))
108 return ERR_PTR(-EINVAL);
109 if (is_monitor && ep->user)
110 return ERR_PTR(-EOPNOTSUPP);
111 if (!owner && (is_activator || is_policy_holder || is_monitor))
112 return ERR_PTR(-EPERM);
113 if (!owner && (creds || pids || seclabel))
114 return ERR_PTR(-EPERM);
/* normalize/validate requested metadata attach flags (both directions) */
116 ret = kdbus_sanitize_attach_flags(hello->attach_flags_send,
121 ret = kdbus_sanitize_attach_flags(hello->attach_flags_recv,
126 conn = kzalloc(sizeof(*conn), GFP_KERNEL);
128 return ERR_PTR(-ENOMEM);
/* base initialization; conn starts in the NEW (not yet connected) state */
130 kref_init(&conn->kref);
131 atomic_set(&conn->active, KDBUS_CONN_ACTIVE_NEW);
132 #ifdef CONFIG_DEBUG_LOCK_ALLOC
133 lockdep_init_map(&conn->dep_map, "s_active", &__key, 0);
135 mutex_init(&conn->lock);
136 INIT_LIST_HEAD(&conn->names_list);
137 INIT_LIST_HEAD(&conn->reply_list);
138 atomic_set(&conn->request_count, 0);
139 atomic_set(&conn->lost_count, 0);
140 INIT_DELAYED_WORK(&conn->work, kdbus_reply_list_scan_work);
/* pin the opener's credentials, pid and filesystem root for later checks */
141 conn->cred = get_cred(file->f_cred);
142 conn->pid = get_pid(task_pid(current));
143 get_fs_root(current->fs, &conn->root_path);
144 init_waitqueue_head(&conn->wait);
145 kdbus_queue_init(&conn->queue);
146 conn->privileged = privileged;
148 conn->ep = kdbus_ep_ref(ep);
/* unique connection id, allocated from the domain-wide counter */
149 conn->id = atomic64_inc_return(&bus->domain->last_id);
150 conn->flags = hello->flags;
151 atomic64_set(&conn->attach_flags_send, attach_flags_send);
152 atomic64_set(&conn->attach_flags_recv, attach_flags_recv);
153 INIT_LIST_HEAD(&conn->monitor_entry);
155 if (conn_description) {
156 conn->description = kstrdup(conn_description, GFP_KERNEL);
157 if (!conn->description) {
/* receive pool and match database for this connection */
163 conn->pool = kdbus_pool_new(conn->description, hello->pool_size);
164 if (IS_ERR(conn->pool)) {
165 ret = PTR_ERR(conn->pool);
170 conn->match_db = kdbus_match_db_new();
171 if (IS_ERR(conn->match_db)) {
172 ret = PTR_ERR(conn->match_db);
173 conn->match_db = NULL;
177 /* return properties of this connection to the caller */
178 hello->bus_flags = bus->bus_flags;
179 hello->id = conn->id;
181 BUILD_BUG_ON(sizeof(bus->id128) != sizeof(hello->id128));
182 memcpy(hello->id128, bus->id128, sizeof(hello->id128));
184 /* privileged processes can impersonate somebody else */
185 if (creds || pids || seclabel) {
186 conn->meta_fake = kdbus_meta_fake_new();
187 if (IS_ERR(conn->meta_fake)) {
188 ret = PTR_ERR(conn->meta_fake);
189 conn->meta_fake = NULL;
193 ret = kdbus_meta_fake_collect(conn->meta_fake,
194 creds, pids, seclabel);
/* otherwise collect real process metadata of the creating task */
198 conn->meta_proc = kdbus_meta_proc_new();
199 if (IS_ERR(conn->meta_proc)) {
200 ret = PTR_ERR(conn->meta_proc);
201 conn->meta_proc = NULL;
205 ret = kdbus_meta_proc_collect(conn->meta_proc,
208 KDBUS_ATTACH_AUXGROUPS |
209 KDBUS_ATTACH_TID_COMM |
210 KDBUS_ATTACH_PID_COMM |
212 KDBUS_ATTACH_CMDLINE |
213 KDBUS_ATTACH_CGROUP |
215 KDBUS_ATTACH_SECLABEL |
222 * Account the connection against the current user (UID), or for
223 * custom endpoints use the anonymous user assigned to the endpoint.
224 * Note that limits are always accounted against the real UID, not
225 * the effective UID (cred->user always points to the accounting of
226 * cred->uid, not cred->euid).
227 * In case the caller is privileged, we allow changing the accounting
231 conn->user = kdbus_user_ref(ep->user);
235 if (conn->meta_fake && uid_valid(conn->meta_fake->uid) &&
237 uid = conn->meta_fake->uid;
239 uid = conn->cred->uid;
241 conn->user = kdbus_user_lookup(ep->bus->domain, uid);
242 if (IS_ERR(conn->user)) {
243 ret = PTR_ERR(conn->user);
/* enforce the per-user connection limit */
249 if (atomic_inc_return(&conn->user->connections) > KDBUS_USER_MAX_CONN) {
250 /* decremented by destructor as conn->user is valid */
/* expose the bus's bloom-filter parameters via a slice in the new pool */
255 bloom_item.size = sizeof(bloom_item);
256 bloom_item.type = KDBUS_ITEM_BLOOM_PARAMETER;
257 bloom_item.bloom = bus->bloom;
258 kdbus_kvec_set(&kvec, &bloom_item, bloom_item.size, &items_size);
260 slice = kdbus_pool_slice_alloc(conn->pool, items_size, false);
262 ret = PTR_ERR(slice);
267 ret = kdbus_pool_slice_copy_kvec(slice, 0, &kvec, 1, items_size);
271 kdbus_pool_slice_publish(slice, &hello->offset, &hello->items_size);
272 kdbus_pool_slice_release(slice);
/* error path (label missing from excerpt): release slice, drop conn ref */
277 kdbus_pool_slice_release(slice);
278 kdbus_conn_unref(conn);
/*
 * __kdbus_conn_free() - kref release callback, destroys a connection
 *
 * Called when the last reference is dropped. Asserts the connection is
 * fully quiesced (inactive, no pending work, empty queues/lists), undoes
 * the per-user accounting and releases all owned sub-objects.
 * NOTE(review): excerpt only -- the final kfree() of the excerpt's original
 * is not visible here.
 */
282 static void __kdbus_conn_free(struct kref *kref)
284 struct kdbus_conn *conn = container_of(kref, struct kdbus_conn, kref);
/* sanity: a freed connection must be disconnected and drained */
286 WARN_ON(kdbus_conn_active(conn));
287 WARN_ON(delayed_work_pending(&conn->work));
288 WARN_ON(!list_empty(&conn->queue.msg_list));
289 WARN_ON(!list_empty(&conn->names_list));
290 WARN_ON(!list_empty(&conn->reply_list));
/* undo the accounting taken in kdbus_conn_new() */
293 atomic_dec(&conn->user->connections);
294 kdbus_user_unref(conn->user);
/* release owned sub-objects and pinned task state */
297 kdbus_meta_fake_free(conn->meta_fake);
298 kdbus_meta_proc_unref(conn->meta_proc);
299 kdbus_match_db_free(conn->match_db);
300 kdbus_pool_free(conn->pool);
301 kdbus_ep_unref(conn->ep);
302 path_put(&conn->root_path);
304 put_cred(conn->cred);
305 kfree(conn->description);
311 * kdbus_conn_ref() - take a connection reference
312 * @conn: Connection, may be %NULL
314 * Return: the connection itself
/* NOTE(review): excerpt only -- the NULL guard and `return conn;` of the
 * original are not visible in this listing. */
316 struct kdbus_conn *kdbus_conn_ref(struct kdbus_conn *conn)
319 kref_get(&conn->kref);
324 * kdbus_conn_unref() - drop a connection reference
325 * @conn: Connection (may be NULL)
327 * When the last reference is dropped, the connection's internal structure
/* is freed via __kdbus_conn_free(). NOTE(review): excerpt only -- the NULL
 * guard and return statement are not visible in this listing. */
332 struct kdbus_conn *kdbus_conn_unref(struct kdbus_conn *conn)
335 kref_put(&conn->kref, __kdbus_conn_free);
340 * kdbus_conn_active() - connection is not disconnected
341 * @conn: Connection to check
343 * Return true if the connection was not disconnected, yet. Note that a
344 * connection might be disconnected asynchronously, unless you hold the
345 * connection lock. If that's not suitable for you, see kdbus_conn_acquire() to
346 * suppress connection shutdown for a short period.
348 * Return: true if the connection is still active
350 bool kdbus_conn_active(const struct kdbus_conn *conn)
/* 'active' is negative (biased) once disconnect started, and also negative
 * in the NEW state; only a non-negative counter means "connected". */
352 return atomic_read(&conn->active) >= 0;
356 * kdbus_conn_acquire() - acquire an active connection reference
359 * Users can close a connection via KDBUS_BYEBYE (or by destroying the
360 * endpoint/bus/...) at any time. Whenever this happens, we should deny any
361 * user-visible action on this connection and signal ECONNRESET instead.
362 * To avoid testing for connection availability everytime you take the
363 * connection-lock, you can acquire a connection for short periods.
365 * By calling kdbus_conn_acquire(), you gain an "active reference" to the
366 * connection. You must also hold a regular reference at any time! As long as
367 * you hold the active-ref, the connection will not be shut down. However, if
368 * the connection was shut down, you can never acquire an active-ref again.
370 * kdbus_conn_disconnect() disables the connection and then waits for all active
371 * references to be dropped. It will also wake up any pending operation.
372 * However, you must not sleep for an indefinite period while holding an
373 * active-reference. Otherwise, kdbus_conn_disconnect() might stall. If you need
374 * to sleep for an indefinite period, either release the reference and try to
375 * acquire it again after waking up, or make kdbus_conn_disconnect() wake up
378 * Return: 0 on success, negative error code on failure.
380 int kdbus_conn_acquire(struct kdbus_conn *conn)
/* fails once the counter has gone negative, i.e. after disconnect started
 * (excerpt: the error return for that case is not visible here) */
382 if (!atomic_inc_unless_negative(&conn->active))
385 #ifdef CONFIG_DEBUG_LOCK_ALLOC
/* tell lockdep we hold a shared "s_active"-style reference */
386 rwsem_acquire_read(&conn->dep_map, 0, 1, _RET_IP_);
393 * kdbus_conn_release() - release an active connection reference
396 * This releases an active reference that has been acquired via
397 * kdbus_conn_acquire(). If the connection was already disabled and this is the
398 * last active-ref that is dropped, the disconnect-waiter will be woken up and
399 * properly close the connection.
401 void kdbus_conn_release(struct kdbus_conn *conn)
408 #ifdef CONFIG_DEBUG_LOCK_ALLOC
409 rwsem_release(&conn->dep_map, 1, _RET_IP_);
/* when the counter drains down to exactly the bias value, the last
 * active-ref is gone and the disconnect-waiter must be woken */
412 v = atomic_dec_return(&conn->active);
413 if (v != KDBUS_CONN_ACTIVE_BIAS)
416 wake_up_all(&conn->wait);
/*
 * kdbus_conn_connect() - link a freshly created connection into the bus
 * @conn: connection created by kdbus_conn_new(), still in NEW state
 * @name: activator name to acquire, or NULL
 *
 * Registers the connection in the endpoint and bus tables, makes it
 * active, emits an ID_ADD notification (except for monitors) and, for
 * activators, acquires the well-known name.
 * NOTE(review): excerpt only -- some error returns and the final success
 * return are not visible in this listing.
 */
419 static int kdbus_conn_connect(struct kdbus_conn *conn, const char *name)
421 struct kdbus_ep *ep = conn->ep;
422 struct kdbus_bus *bus = ep->bus;
/* must only be called once, on a connection that was never connected */
425 if (WARN_ON(atomic_read(&conn->active) != KDBUS_CONN_ACTIVE_NEW))
428 /* make sure the ep-node is active while we add our connection */
429 if (!kdbus_node_acquire(&ep->node))
432 /* lock order: domain -> bus -> ep -> names -> conn */
433 mutex_lock(&ep->lock)
434 down_write(&bus->conn_rwlock);
436 /* link into monitor list */
437 if (kdbus_conn_is_monitor(conn))
438 list_add_tail(&conn->monitor_entry, &bus->monitors_list);
440 /* link into bus and endpoint */
441 list_add_tail(&conn->ep_entry, &ep->conn_list);
442 hash_add(bus->conn_hash, &conn->hentry, conn->id);
444 /* enable lookups and acquire active ref */
445 atomic_set(&conn->active, 1);
446 #ifdef CONFIG_DEBUG_LOCK_ALLOC
447 rwsem_acquire_read(&conn->dep_map, 0, 1, _RET_IP_);
450 up_write(&bus->conn_rwlock);
451 mutex_unlock(&ep->lock);
453 kdbus_node_release(&ep->node);
456 * Notify subscribers about the new active connection, unless it is
457 * a monitor. Monitors are invisible on the bus, can't be addressed
458 * directly, and won't cause any notifications.
460 if (!kdbus_conn_is_monitor(conn)) {
461 ret = kdbus_notify_id_change(bus, KDBUS_ITEM_ID_ADD,
462 conn->id, conn->flags);
464 goto exit_disconnect;
467 if (kdbus_conn_is_activator(conn)) {
468 u64 flags = KDBUS_NAME_ACTIVATOR;
/* activators were validated in kdbus_conn_new() to carry a name */
470 if (WARN_ON(!name)) {
472 goto exit_disconnect;
475 ret = kdbus_name_acquire(bus->name_registry, conn, name,
478 goto exit_disconnect;
/* success: drop the active-ref taken above, flush pending notifications */
481 kdbus_conn_release(conn);
482 kdbus_notify_flush(bus);
/* error path: undo activation and tear the connection down again */
486 kdbus_conn_release(conn);
487 kdbus_conn_disconnect(conn, false);
492 * kdbus_conn_disconnect() - disconnect a connection
493 * @conn: The connection to disconnect
494 * @ensure_queue_empty: Flag to indicate if the call should fail in
495 * case the connection's message list is not
498 * If @ensure_queue_empty is true, and the connection has pending messages,
499 * -EBUSY is returned.
501 * Return: 0 on success, negative errno on failure
503 int kdbus_conn_disconnect(struct kdbus_conn *conn, bool ensure_queue_empty)
505 struct kdbus_queue_entry *entry, *tmp;
506 struct kdbus_bus *bus = conn->ep->bus;
507 struct kdbus_reply *r, *r_tmp;
508 struct kdbus_conn *c;
/* decide under conn->lock whether there is anything to disconnect */
511 mutex_lock(&conn->lock);
512 v = atomic_read(&conn->active);
513 if (v == KDBUS_CONN_ACTIVE_NEW) {
514 /* was never connected */
515 mutex_unlock(&conn->lock);
520 mutex_unlock(&conn->lock);
523 if (ensure_queue_empty && !list_empty(&conn->queue.msg_list)) {
525 mutex_unlock(&conn->lock);
/* bias the counter negative: no new active-refs can be acquired */
529 atomic_add(KDBUS_CONN_ACTIVE_BIAS, &conn->active);
530 mutex_unlock(&conn->lock);
532 wake_up_interruptible(&conn->wait);
534 #ifdef CONFIG_DEBUG_LOCK_ALLOC
535 rwsem_acquire(&conn->dep_map, 0, 0, _RET_IP_);
536 if (atomic_read(&conn->active) != KDBUS_CONN_ACTIVE_BIAS)
537 lock_contended(&conn->dep_map, _RET_IP_);
/* wait for all outstanding active-refs to be released */
540 wait_event(conn->wait,
541 atomic_read(&conn->active) == KDBUS_CONN_ACTIVE_BIAS);
543 #ifdef CONFIG_DEBUG_LOCK_ALLOC
544 lock_acquired(&conn->dep_map, _RET_IP_);
545 rwsem_release(&conn->dep_map, 1, _RET_IP_);
/* no more users: stop the reply-scan worker and drop policy entries */
548 cancel_delayed_work_sync(&conn->work);
549 kdbus_policy_remove_owner(&conn->ep->bus->policy_db, conn);
551 /* lock order: domain -> bus -> ep -> names -> conn */
552 mutex_lock(&conn->ep->lock);
553 down_write(&bus->conn_rwlock);
555 /* remove from bus and endpoint */
556 hash_del(&conn->hentry);
557 list_del(&conn->monitor_entry);
558 list_del(&conn->ep_entry);
560 up_write(&bus->conn_rwlock);
561 mutex_unlock(&conn->ep->lock);
564 * Remove all names associated with this connection; this possibly
565 * moves queued messages back to the activator connection.
567 kdbus_name_release_all(bus->name_registry, conn);
569 /* if we die while other connections wait for our reply, notify them */
570 mutex_lock(&conn->lock);
571 list_for_each_entry_safe(entry, tmp, &conn->queue.msg_list, entry) {
573 kdbus_notify_reply_dead(bus,
574 entry->reply->reply_dst->id,
575 entry->reply->cookie);
576 kdbus_queue_entry_free(entry);
579 list_for_each_entry_safe(r, r_tmp, &conn->reply_list, entry)
580 kdbus_reply_unlink(r);
581 mutex_unlock(&conn->lock);
583 /* lock order: domain -> bus -> ep -> names -> conn */
/* walk all remaining peers and cancel replies they expect from us */
584 down_read(&bus->conn_rwlock);
585 hash_for_each(bus->conn_hash, i, c, hentry) {
586 mutex_lock(&c->lock);
587 list_for_each_entry_safe(r, r_tmp, &c->reply_list, entry) {
588 if (r->reply_src != conn)
592 kdbus_sync_reply_wakeup(r, -EPIPE);
594 /* send a 'connection dead' notification */
595 kdbus_notify_reply_dead(bus, c->id, r->cookie);
597 kdbus_reply_unlink(r);
599 mutex_unlock(&c->lock);
601 up_read(&bus->conn_rwlock);
/* monitors were never announced, so don't announce their removal either */
603 if (!kdbus_conn_is_monitor(conn))
604 kdbus_notify_id_change(bus, KDBUS_ITEM_ID_REMOVE,
605 conn->id, conn->flags);
607 kdbus_notify_flush(bus);
613 * kdbus_conn_has_name() - check if a connection owns a name
615 * @name: Well-know name to check for
617 * The caller must hold the registry lock of conn->ep->bus.
619 * Return: true if the name is currently owned by the connection
621 bool kdbus_conn_has_name(struct kdbus_conn *conn, const char *name)
623 struct kdbus_name_owner *owner;
625 lockdep_assert_held(&conn->ep->bus->name_registry->rwlock);
/* only primary ownership counts; queued (pending) claims are skipped.
 * NOTE(review): excerpt only -- the `return true;`/`return false;` lines
 * are not visible in this listing. */
627 list_for_each_entry(owner, &conn->names_list, conn_entry)
628 if (!(owner->flags & KDBUS_NAME_IN_QUEUE) &&
629 !strcmp(name, owner->name->name))
642 * kdbus_conn_quota_inc() - increase quota accounting
643 * @c: connection owning the quota tracking
644 * @u: user to account for (or NULL for kernel accounting)
645 * @memory: size of memory to account for
646 * @fds: number of FDs to account for
648 * This call manages the quotas on resource @c. That is, it's used if other
649 * users want to use the resources of connection @c, which so far only concerns
650 * the receive queue of the destination.
652 * This increases the quota-accounting for user @u by @memory bytes and @fds
653 * file descriptors. If the user has already reached the quota limits, this call
654 * will not do any accounting but return a negative error code indicating the
657 * Return: 0 on success, negative error code on failure.
659 int kdbus_conn_quota_inc(struct kdbus_conn *c, struct kdbus_user *u,
660 size_t memory, size_t fds)
662 struct kdbus_quota *quota;
663 size_t available, accounted;
668 * 50% of a pool is always owned by the connection. It is reserved for
669 * kernel queries, handling received messages and other tasks that are
670 * under control of the pool owner. The other 50% of the pool are used
672 * As we optionally support user-space based policies, we need fair
673 * allocation schemes. Furthermore, resource utilization should be
674 * maximized, so only minimal resources stay reserved. However, we need
675 * to adapt to a dynamic number of users, as we cannot know how many
676 * users will talk to a connection. Therefore, the current allocation
678 * We limit the number of bytes in a destination's pool per sending
679 * user. The space available for a user is 33% of the unused pool space
680 * (whereas the space used by the user itself is also treated as
681 * 'unused'). This way, we favor users coming first, but keep enough
682 * pool space available for any following users. Given that messages are
683 * dequeued in FIFO order, this should balance nicely if the number of
684 * users grows. At the same time, this algorithm guarantees that the
685 * space available to a connection is reduced dynamically, the more
686 * concurrent users talk to a connection.
689 /* per user-accounting is expensive, so we keep state small */
690 BUILD_BUG_ON(sizeof(quota->memory) != 4);
691 BUILD_BUG_ON(sizeof(quota->msgs) != 2);
692 BUILD_BUG_ON(sizeof(quota->fds) != 1);
693 BUILD_BUG_ON(KDBUS_CONN_MAX_MSGS > U16_MAX);
694 BUILD_BUG_ON(KDBUS_CONN_MAX_FDS_PER_USER > U8_MAX);
/* grow the per-user quota array on demand, indexed by user id */
696 id = u ? u->id : KDBUS_USER_KERNEL_ID;
697 if (id >= c->n_quota) {
700 users = max(KDBUS_ALIGN8(id) + 8, id);
701 quota = krealloc(c->quota, users * sizeof(*quota),
702 GFP_KERNEL | __GFP_ZERO);
710 quota = &c->quota[id];
711 kdbus_pool_accounted(c->pool, &available, &accounted);
713 /* half the pool is _always_ reserved for the pool owner */
717 * Pool owner slices are un-accounted slices; they can claim more
718 * than 50% of the queue. However, the slices we're dealing with here
719 * belong to the incoming queue, hence they are 'accounted' slices
720 * to which the 50%-limit applies.
722 if (available < accounted)
725 /* 1/3 of the remaining space (including your own memory) */
726 available = (available - accounted + quota->memory) / 3;
/* reject if the user's share, the message count or the fd count would
 * exceed its limits (fd check also guards against u8 wrap-around) */
728 if (available < quota->memory ||
729 available - quota->memory < memory ||
730 quota->memory + memory > U32_MAX)
732 if (quota->msgs >= KDBUS_CONN_MAX_MSGS)
734 if (quota->fds + fds < quota->fds ||
735 quota->fds + fds > KDBUS_CONN_MAX_FDS_PER_USER)
738 quota->memory += memory;
745 * kdbus_conn_quota_dec() - decrease quota accounting
746 * @c: connection owning the quota tracking
747 * @u: user which was accounted for (or NULL for kernel accounting)
748 * @memory: size of memory which was accounted for
749 * @fds: number of FDs which were accounted for
751 * This does the reverse of kdbus_conn_quota_inc(). You have to release any
752 * accounted resources that you called kdbus_conn_quota_inc() for. However, you
753 * must not call kdbus_conn_quota_dec() if the accounting failed (that is,
754 * kdbus_conn_quota_inc() failed).
756 void kdbus_conn_quota_dec(struct kdbus_conn *c, struct kdbus_user *u,
757 size_t memory, size_t fds)
759 struct kdbus_quota *quota;
762 id = u ? u->id : KDBUS_USER_KERNEL_ID;
/* an id we never accounted for indicates an unbalanced dec */
763 if (WARN_ON(id >= c->n_quota))
766 quota = &c->quota[id];
/* each WARN_ON flags an underflow; the counter is only decremented when
 * doing so cannot wrap below zero */
768 if (!WARN_ON(quota->msgs == 0))
770 if (!WARN_ON(quota->memory < memory))
771 quota->memory -= memory;
772 if (!WARN_ON(quota->fds < fds))
777 * kdbus_conn_lost_message() - handle lost messages
778 * @c: connection that lost a message
780 * kdbus is reliable. That means, we try hard to never lose messages. However,
781 * memory is limited, so we cannot rely on transmissions to never fail.
782 * Therefore, we use quota-limits to let callers know if their unicast message
783 * cannot be transmitted to a peer. This works fine for unicasts, but for
784 * broadcasts we cannot make the caller handle the transmission failure.
785 * Instead, we must let the destination know that it couldn't receive a
787 * As this is an unlikely scenario, we keep it simple. A single lost-counter
788 * remembers the number of lost messages since the last call to RECV. The next
789 * message retrieval will notify the connection that it lost messages since the
790 * last message retrieval and thus should resync its state.
792 void kdbus_conn_lost_message(struct kdbus_conn *c)
/* only the 0->1 transition wakes the peer; further losses just count up */
794 if (atomic_inc_return(&c->lost_count) == 1)
795 wake_up_interruptible(&c->wait);
798 /* Callers should take the conn_dst lock */
/*
 * Validate that @conn_dst can receive @staging and allocate the queue
 * entry. Returns the new entry or an ERR_PTR on refusal/failure.
 */
799 static struct kdbus_queue_entry *
800 kdbus_conn_entry_make(struct kdbus_conn *conn_src,
801 struct kdbus_conn *conn_dst,
802 struct kdbus_staging *staging)
804 /* The remote connection was disconnected */
805 if (!kdbus_conn_active(conn_dst))
806 return ERR_PTR(-ECONNRESET);
809 * If the connection does not accept file descriptors but the message
810 * has some attached, refuse it.
812 * If this is a monitor connection, accept the message. In that
813 * case, all file descriptors will be set to -1 at receive time.
815 if (!kdbus_conn_is_monitor(conn_dst) &&
816 !(conn_dst->flags & KDBUS_HELLO_ACCEPT_FD) &&
817 staging->gaps && staging->gaps->n_fds > 0)
818 return ERR_PTR(-ECOMM);
820 return kdbus_queue_entry_new(conn_src, conn_dst, staging);
824 * Synchronously responding to a message, allocate a queue entry
825 * and attach it to the reply tracking object.
826 * The connection's queue will never get to see it.
828 static int kdbus_conn_entry_sync_attach(struct kdbus_conn *conn_dst,
829 struct kdbus_staging *staging,
830 struct kdbus_reply *reply_wake)
832 struct kdbus_queue_entry *entry;
833 int remote_ret, ret = 0;
/* serialize against the waiting peer via its connection lock */
835 mutex_lock(&reply_wake->reply_dst->lock);
838 * If we are still waiting then proceed, allocate a queue
839 * entry and attach it to the reply object
841 if (reply_wake->waiting) {
842 entry = kdbus_conn_entry_make(reply_wake->reply_src, conn_dst,
845 ret = PTR_ERR(entry);
847 /* Attach the entry to the reply object */
848 reply_wake->queue_entry = entry;
854 * Update the reply object and wake up remote peer only
855 * on appropriate return codes
857 * * -ECOMM: if the replying connection failed with -ECOMM
858 * then wakeup remote peer with -EREMOTEIO
860 * We do this to differenciate between -ECOMM errors
861 * from the original sender perspective:
862 * -ECOMM error during the sync send and
863 * -ECOMM error during the sync reply, this last
864 * one is rewritten to -EREMOTEIO
866 * * Wake up on all other return codes.
871 remote_ret = -EREMOTEIO;
873 kdbus_sync_reply_wakeup(reply_wake, remote_ret);
874 mutex_unlock(&reply_wake->reply_dst->lock);
880 * kdbus_conn_entry_insert() - enqueue a message into the receiver's pool
881 * @conn_src: The sending connection
882 * @conn_dst: The connection to queue into
883 * @staging: Message to send
884 * @reply: The reply tracker to attach to the queue entry
885 * @name: Destination name this msg is sent to, or NULL
887 * Return: 0 on success. negative error otherwise.
889 int kdbus_conn_entry_insert(struct kdbus_conn *conn_src,
890 struct kdbus_conn *conn_dst,
891 struct kdbus_staging *staging,
892 struct kdbus_reply *reply,
893 const struct kdbus_name_entry *name)
895 struct kdbus_queue_entry *entry;
/* lock both peers in a stable order to avoid ABBA deadlocks */
898 kdbus_conn_lock2(conn_src, conn_dst);
900 entry = kdbus_conn_entry_make(conn_src, conn_dst, staging);
902 ret = PTR_ERR(entry);
/* if a reply is expected, start tracking it and kick the timeout scan */
907 kdbus_reply_link(reply);
909 schedule_delayed_work(&conn_src->work, 0);
913 * Record the sequence number of the registered name; it will
914 * be remembered by the queue, in case messages addressed to a
915 * name need to be moved from or to an activator.
918 entry->dst_name_id = name->name_id;
920 kdbus_queue_entry_enqueue(entry, reply);
921 wake_up_interruptible(&conn_dst->wait);
926 kdbus_conn_unlock2(conn_src, conn_dst);
/*
 * kdbus_conn_wait_reply() - block a synchronous SEND until its reply arrives
 *
 * Poll-based wait loop for a sync message transaction. Wakes up on reply
 * arrival, source disconnect, cancel-fd activity, signals, or timeout.
 * On -EINTR the reply object is marked interrupted so a restarted syscall
 * can resume waiting on it. NOTE(review): excerpt only -- several error
 * assignments and the final return are not visible in this listing.
 */
930 static int kdbus_conn_wait_reply(struct kdbus_conn *conn_src,
931 struct kdbus_cmd_send *cmd_send,
932 struct file *ioctl_file,
933 struct file *cancel_fd,
934 struct kdbus_reply *reply_wait,
937 struct kdbus_queue_entry *entry;
938 struct poll_wqueues pwq = {};
941 if (WARN_ON(!reply_wait))
945 * Block until the reply arrives. reply_wait is left untouched
946 * by the timeout scans that might be conducted for other,
947 * asynchronous replies of conn_src.
951 poll_wait(ioctl_file, &conn_src->wait, &pwq.pt);
955 * Any of the following conditions will stop our synchronously
956 * blocking SEND command:
958 * a) The origin sender closed its connection
959 * b) The remote peer answered, setting reply_wait->waiting = 0
960 * c) The cancel FD was written to
961 * d) A signal was received
962 * e) The specified timeout was reached, and none of the above
963 * conditions kicked in.
967 * We have already acquired an active reference when
968 * entering here, but another thread may call
969 * KDBUS_CMD_BYEBYE which does not acquire an active
970 * reference, therefore kdbus_conn_disconnect() will
973 if (!kdbus_conn_active(conn_src)) {
979 * After the replying peer unset the waiting variable
980 * it will wake up us.
982 if (!reply_wait->waiting) {
983 ret = reply_wait->err;
/* check the optional cancellation fd for activity */
990 r = cancel_fd->f_op->poll(cancel_fd, &pwq.pt);
997 if (signal_pending(current)) {
1002 if (!poll_schedule_timeout(&pwq, TASK_INTERRUPTIBLE,
1009 * Reset the poll worker func, so the waitqueues are not
1010 * added to the poll table again. We just reuse what we've
1011 * collected earlier for further iterations.
1013 init_poll_funcptr(&pwq.pt, NULL);
1016 poll_freewait(&pwq);
1018 if (ret == -EINTR) {
1020 * Interrupted system call. Unref the reply object, and pass
1021 * the return value down the chain. Mark the reply as
1022 * interrupted, so the cleanup work can remove it, but do not
1023 * unlink it from the list. Once the syscall restarts, we'll
1024 * pick it up and wait on it again.
1026 mutex_lock(&conn_src->lock);
1027 reply_wait->interrupted = true;
1028 schedule_delayed_work(&conn_src->work, 0);
1029 mutex_unlock(&conn_src->lock);
1031 return -ERESTARTSYS;
/* reply arrived (or failed): install it into the caller's pool */
1034 mutex_lock(&conn_src->lock);
1035 reply_wait->waiting = false;
1036 entry = reply_wait->queue_entry;
1038 ret = kdbus_queue_entry_install(entry,
1039 &cmd_send->reply.return_flags,
1041 kdbus_pool_slice_publish(entry->slice, &cmd_send->reply.offset,
1042 &cmd_send->reply.msg_size);
1043 kdbus_queue_entry_free(entry);
1045 kdbus_reply_unlink(reply_wait);
1046 mutex_unlock(&conn_src->lock);
/*
 * kdbus_pin_dst() - resolve and pin the destination of a message
 * @bus: bus to search
 * @staging: staged message carrying dst_id and/or dst_name
 * @out_name: returns the pinned name entry, or NULL (excerpt: assignment
 * lines not visible)
 * @out_dst: returns the pinned (referenced) destination connection
 *
 * Caller must hold bus->name_registry->rwlock. Looks the destination up
 * either by numeric id or by well-known name, enforcing that a combined
 * id+name destination matches the current name owner.
 */
1051 static int kdbus_pin_dst(struct kdbus_bus *bus,
1052 struct kdbus_staging *staging,
1053 struct kdbus_name_entry **out_name,
1054 struct kdbus_conn **out_dst)
1056 const struct kdbus_msg *msg = staging->msg;
1057 struct kdbus_name_owner *owner = NULL;
1058 struct kdbus_name_entry *name = NULL;
1059 struct kdbus_conn *dst = NULL;
1062 lockdep_assert_held(&bus->name_registry->rwlock);
1064 if (!staging->dst_name) {
/* lookup by numeric id: only ordinary peers may be addressed */
1065 dst = kdbus_bus_find_conn_by_id(bus, msg->dst_id);
1069 if (!kdbus_conn_is_ordinary(dst)) {
/* lookup by well-known name and find its current owner */
1074 name = kdbus_name_lookup_unlocked(bus->name_registry,
1077 owner = kdbus_name_get_owner(name);
1082 * If both a name and a connection ID are given as destination
1083 * of a message, check that the currently owning connection of
1084 * the name matches the specified ID.
1085 * This way, we allow userspace to send the message to a
1086 * specific connection by ID only if the connection currently
1087 * owns the given name.
1089 if (msg->dst_id != KDBUS_DST_ID_NAME &&
1090 msg->dst_id != owner->conn->id)
/* honor NO_AUTO_START: refuse delivery to a dormant activator */
1093 if ((msg->flags & KDBUS_MSG_NO_AUTO_START) &&
1094 kdbus_conn_is_activator(owner->conn))
1095 return -EADDRNOTAVAIL;
1097 dst = kdbus_conn_ref(owner->conn);
/* error path (label missing from excerpt): drop the pinned dst again */
1105 kdbus_conn_unref(dst);
/*
 * kdbus_conn_reply() - send a reply message to a waiting peer
 *
 * Looks up the reply-tracking object matching @msg->cookie_reply on the
 * destination; if the sender is waiting synchronously, the reply is
 * attached directly to the tracker, otherwise it is queued normally.
 * Broadcasts and signals are not valid replies (WARN_ON-guarded).
 */
1109 static int kdbus_conn_reply(struct kdbus_conn *src,
1110 struct kdbus_staging *staging)
1112 const struct kdbus_msg *msg = staging->msg;
1113 struct kdbus_name_entry *name = NULL;
1114 struct kdbus_reply *reply, *wake = NULL;
1115 struct kdbus_conn *dst = NULL;
1116 struct kdbus_bus *bus = src->ep->bus;
1119 if (WARN_ON(msg->dst_id == KDBUS_DST_ID_BROADCAST) ||
1120 WARN_ON(msg->flags & KDBUS_MSG_EXPECT_REPLY) ||
1121 WARN_ON(msg->flags & KDBUS_MSG_SIGNAL))
1124 /* name-registry must be locked for lookup *and* collecting data */
1125 down_read(&bus->name_registry->rwlock);
1127 /* find and pin destination */
1129 ret = kdbus_pin_dst(bus, staging, &name, &dst);
/* detach the matching reply tracker under the destination's lock */
1133 mutex_lock(&dst->lock);
1134 reply = kdbus_reply_find(src, dst, msg->cookie_reply);
1137 wake = kdbus_reply_ref(reply);
1139 kdbus_reply_unlink(reply);
1141 mutex_unlock(&dst->lock);
/* monitors see the reply regardless of how it is delivered */
1146 kdbus_bus_eavesdrop(bus, src, staging);
/* sync waiter present: hand the reply over directly; else enqueue */
1149 ret = kdbus_conn_entry_sync_attach(dst, staging, wake);
1151 ret = kdbus_conn_entry_insert(src, dst, staging, NULL, name);
1154 up_read(&bus->name_registry->rwlock);
1155 kdbus_reply_unref(wake);
1156 kdbus_conn_unref(dst);
/*
 * kdbus_conn_call() - send a synchronous method call and set up its wait
 *
 * Either resumes a previously interrupted wait context (matched by
 * cookie) or pins the destination, performs a TALK policy check, creates
 * a new reply tracker and enqueues the message. Returns the reply tracker
 * to wait on, or an ERR_PTR.
 */
1160 static struct kdbus_reply *kdbus_conn_call(struct kdbus_conn *src,
1161 struct kdbus_staging *staging,
1164 const struct kdbus_msg *msg = staging->msg;
1165 struct kdbus_name_entry *name = NULL;
1166 struct kdbus_reply *wait = NULL;
1167 struct kdbus_conn *dst = NULL;
1168 struct kdbus_bus *bus = src->ep->bus;
/* calls must be unicast, non-signal, and must expect a reply */
1171 if (WARN_ON(msg->dst_id == KDBUS_DST_ID_BROADCAST) ||
1172 WARN_ON(msg->flags & KDBUS_MSG_SIGNAL) ||
1173 WARN_ON(!(msg->flags & KDBUS_MSG_EXPECT_REPLY)))
1174 return ERR_PTR(-EINVAL);
1176 /* resume previous wait-context, if available */
1178 mutex_lock(&src->lock);
1179 wait = kdbus_reply_find(NULL, src, msg->cookie);
1181 if (wait->interrupted) {
1182 kdbus_reply_ref(wait);
1183 wait->interrupted = false;
1188 mutex_unlock(&src->lock);
/* @exp is the absolute deadline; bail out if it already passed */
1193 if (ktime_compare(ktime_get(), exp) >= 0)
1194 return ERR_PTR(-ETIMEDOUT);
1196 /* name-registry must be locked for lookup *and* collecting data */
1197 down_read(&bus->name_registry->rwlock);
1199 /* find and pin destination */
1201 ret = kdbus_pin_dst(bus, staging, &name, &dst);
1205 if (!kdbus_conn_policy_talk(src, current_cred(), dst)) {
/* new synchronous reply tracker (last arg: sync == true) */
1210 wait = kdbus_reply_new(dst, src, msg, name, true);
1212 ret = PTR_ERR(wait);
1219 kdbus_bus_eavesdrop(bus, src, staging);
1221 ret = kdbus_conn_entry_insert(src, dst, staging, wait, name);
1228 up_read(&bus->name_registry->rwlock);
/* error path (label missing from excerpt): drop tracker, report error */
1230 kdbus_reply_unref(wait);
1231 wait = ERR_PTR(ret);
1233 kdbus_conn_unref(dst);
/*
 * kdbus_conn_unicast() - send an asynchronous unicast message
 *
 * Pins the destination, applies match/policy filtering (signals are
 * silently dropped like broadcasts when filtered), optionally creates an
 * async reply tracker when a reply is expected, and queues the message.
 */
1237 static int kdbus_conn_unicast(struct kdbus_conn *src,
1238 struct kdbus_staging *staging)
1240 const struct kdbus_msg *msg = staging->msg;
1241 struct kdbus_name_entry *name = NULL;
1242 struct kdbus_reply *wait = NULL;
1243 struct kdbus_conn *dst = NULL;
1244 struct kdbus_bus *bus = src->ep->bus;
1245 bool is_signal = (msg->flags & KDBUS_MSG_SIGNAL);
/* cookie_reply is only meaningful together with EXPECT_REPLY */
1248 if (WARN_ON(msg->dst_id == KDBUS_DST_ID_BROADCAST) ||
1249 WARN_ON(!(msg->flags & KDBUS_MSG_EXPECT_REPLY) &&
1250 msg->cookie_reply != 0))
1253 /* name-registry must be locked for lookup *and* collecting data */
1254 down_read(&bus->name_registry->rwlock);
1256 /* find and pin destination */
1258 ret = kdbus_pin_dst(bus, staging, &name, &dst);
1263 /* like broadcasts we eavesdrop even if the msg is dropped */
1264 kdbus_bus_eavesdrop(bus, src, staging);
1266 /* drop silently if peer is not interested or not privileged */
1267 if (!kdbus_match_db_match_msg(dst->match_db, src, staging) ||
1268 !kdbus_conn_policy_talk(dst, NULL, src))
1270 } else if (!kdbus_conn_policy_talk(src, current_cred(), dst)) {
1273 } else if (msg->flags & KDBUS_MSG_EXPECT_REPLY) {
/* async reply tracker (last arg: sync == false) */
1274 wait = kdbus_reply_new(dst, src, msg, name, false);
1276 ret = PTR_ERR(wait);
1285 kdbus_bus_eavesdrop(bus, src, staging);
1287 ret = kdbus_conn_entry_insert(src, dst, staging, wait, name);
1288 if (ret < 0 && !is_signal)
1291 /* signals are treated like broadcasts, recv-errors are ignored */
1295 up_read(&bus->name_registry->rwlock);
1296 kdbus_reply_unref(wait);
1297 kdbus_conn_unref(dst);
1302 * kdbus_conn_move_messages() - move messages from one connection to another
1303 * @conn_dst: Connection to copy to
1304 * @conn_src: Connection to copy from
1305 * @name_id: Filter for the sequence number of the registered
1306 * name, 0 means no filtering.
1308 * Move all messages from one connection to another. This is used when
1309 * an implementer connection is taking over/giving back a well-known name
1310 * from/to an activator connection.
1312 void kdbus_conn_move_messages(struct kdbus_conn *conn_dst,
1313 struct kdbus_conn *conn_src,
1316 struct kdbus_queue_entry *e, *e_tmp;
1317 struct kdbus_reply *r, *r_tmp;
1318 struct kdbus_bus *bus;
1319 struct kdbus_conn *c;
1320 LIST_HEAD(msg_list);
1323 if (WARN_ON(conn_src == conn_dst))
1326 bus = conn_src->ep->bus;
1328 /* lock order: domain -> bus -> ep -> names -> conn */
1329 down_read(&bus->conn_rwlock);
/* re-point pending replies that expected an answer from @conn_src */
1330 hash_for_each(bus->conn_hash, i, c, hentry) {
1331 if (c == conn_src || c == conn_dst)
1334 mutex_lock(&c->lock);
1335 list_for_each_entry_safe(r, r_tmp, &c->reply_list, entry) {
1336 if (r->reply_src != conn_src)
1339 /* filter messages for a specific name */
1340 if (name_id > 0 && r->name_id != name_id)
1343 kdbus_conn_unref(r->reply_src);
1344 r->reply_src = kdbus_conn_ref(conn_dst);
1346 mutex_unlock(&c->lock);
1348 up_read(&bus->conn_rwlock);
/* move the queued entries themselves, both peers locked in order */
1350 kdbus_conn_lock2(conn_src, conn_dst);
1351 list_for_each_entry_safe(e, e_tmp, &conn_src->queue.msg_list, entry) {
1352 /* filter messages for a specific name */
1353 if (name_id > 0 && e->dst_name_id != name_id)
/* destination refuses FDs: drop entries carrying them, count the loss */
1356 if (!(conn_dst->flags & KDBUS_HELLO_ACCEPT_FD) &&
1357 e->gaps && e->gaps->n_fds > 0) {
1358 kdbus_conn_lost_message(conn_dst);
1359 kdbus_queue_entry_free(e);
1363 ret = kdbus_queue_entry_move(e, conn_dst);
1365 kdbus_conn_lost_message(conn_dst);
1366 kdbus_queue_entry_free(e);
1370 kdbus_conn_unlock2(conn_src, conn_dst);
1372 /* wake up poll() */
1373 wake_up_interruptible(&conn_dst->wait);
1376 /* query the policy-database for all names of @whom */
/*
 * Returns true if @conn (under @conn_creds, or its own creds if NULL)
 * is granted at least @access on any name currently owned by @whom.
 * Names only queued (KDBUS_NAME_IN_QUEUE) are skipped.
 * Caller must hold the bus name-registry rwlock (asserted below).
 */
1377 static bool kdbus_conn_policy_query_all(struct kdbus_conn *conn,
1378 const struct cred *conn_creds,
1379 struct kdbus_policy_db *db,
1380 struct kdbus_conn *whom,
1381 unsigned int access)
1383 struct kdbus_name_owner *owner;
1387 lockdep_assert_held(&conn->ep->bus->name_registry->rwlock);
1389 down_read(&db->entries_rwlock);
1390 mutex_lock(&whom->lock);
1392 list_for_each_entry(owner, &whom->names_list, conn_entry) {
1393 if (owner->flags & KDBUS_NAME_IN_QUEUE)
1396 res = kdbus_policy_query_unlocked(db,
1397 conn_creds ? : conn->cred,
1399 kdbus_strhash(owner->name->name));
/* one name granting >= @access is sufficient */
1400 if (res >= (int)access) {
1406 mutex_unlock(&whom->lock);
1407 up_read(&db->entries_rwlock);
1413 * kdbus_conn_policy_own_name() - verify a connection can own the given name
1415 * @conn_creds: Credentials of @conn to use for policy check
1418 * This verifies that @conn is allowed to acquire the well-known name @name.
1420 * Return: true if allowed, false if not.
1422 bool kdbus_conn_policy_own_name(struct kdbus_conn *conn,
1423 const struct cred *conn_creds,
1426 unsigned int hash = kdbus_strhash(name);
/* internal policy disabled: delegated to libdbuspolicy/LSM (see top) */
1429 #ifdef DISABLE_KDBUS_POLICY
/* no explicit creds given: fall back to the connection's own */
1434 conn_creds = conn->cred;
/* custom endpoint: its policy-db must grant OWN as well */
1436 if (conn->ep->user) {
1437 res = kdbus_policy_query(&conn->ep->policy_db, conn_creds,
1439 if (res < KDBUS_POLICY_OWN)
1446 res = kdbus_policy_query(&conn->ep->bus->policy_db, conn_creds,
1448 return res >= KDBUS_POLICY_OWN;
1452 * kdbus_conn_policy_talk() - verify a connection can talk to a given peer
1453 * @conn: Connection that tries to talk
1454 * @conn_creds: Credentials of @conn to use for policy check
1455 * @to: Connection that is talked to
1457 * This verifies that @conn is allowed to talk to @to.
1459 * Return: true if allowed, false if not.
1461 bool kdbus_conn_policy_talk(struct kdbus_conn *conn,
1462 const struct cred *conn_creds,
1463 struct kdbus_conn *to)
/* internal policy disabled: delegated to libdbuspolicy/LSM (see top) */
1466 #ifdef DISABLE_KDBUS_POLICY
1471 conn_creds = conn->cred;
/* on custom endpoints the endpoint policy-db must grant TALK first */
1473 if (conn->ep->user &&
1474 !kdbus_conn_policy_query_all(conn, conn_creds, &conn->ep->policy_db,
1475 to, KDBUS_POLICY_TALK))
/* same-user peers may always talk to each other */
1480 if (uid_eq(conn_creds->euid, to->cred->uid))
1483 return kdbus_conn_policy_query_all(conn, conn_creds,
1484 &conn->ep->bus->policy_db, to,
1489 * kdbus_conn_policy_see_name_unlocked() - verify a connection can see a given
1492 * @conn_creds: Credentials of @conn to use for policy check
1495 * This verifies that @conn is allowed to see the well-known name @name. Caller
1496 * must hold policy-lock.
1498 * Return: true if allowed, false if not.
1500 bool kdbus_conn_policy_see_name_unlocked(struct kdbus_conn *conn,
1501 const struct cred *conn_creds,
/* internal policy disabled: delegated to libdbuspolicy/LSM (see top) */
1506 #ifdef DISABLE_KDBUS_POLICY
1511 * By default, all names are visible on a bus. SEE policies can only be
1512 * installed on custom endpoints, where by default no name is visible.
1514 if (!conn->ep->user)
1517 res = kdbus_policy_query_unlocked(&conn->ep->policy_db,
1518 conn_creds ? : conn->cred,
1519 name, kdbus_strhash(name));
1520 return res >= KDBUS_POLICY_SEE;
/* locked wrapper: takes the endpoint policy-db read-lock for the caller */
1523 static bool kdbus_conn_policy_see_name(struct kdbus_conn *conn,
1524 const struct cred *conn_creds,
1529 down_read(&conn->ep->policy_db.entries_rwlock);
1530 res = kdbus_conn_policy_see_name_unlocked(conn, conn_creds, name);
1531 up_read(&conn->ep->policy_db.entries_rwlock);
/*
 * Verify @conn may see peer @whom at all (id-based visibility).
 * Return: true if allowed, false if not.
 */
1536 static bool kdbus_conn_policy_see(struct kdbus_conn *conn,
1537 const struct cred *conn_creds,
1538 struct kdbus_conn *whom)
/* internal policy disabled: delegated to libdbuspolicy/LSM (see top) */
1541 #ifdef DISABLE_KDBUS_POLICY
1546 * By default, all names are visible on a bus, so a connection can
1547 * always see other connections. SEE policies can only be installed on
1548 * custom endpoints, where by default no name is visible and we hide
1549 * peers from each other, unless you see at least _one_ name of the
1552 return !conn->ep->user ||
1553 kdbus_conn_policy_query_all(conn, conn_creds,
1554 &conn->ep->policy_db, whom,
1559 * kdbus_conn_policy_see_notification() - verify a connection is allowed to
1560 * receive a given kernel notification
1562 * @conn_creds: Credentials of @conn to use for policy check
1563 * @msg: Notification message
1565 * This checks whether @conn is allowed to see the kernel notification.
1567 * Return: true if allowed, false if not.
1569 bool kdbus_conn_policy_see_notification(struct kdbus_conn *conn,
1570 const struct cred *conn_creds,
1571 const struct kdbus_msg *msg)
1574 * Depending on the notification type, broadcasted kernel notifications
1575 * have to be filtered:
1577 * KDBUS_ITEM_NAME_{ADD,REMOVE,CHANGE}: This notification is forwarded
1578 * to a peer if, and only if, that peer can see the name this
1579 * notification is for.
1581 * KDBUS_ITEM_ID_{ADD,REMOVE}: Notifications for ID changes are
1582 * broadcast to everyone, to allow tracking peers.
/* kernel notifications carry exactly one item; dispatch on its type */
1585 switch (msg->items[0].type) {
1586 case KDBUS_ITEM_NAME_ADD:
1587 case KDBUS_ITEM_NAME_REMOVE:
1588 case KDBUS_ITEM_NAME_CHANGE:
1589 return kdbus_conn_policy_see_name(conn, conn_creds,
1590 msg->items[0].name_change.name);
1592 case KDBUS_ITEM_ID_ADD:
1593 case KDBUS_ITEM_ID_REMOVE:
1597 WARN(1, "Invalid type for notification broadcast: %llu\n",
1598 (unsigned long long)msg->items[0].type);
1604 * kdbus_cmd_hello() - handle KDBUS_CMD_HELLO
1605 * @ep: Endpoint to operate on
1606 * @file: File this connection is opened on
1607 * @argp: Command payload
1609 * Return: NULL or newly created connection on success, ERR_PTR on failure.
1611 struct kdbus_conn *kdbus_cmd_hello(struct kdbus_ep *ep, struct file *file,
1614 struct kdbus_cmd_hello *cmd;
1615 struct kdbus_conn *c = NULL;
1616 const char *item_name;
/* accepted ioctl items; POLICY_ACCESS may appear multiple times */
1619 struct kdbus_arg argv[] = {
1620 { .type = KDBUS_ITEM_NEGOTIATE },
1621 { .type = KDBUS_ITEM_NAME },
1622 { .type = KDBUS_ITEM_CREDS },
1623 { .type = KDBUS_ITEM_PIDS },
1624 { .type = KDBUS_ITEM_SECLABEL },
1625 { .type = KDBUS_ITEM_CONN_DESCRIPTION },
1626 { .type = KDBUS_ITEM_POLICY_ACCESS, .multiple = true },
1628 struct kdbus_args args = {
1629 .allowed_flags = KDBUS_FLAG_NEGOTIATE |
1630 KDBUS_HELLO_ACCEPT_FD |
1631 KDBUS_HELLO_ACTIVATOR |
1632 KDBUS_HELLO_POLICY_HOLDER |
1633 KDBUS_HELLO_MONITOR,
1635 .argc = ARRAY_SIZE(argv),
1638 ret = kdbus_args_parse(&args, argp, &cmd);
1640 return ERR_PTR(ret);
/* optional items are NULL when absent; indices match argv[] above */
1644 item_name = argv[1].item ? argv[1].item->str : NULL;
1646 c = kdbus_conn_new(ep, file, cmd, item_name,
1647 argv[2].item ? &argv[2].item->creds : NULL,
1648 argv[3].item ? &argv[3].item->pids : NULL,
1649 argv[4].item ? argv[4].item->str : NULL,
1650 argv[5].item ? argv[5].item->str : NULL);
1657 ret = kdbus_conn_connect(c, item_name);
/* activators/policy-holders install their policy entries at hello time */
1661 if (kdbus_conn_is_activator(c) || kdbus_conn_is_policy_holder(c)) {
1662 ret = kdbus_conn_acquire(c);
1666 ret = kdbus_policy_set(&c->ep->bus->policy_db, args.items,
1668 kdbus_conn_is_policy_holder(c), c);
1669 kdbus_conn_release(c);
1674 if (copy_to_user(argp, cmd, sizeof(*cmd)))
1678 ret = kdbus_args_clear(&args, ret);
/* error path: tear down the half-initialized connection */
1681 kdbus_conn_disconnect(c, false);
1682 kdbus_conn_unref(c);
1684 return ERR_PTR(ret);
1690 * kdbus_cmd_byebye_unlocked() - handle KDBUS_CMD_BYEBYE
1691 * @conn: connection to operate on
1692 * @argp: command payload
1694 * The caller must not hold any active reference to @conn or this will deadlock.
1696 * Return: >=0 on success, negative error code on failure.
1698 int kdbus_cmd_byebye_unlocked(struct kdbus_conn *conn, void __user *argp)
1700 struct kdbus_cmd *cmd;
1703 struct kdbus_arg argv[] = {
1704 { .type = KDBUS_ITEM_NEGOTIATE },
1706 struct kdbus_args args = {
1707 .allowed_flags = KDBUS_FLAG_NEGOTIATE,
1709 .argc = ARRAY_SIZE(argv),
/* only ordinary peers may say goodbye; monitors/activators may not */
1712 if (!kdbus_conn_is_ordinary(conn))
1715 ret = kdbus_args_parse(&args, argp, &cmd);
/* ensure_queue_empty=true: refuse if messages are still queued */
1719 ret = kdbus_conn_disconnect(conn, true);
1720 return kdbus_args_clear(&args, ret);
1724 * kdbus_cmd_conn_info() - handle KDBUS_CMD_CONN_INFO
1725 * @conn: connection to operate on
1726 * @argp: command payload
1728 * Return: >=0 on success, negative error code on failure.
1730 int kdbus_cmd_conn_info(struct kdbus_conn *conn, void __user *argp)
1732 struct kdbus_meta_conn *conn_meta = NULL;
1733 struct kdbus_pool_slice *slice = NULL;
1734 struct kdbus_name_entry *entry = NULL;
1735 struct kdbus_name_owner *owner = NULL;
1736 struct kdbus_conn *owner_conn = NULL;
1737 struct kdbus_item *meta_items = NULL;
1738 struct kdbus_info info = {};
1739 struct kdbus_cmd_info *cmd;
1740 struct kdbus_bus *bus = conn->ep->bus;
1741 struct kvec kvec[3];
1742 size_t meta_size, cnt = 0;
1744 u64 attach_flags, size = 0;
1747 struct kdbus_arg argv[] = {
1748 { .type = KDBUS_ITEM_NEGOTIATE },
1749 { .type = KDBUS_ITEM_NAME },
1751 struct kdbus_args args = {
1752 .allowed_flags = KDBUS_FLAG_NEGOTIATE,
1754 .argc = ARRAY_SIZE(argv),
1757 ret = kdbus_args_parse(&args, argp, &cmd);
1761 /* registry must be held throughout lookup *and* collecting data */
1762 down_read(&bus->name_registry->rwlock);
1764 ret = kdbus_sanitize_attach_flags(cmd->attach_flags, &attach_flags);
1768 name = argv[1].item ? argv[1].item->str : NULL;
/* lookup target either by well-known name or by unique id */
1771 entry = kdbus_name_lookup_unlocked(bus->name_registry, name);
1773 owner = kdbus_name_get_owner(entry);
1775 !kdbus_conn_policy_see_name(conn, current_cred(), name) ||
1776 (cmd->id != 0 && owner->conn->id != cmd->id)) {
1777 /* pretend a name doesn't exist if you cannot see it */
1782 owner_conn = kdbus_conn_ref(owner->conn);
1783 } else if (cmd->id > 0) {
1784 owner_conn = kdbus_bus_find_conn_by_id(bus, cmd->id);
1785 if (!owner_conn || !kdbus_conn_policy_see(conn, current_cred(),
1787 /* pretend an id doesn't exist if you cannot see it */
/* only expose metadata the owner agreed to send */
1796 attach_flags &= atomic64_read(&owner_conn->attach_flags_send);
1798 conn_meta = kdbus_meta_conn_new();
1799 if (IS_ERR(conn_meta)) {
1800 ret = PTR_ERR(conn_meta);
1805 ret = kdbus_meta_conn_collect(conn_meta, owner_conn, 0, attach_flags);
1809 ret = kdbus_meta_emit(owner_conn->meta_proc, owner_conn->meta_fake,
1810 conn_meta, conn, attach_flags,
1811 &meta_items, &meta_size);
1815 info.id = owner_conn->id;
1816 info.flags = owner_conn->flags;
/* assemble the reply: fixed info, then metadata items, then padding */
1818 kdbus_kvec_set(&kvec[cnt++], &info, sizeof(info), &size);
1819 if (meta_size > 0) {
1820 kdbus_kvec_set(&kvec[cnt++], meta_items, meta_size, &size);
1821 cnt += !!kdbus_kvec_pad(&kvec[cnt], &size);
/* copy into a pool slice of the *caller's* pool and publish offset */
1826 slice = kdbus_pool_slice_alloc(conn->pool, size, false);
1827 if (IS_ERR(slice)) {
1828 ret = PTR_ERR(slice);
1833 ret = kdbus_pool_slice_copy_kvec(slice, 0, kvec, cnt, size);
1837 kdbus_pool_slice_publish(slice, &cmd->offset, &cmd->info_size);
1839 if (kdbus_member_set_user(&cmd->offset, argp, typeof(*cmd), offset) ||
1840 kdbus_member_set_user(&cmd->info_size, argp,
1841 typeof(*cmd), info_size)) {
1849 up_read(&bus->name_registry->rwlock);
1850 kdbus_pool_slice_release(slice);
1852 kdbus_meta_conn_unref(conn_meta);
1853 kdbus_conn_unref(owner_conn);
1854 return kdbus_args_clear(&args, ret);
1858 * kdbus_cmd_update() - handle KDBUS_CMD_UPDATE
1859 * @conn: connection to operate on
1860 * @argp: command payload
1862 * Return: >=0 on success, negative error code on failure.
1864 int kdbus_cmd_update(struct kdbus_conn *conn, void __user *argp)
1866 struct kdbus_item *item_policy;
1867 u64 *item_attach_send = NULL;
1868 u64 *item_attach_recv = NULL;
1869 struct kdbus_cmd *cmd;
1874 struct kdbus_arg argv[] = {
1875 { .type = KDBUS_ITEM_NEGOTIATE },
1876 { .type = KDBUS_ITEM_ATTACH_FLAGS_SEND },
1877 { .type = KDBUS_ITEM_ATTACH_FLAGS_RECV },
1878 { .type = KDBUS_ITEM_NAME, .multiple = true },
1879 { .type = KDBUS_ITEM_POLICY_ACCESS, .multiple = true },
1881 struct kdbus_args args = {
1882 .allowed_flags = KDBUS_FLAG_NEGOTIATE,
1884 .argc = ARRAY_SIZE(argv),
1887 ret = kdbus_args_parse(&args, argp, &cmd);
1891 item_attach_send = argv[1].item ? &argv[1].item->data64[0] : NULL;
1892 item_attach_recv = argv[2].item ? &argv[2].item->data64[0] : NULL;
/* a NAME or POLICY_ACCESS item implies a policy update */
1893 item_policy = argv[3].item ? : argv[4].item;
/* validate every requested change before applying any of them */
1895 if (item_attach_send) {
1896 if (!kdbus_conn_is_ordinary(conn) &&
1897 !kdbus_conn_is_monitor(conn)) {
1902 ret = kdbus_sanitize_attach_flags(*item_attach_send,
1908 if (item_attach_recv) {
1909 if (!kdbus_conn_is_ordinary(conn) &&
1910 !kdbus_conn_is_monitor(conn) &&
1911 !kdbus_conn_is_activator(conn)) {
1916 ret = kdbus_sanitize_attach_flags(*item_attach_recv,
1922 if (item_policy && !kdbus_conn_is_policy_holder(conn)) {
1927 /* now that we verified the input, update the connection */
1930 ret = kdbus_policy_set(&conn->ep->bus->policy_db, cmd->items,
1931 KDBUS_ITEMS_SIZE(cmd, items),
1937 if (item_attach_send)
1938 atomic64_set(&conn->attach_flags_send, attach_send)
1940 if (item_attach_recv)
1941 atomic64_set(&conn->attach_flags_recv, attach_recv);
1944 return kdbus_args_clear(&args, ret);
1948 * kdbus_cmd_send() - handle KDBUS_CMD_SEND
1949 * @conn: connection to operate on
1950 * @f: file this command was called on
1951 * @argp: command payload
1953 * Return: >=0 on success, negative error code on failure.
1955 int kdbus_cmd_send(struct kdbus_conn *conn, struct file *f, void __user *argp)
1957 struct kdbus_cmd_send *cmd;
1958 struct kdbus_staging *staging = NULL;
1959 struct kdbus_msg *msg = NULL;
1960 struct file *cancel_fd = NULL;
1963 /* command arguments */
1964 struct kdbus_arg argv[] = {
1965 { .type = KDBUS_ITEM_NEGOTIATE },
1966 { .type = KDBUS_ITEM_CANCEL_FD },
1968 struct kdbus_args args = {
1969 .allowed_flags = KDBUS_FLAG_NEGOTIATE |
1970 KDBUS_SEND_SYNC_REPLY,
1972 .argc = ARRAY_SIZE(argv),
1975 /* message arguments */
1976 struct kdbus_arg msg_argv[] = {
1977 { .type = KDBUS_ITEM_NEGOTIATE },
1978 { .type = KDBUS_ITEM_PAYLOAD_VEC, .multiple = true },
1979 { .type = KDBUS_ITEM_PAYLOAD_MEMFD, .multiple = true },
1980 { .type = KDBUS_ITEM_FDS },
1981 { .type = KDBUS_ITEM_BLOOM_FILTER },
1982 { .type = KDBUS_ITEM_DST_NAME },
1984 struct kdbus_args msg_args = {
1985 .allowed_flags = KDBUS_FLAG_NEGOTIATE |
1986 KDBUS_MSG_EXPECT_REPLY |
1987 KDBUS_MSG_NO_AUTO_START |
1990 .argc = ARRAY_SIZE(msg_argv),
1993 if (!kdbus_conn_is_ordinary(conn))
1996 /* make sure to parse both, @cmd and @msg on negotiation */
1998 ret = kdbus_args_parse(&args, argp, &cmd);
2001 else if (ret > 0 && !cmd->msg_address) /* negotiation without msg */
2004 ret2 = kdbus_args_parse_msg(&msg_args, KDBUS_PTR(cmd->msg_address),
2006 if (ret2 < 0) { /* cannot parse message */
2009 } else if (ret2 > 0 && !ret) { /* msg-negot implies cmd-negot */
2012 } else if (ret > 0) { /* negotiation */
2016 /* here we parsed both, @cmd and @msg, and neither wants negotiation */
2018 cmd->reply.return_flags = 0;
2019 kdbus_pool_publish_empty(conn->pool, &cmd->reply.offset,
2020 &cmd->reply.msg_size);
/* pin the optional cancellation fd; it must support poll() */
2023 cancel_fd = fget(argv[1].item->fds[0]);
2029 if (!cancel_fd->f_op->poll) {
2035 /* patch-in the source of this message */
2036 if (msg->src_id > 0 && msg->src_id != conn->id) {
2040 msg->src_id = conn->id;
2042 staging = kdbus_staging_new_user(conn->ep->bus, cmd, msg);
2043 if (IS_ERR(staging)) {
2044 ret = PTR_ERR(staging);
/* dispatch: broadcast, synchronous call, unicast, or reply */
2049 if (msg->dst_id == KDBUS_DST_ID_BROADCAST) {
2050 down_read(&conn->ep->bus->name_registry->rwlock);
2051 kdbus_bus_broadcast(conn->ep->bus, conn, staging);
2052 up_read(&conn->ep->bus->name_registry->rwlock);
2053 } else if (cmd->flags & KDBUS_SEND_SYNC_REPLY) {
2054 struct kdbus_reply *r;
2057 exp = ns_to_ktime(msg->timeout_ns);
2058 r = kdbus_conn_call(conn, staging, exp);
2064 ret = kdbus_conn_wait_reply(conn, cmd, f, cancel_fd, r, exp);
2065 kdbus_reply_unref(r);
2068 } else if ((msg->flags & KDBUS_MSG_EXPECT_REPLY) ||
2069 msg->cookie_reply == 0) {
2070 ret = kdbus_conn_unicast(conn, staging);
2074 ret = kdbus_conn_reply(conn, staging);
2079 if (kdbus_member_set_user(&cmd->reply, argp, typeof(*cmd), reply))
2085 kdbus_staging_free(staging);
2086 ret = kdbus_args_clear(&msg_args, ret);
2087 return kdbus_args_clear(&args, ret);
2091 * kdbus_cmd_recv() - handle KDBUS_CMD_RECV
2092 * @conn: connection to operate on
2093 * @argp: command payload
2095 * Return: >=0 on success, negative error code on failure.
2097 int kdbus_cmd_recv(struct kdbus_conn *conn, void __user *argp)
2099 struct kdbus_queue_entry *entry;
2100 struct kdbus_cmd_recv *cmd;
2103 struct kdbus_arg argv[] = {
2104 { .type = KDBUS_ITEM_NEGOTIATE },
2106 struct kdbus_args args = {
2107 .allowed_flags = KDBUS_FLAG_NEGOTIATE |
2110 KDBUS_RECV_USE_PRIORITY,
2112 .argc = ARRAY_SIZE(argv),
2115 if (!kdbus_conn_is_ordinary(conn) &&
2116 !kdbus_conn_is_monitor(conn) &&
2117 !kdbus_conn_is_activator(conn))
2120 ret = kdbus_args_parse(&args, argp, &cmd);
2124 cmd->dropped_msgs = 0;
2125 cmd->msg.return_flags = 0;
2126 kdbus_pool_publish_empty(conn->pool, &cmd->msg.offset,
2127 &cmd->msg.msg_size);
2129 /* DROP+priority is not reliable, so prevent it */
2130 if ((cmd->flags & KDBUS_RECV_DROP) &&
2131 (cmd->flags & KDBUS_RECV_USE_PRIORITY)) {
2136 mutex_lock(&conn->lock);
2138 entry = kdbus_queue_peek(&conn->queue, cmd->priority,
2139 cmd->flags & KDBUS_RECV_USE_PRIORITY);
2141 mutex_unlock(&conn->lock);
2143 } else if (cmd->flags & KDBUS_RECV_DROP) {
/* discard the entry; notify a pending sync sender, if any */
2144 struct kdbus_reply *reply = kdbus_reply_ref(entry->reply);
2146 kdbus_queue_entry_free(entry);
2148 mutex_unlock(&conn->lock);
2151 mutex_lock(&reply->reply_dst->lock);
2152 if (!list_empty(&reply->entry)) {
2153 kdbus_reply_unlink(reply);
2155 kdbus_sync_reply_wakeup(reply, -EPIPE);
2157 kdbus_notify_reply_dead(conn->ep->bus,
2158 reply->reply_dst->id,
2161 mutex_unlock(&reply->reply_dst->lock);
2162 kdbus_notify_flush(conn->ep->bus);
2165 kdbus_reply_unref(reply);
2170 * PEEK just returns the location of the next message. Do not
2171 * install FDs nor memfds nor anything else. The only
2172 * information of interest should be the message header and
2173 * metadata. Any FD numbers in the payload are undefined for
2175 * Also make sure to never install fds into a connection that
2176 * has refused to receive any. Ordinary connections will not get
2177 * messages with FDs queued (the receiver will get -ECOMM), but
2178 * eavesdroppers might.
2180 install_fds = (conn->flags & KDBUS_HELLO_ACCEPT_FD) &&
2181 !(cmd->flags & KDBUS_RECV_PEEK);
2183 ret = kdbus_queue_entry_install(entry,
2184 &cmd->msg.return_flags,
2187 mutex_unlock(&conn->lock);
2191 kdbus_pool_slice_publish(entry->slice, &cmd->msg.offset,
2192 &cmd->msg.msg_size);
2194 if (!(cmd->flags & KDBUS_RECV_PEEK))
2195 kdbus_queue_entry_free(entry);
2197 mutex_unlock(&conn->lock);
/* report how many messages were lost since the last RECV */
2200 cmd->dropped_msgs = atomic_xchg(&conn->lost_count, 0);
2201 if (cmd->dropped_msgs > 0)
2202 cmd->return_flags |= KDBUS_RECV_RETURN_DROPPED_MSGS;
2204 if (kdbus_member_set_user(&cmd->msg, argp, typeof(*cmd), msg) ||
2205 kdbus_member_set_user(&cmd->dropped_msgs, argp, typeof(*cmd),
2210 return kdbus_args_clear(&args, ret);
2214 * kdbus_cmd_free() - handle KDBUS_CMD_FREE
2215 * @conn: connection to operate on
2216 * @argp: command payload
2218 * Return: >=0 on success, negative error code on failure.
2220 int kdbus_cmd_free(struct kdbus_conn *conn, void __user *argp)
2222 struct kdbus_cmd_free *cmd;
2225 struct kdbus_arg argv[] = {
2226 { .type = KDBUS_ITEM_NEGOTIATE },
2228 struct kdbus_args args = {
2229 .allowed_flags = KDBUS_FLAG_NEGOTIATE,
2231 .argc = ARRAY_SIZE(argv),
2234 if (!kdbus_conn_is_ordinary(conn) &&
2235 !kdbus_conn_is_monitor(conn) &&
2236 !kdbus_conn_is_activator(conn))
2239 ret = kdbus_args_parse(&args, argp, &cmd);
/* release the pool slice previously published at @cmd->offset */
2243 ret = kdbus_pool_release_offset(conn->pool, cmd->offset);
2245 return kdbus_args_clear(&args, ret);