From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
To: arnd@arndb.de, ebiederm@xmission.com, gnomes@lxorguk.ukuu.org.uk,
	teg@jklm.no, jkosina@suse.cz, luto@amacapital.net,
	linux-api@vger.kernel.org, linux-kernel@vger.kernel.org
Cc: daniel@zonque.org, dh.herrmann@gmail.com, tixxdz@opendz.org,
	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Subject: [PATCH 05/14] kdbus: add connection, queue handling and message validation code
Date: Mon,  9 Mar 2015 14:09:11 +0100
Message-ID: <1425906560-13798-6-git-send-email-gregkh@linuxfoundation.org>
In-Reply-To: <1425906560-13798-1-git-send-email-gregkh@linuxfoundation.org>

From: Daniel Mack <daniel@zonque.org>

This patch adds code to create and destroy connections, to validate
incoming messages and to maintain the queue of messages that are
associated with a connection.

Note that connection and queue have a 1:1 relation; the code is only
split into two parts for cleaner separation and better readability.
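
As a rough, illustrative sketch (not part of this patch), the in-kernel
lifecycle added here, driven by KDBUS_CMD_HELLO and KDBUS_CMD_BYEBYE, boils
down to:

	/* KDBUS_CMD_HELLO: create the connection and link it into the bus */
	conn = kdbus_conn_new(ep, privileged, hello, ...);
	ret = kdbus_conn_connect(conn, name);

	/* KDBUS_CMD_BYEBYE: shut it down again (-EBUSY if the queue is not
	 * empty), then drop the last reference */
	kdbus_conn_disconnect(conn, true);
	kdbus_conn_unref(conn);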

Signed-off-by: Daniel Mack <daniel@zonque.org>
Signed-off-by: David Herrmann <dh.herrmann@gmail.com>
Signed-off-by: Djalal Harouni <tixxdz@opendz.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 ipc/kdbus/connection.c | 2215 ++++++++++++++++++++++++++++++++++++++++++++++++
 ipc/kdbus/connection.h |  257 ++++++
 ipc/kdbus/item.c       |  339 ++++++++
 ipc/kdbus/item.h       |   64 ++
 ipc/kdbus/message.c    |  616 ++++++++++++++
 ipc/kdbus/message.h    |  133 +++
 ipc/kdbus/queue.c      |  678 +++++++++++++++
 ipc/kdbus/queue.h      |   92 ++
 ipc/kdbus/reply.c      |  259 ++++++
 ipc/kdbus/reply.h      |   68 ++
 ipc/kdbus/util.h       |    2 +-
 11 files changed, 4722 insertions(+), 1 deletion(-)
 create mode 100644 ipc/kdbus/connection.c
 create mode 100644 ipc/kdbus/connection.h
 create mode 100644 ipc/kdbus/item.c
 create mode 100644 ipc/kdbus/item.h
 create mode 100644 ipc/kdbus/message.c
 create mode 100644 ipc/kdbus/message.h
 create mode 100644 ipc/kdbus/queue.c
 create mode 100644 ipc/kdbus/queue.h
 create mode 100644 ipc/kdbus/reply.c
 create mode 100644 ipc/kdbus/reply.h

diff --git a/ipc/kdbus/connection.c b/ipc/kdbus/connection.c
new file mode 100644
index 000000000000..e554f1a71aa1
--- /dev/null
+++ b/ipc/kdbus/connection.c
@@ -0,0 +1,2215 @@
+/*
+ * Copyright (C) 2013-2015 Kay Sievers
+ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
+ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
+ * Copyright (C) 2013-2015 Linux Foundation
+ * Copyright (C) 2014-2015 Djalal Harouni <tixxdz@opendz.org>
+ *
+ * kdbus is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the
+ * Free Software Foundation; either version 2.1 of the License, or (at
+ * your option) any later version.
+ */
+
+#include <linux/audit.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/fs_struct.h>
+#include <linux/hashtable.h>
+#include <linux/idr.h>
+#include <linux/init.h>
+#include <linux/math64.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/path.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/shmem_fs.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/syscalls.h>
+#include <linux/uio.h>
+
+#include "bus.h"
+#include "connection.h"
+#include "endpoint.h"
+#include "handle.h"
+#include "match.h"
+#include "message.h"
+#include "metadata.h"
+#include "names.h"
+#include "domain.h"
+#include "item.h"
+#include "notify.h"
+#include "policy.h"
+#include "pool.h"
+#include "reply.h"
+#include "util.h"
+#include "queue.h"
+
+#define KDBUS_CONN_ACTIVE_BIAS	(INT_MIN + 2)
+#define KDBUS_CONN_ACTIVE_NEW	(INT_MIN + 1)
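+
+/*
+ * While a connection is live, ->active counts acquired active references
+ * (>= 0). kdbus_conn_disconnect() adds KDBUS_CONN_ACTIVE_BIAS so the counter
+ * turns negative; once it drops back to exactly KDBUS_CONN_ACTIVE_BIAS, all
+ * active references have been released and the disconnect waiter may proceed.
+ * KDBUS_CONN_ACTIVE_NEW marks a connection that was never connected.
+ */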
+
+static struct kdbus_conn *kdbus_conn_new(struct kdbus_ep *ep, bool privileged,
+					 struct kdbus_cmd_hello *hello,
+					 const char *name,
+					 const struct kdbus_creds *creds,
+					 const struct kdbus_pids *pids,
+					 const char *seclabel,
+					 const char *conn_description)
+{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	static struct lock_class_key __key;
+#endif
+	struct kdbus_pool_slice *slice = NULL;
+	struct kdbus_bus *bus = ep->bus;
+	struct kdbus_conn *conn;
+	u64 attach_flags_send;
+	u64 attach_flags_recv;
+	u64 items_size = 0;
+	bool is_policy_holder;
+	bool is_activator;
+	bool is_monitor;
+	struct kvec kvec;
+	int ret;
+
+	struct {
+		u64 size;
+		u64 type;
+		struct kdbus_bloom_parameter bloom;
+	} bloom_item;
+
+	is_monitor = hello->flags & KDBUS_HELLO_MONITOR;
+	is_activator = hello->flags & KDBUS_HELLO_ACTIVATOR;
+	is_policy_holder = hello->flags & KDBUS_HELLO_POLICY_HOLDER;
+
+	if (!hello->pool_size || !IS_ALIGNED(hello->pool_size, PAGE_SIZE))
+		return ERR_PTR(-EINVAL);
+	if (is_monitor + is_activator + is_policy_holder > 1)
+		return ERR_PTR(-EINVAL);
+	if (name && !is_activator && !is_policy_holder)
+		return ERR_PTR(-EINVAL);
+	if (!name && (is_activator || is_policy_holder))
+		return ERR_PTR(-EINVAL);
+	if (name && !kdbus_name_is_valid(name, true))
+		return ERR_PTR(-EINVAL);
+	if (is_monitor && ep->user)
+		return ERR_PTR(-EOPNOTSUPP);
+	if (!privileged && (is_activator || is_policy_holder || is_monitor))
+		return ERR_PTR(-EPERM);
+	if ((creds || pids || seclabel) && !privileged)
+		return ERR_PTR(-EPERM);
+
+	ret = kdbus_sanitize_attach_flags(hello->attach_flags_send,
+					  &attach_flags_send);
+	if (ret < 0)
+		return ERR_PTR(ret);
+
+	ret = kdbus_sanitize_attach_flags(hello->attach_flags_recv,
+					  &attach_flags_recv);
+	if (ret < 0)
+		return ERR_PTR(ret);
+
+	/* The attach flags must always satisfy the bus requirements. */
+	if (bus->attach_flags_req & ~attach_flags_send)
+		return ERR_PTR(-ECONNREFUSED);
+
+	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
+	if (!conn)
+		return ERR_PTR(-ENOMEM);
+
+	kref_init(&conn->kref);
+	atomic_set(&conn->active, KDBUS_CONN_ACTIVE_NEW);
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	lockdep_init_map(&conn->dep_map, "s_active", &__key, 0);
+#endif
+	mutex_init(&conn->lock);
+	INIT_LIST_HEAD(&conn->names_list);
+	INIT_LIST_HEAD(&conn->names_queue_list);
+	INIT_LIST_HEAD(&conn->reply_list);
+	atomic_set(&conn->name_count, 0);
+	atomic_set(&conn->request_count, 0);
+	atomic_set(&conn->lost_count, 0);
+	INIT_DELAYED_WORK(&conn->work, kdbus_reply_list_scan_work);
+	conn->cred = get_current_cred();
+	init_waitqueue_head(&conn->wait);
+	kdbus_queue_init(&conn->queue);
+	conn->privileged = privileged;
+	conn->ep = kdbus_ep_ref(ep);
+	conn->id = atomic64_inc_return(&bus->domain->last_id);
+	conn->flags = hello->flags;
+	atomic64_set(&conn->attach_flags_send, attach_flags_send);
+	atomic64_set(&conn->attach_flags_recv, attach_flags_recv);
+	INIT_LIST_HEAD(&conn->monitor_entry);
+
+	if (conn_description) {
+		conn->description = kstrdup(conn_description, GFP_KERNEL);
+		if (!conn->description) {
+			ret = -ENOMEM;
+			goto exit_unref;
+		}
+	}
+
+	conn->pool = kdbus_pool_new(conn->description, hello->pool_size);
+	if (IS_ERR(conn->pool)) {
+		ret = PTR_ERR(conn->pool);
+		conn->pool = NULL;
+		goto exit_unref;
+	}
+
+	conn->match_db = kdbus_match_db_new();
+	if (IS_ERR(conn->match_db)) {
+		ret = PTR_ERR(conn->match_db);
+		conn->match_db = NULL;
+		goto exit_unref;
+	}
+
+	/* return properties of this connection to the caller */
+	hello->bus_flags = bus->bus_flags;
+	hello->id = conn->id;
+
+	BUILD_BUG_ON(sizeof(bus->id128) != sizeof(hello->id128));
+	memcpy(hello->id128, bus->id128, sizeof(hello->id128));
+
+	conn->meta = kdbus_meta_proc_new();
+	if (IS_ERR(conn->meta)) {
+		ret = PTR_ERR(conn->meta);
+		conn->meta = NULL;
+		goto exit_unref;
+	}
+
+	/* privileged processes can impersonate somebody else */
+	if (creds || pids || seclabel) {
+		ret = kdbus_meta_proc_fake(conn->meta, creds, pids, seclabel);
+		if (ret < 0)
+			goto exit_unref;
+
+		conn->faked_meta = true;
+	} else {
+		ret = kdbus_meta_proc_collect(conn->meta,
+					      KDBUS_ATTACH_CREDS |
+					      KDBUS_ATTACH_PIDS |
+					      KDBUS_ATTACH_AUXGROUPS |
+					      KDBUS_ATTACH_TID_COMM |
+					      KDBUS_ATTACH_PID_COMM |
+					      KDBUS_ATTACH_EXE |
+					      KDBUS_ATTACH_CMDLINE |
+					      KDBUS_ATTACH_CGROUP |
+					      KDBUS_ATTACH_CAPS |
+					      KDBUS_ATTACH_SECLABEL |
+					      KDBUS_ATTACH_AUDIT);
+		if (ret < 0)
+			goto exit_unref;
+	}
+
+	/*
+	 * Account the connection against the current user (UID), or for
+	 * custom endpoints use the anonymous user assigned to the endpoint.
+	 * Note that limits are always accounted against the real UID, not
+	 * the effective UID (cred->user always points to the accounting of
+	 * cred->uid, not cred->euid).
+	 */
+	if (ep->user) {
+		conn->user = kdbus_user_ref(ep->user);
+	} else {
+		conn->user = kdbus_user_lookup(ep->bus->domain, current_uid());
+		if (IS_ERR(conn->user)) {
+			ret = PTR_ERR(conn->user);
+			conn->user = NULL;
+			goto exit_unref;
+		}
+	}
+
+	if (atomic_inc_return(&conn->user->connections) > KDBUS_USER_MAX_CONN) {
+		/* decremented by destructor as conn->user is valid */
+		ret = -EMFILE;
+		goto exit_unref;
+	}
+
+	bloom_item.size = sizeof(bloom_item);
+	bloom_item.type = KDBUS_ITEM_BLOOM_PARAMETER;
+	bloom_item.bloom = bus->bloom;
+	kdbus_kvec_set(&kvec, &bloom_item, bloom_item.size, &items_size);
+
+	slice = kdbus_pool_slice_alloc(conn->pool, items_size, false);
+	if (IS_ERR(slice)) {
+		ret = PTR_ERR(slice);
+		slice = NULL;
+		goto exit_unref;
+	}
+
+	ret = kdbus_pool_slice_copy_kvec(slice, 0, &kvec, 1, items_size);
+	if (ret < 0)
+		goto exit_unref;
+
+	kdbus_pool_slice_publish(slice, &hello->offset, &hello->items_size);
+	kdbus_pool_slice_release(slice);
+
+	return conn;
+
+exit_unref:
+	kdbus_pool_slice_release(slice);
+	kdbus_conn_unref(conn);
+	return ERR_PTR(ret);
+}
+
+static void __kdbus_conn_free(struct kref *kref)
+{
+	struct kdbus_conn *conn = container_of(kref, struct kdbus_conn, kref);
+
+	WARN_ON(kdbus_conn_active(conn));
+	WARN_ON(delayed_work_pending(&conn->work));
+	WARN_ON(!list_empty(&conn->queue.msg_list));
+	WARN_ON(!list_empty(&conn->names_list));
+	WARN_ON(!list_empty(&conn->names_queue_list));
+	WARN_ON(!list_empty(&conn->reply_list));
+
+	if (conn->user) {
+		atomic_dec(&conn->user->connections);
+		kdbus_user_unref(conn->user);
+	}
+
+	kdbus_meta_proc_unref(conn->meta);
+	kdbus_match_db_free(conn->match_db);
+	kdbus_pool_free(conn->pool);
+	kdbus_ep_unref(conn->ep);
+	put_cred(conn->cred);
+	kfree(conn->description);
+	kfree(conn->quota);
+	kfree(conn);
+}
+
+/**
+ * kdbus_conn_ref() - take a connection reference
+ * @conn:		Connection, may be %NULL
+ *
+ * Return: the connection itself
+ */
+struct kdbus_conn *kdbus_conn_ref(struct kdbus_conn *conn)
+{
+	if (conn)
+		kref_get(&conn->kref);
+	return conn;
+}
+
+/**
+ * kdbus_conn_unref() - drop a connection reference
+ * @conn:		Connection (may be NULL)
+ *
+ * When the last reference is dropped, the connection's internal structure
+ * is freed.
+ *
+ * Return: NULL
+ */
+struct kdbus_conn *kdbus_conn_unref(struct kdbus_conn *conn)
+{
+	if (conn)
+		kref_put(&conn->kref, __kdbus_conn_free);
+	return NULL;
+}
+
+/**
+ * kdbus_conn_active() - connection is not disconnected
+ * @conn:		Connection to check
+ *
+ * Return true if the connection has not been disconnected yet. Note that a
+ * connection might be disconnected asynchronously at any time, unless you
+ * hold the connection lock. If that's not suitable for you, see
+ * kdbus_conn_acquire() to suppress connection shutdown for a short period.
+ *
+ * Return: true if the connection is still active
+ */
+bool kdbus_conn_active(const struct kdbus_conn *conn)
+{
+	return atomic_read(&conn->active) >= 0;
+}
+
+/**
+ * kdbus_conn_acquire() - acquire an active connection reference
+ * @conn:		Connection
+ *
+ * Users can close a connection via KDBUS_BYEBYE (or by destroying the
+ * endpoint/bus/...) at any time. Whenever this happens, we should deny any
+ * user-visible action on this connection and signal ECONNRESET instead.
+ * To avoid testing for connection availability every time you take the
+ * connection lock, you can acquire a connection for short periods.
+ *
+ * By calling kdbus_conn_acquire(), you gain an "active reference" to the
+ * connection. You must also hold a regular reference at all times! As long as
+ * you hold the active-ref, the connection will not be shut down. However, if
+ * the connection was shut down, you can never acquire an active-ref again.
+ *
+ * kdbus_conn_disconnect() disables the connection and then waits for all active
+ * references to be dropped. It will also wake up any pending operation.
+ * However, you must not sleep for an indefinite period while holding an
+ * active-reference. Otherwise, kdbus_conn_disconnect() might stall. If you need
+ * to sleep for an indefinite period, either release the reference and try to
+ * acquire it again after waking up, or make kdbus_conn_disconnect() wake up
+ * your wait-queue.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int kdbus_conn_acquire(struct kdbus_conn *conn)
+{
+	if (!atomic_inc_unless_negative(&conn->active))
+		return -ECONNRESET;
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	rwsem_acquire_read(&conn->dep_map, 0, 1, _RET_IP_);
+#endif
+
+	return 0;
+}
+
+/**
+ * kdbus_conn_release() - release an active connection reference
+ * @conn:		Connection
+ *
+ * This releases an active reference that has been acquired via
+ * kdbus_conn_acquire(). If the connection was already disabled and this is the
+ * last active-ref that is dropped, the disconnect-waiter will be woken up and
+ * properly close the connection.
+ */
+void kdbus_conn_release(struct kdbus_conn *conn)
+{
+	int v;
+
+	if (!conn)
+		return;
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	rwsem_release(&conn->dep_map, 1, _RET_IP_);
+#endif
+
+	v = atomic_dec_return(&conn->active);
+	if (v != KDBUS_CONN_ACTIVE_BIAS)
+		return;
+
+	wake_up_all(&conn->wait);
+}
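+
+/*
+ * Illustrative usage sketch (do_short_work() is a hypothetical placeholder):
+ * acquire the connection, fail early if it was already shut down, do a short,
+ * bounded amount of work, then release the active reference again.
+ *
+ *	ret = kdbus_conn_acquire(conn);
+ *	if (ret < 0)
+ *		return ret;
+ *	do_short_work(conn);
+ *	kdbus_conn_release(conn);
+ */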
+
+static int kdbus_conn_connect(struct kdbus_conn *conn, const char *name)
+{
+	struct kdbus_ep *ep = conn->ep;
+	struct kdbus_bus *bus = ep->bus;
+	int ret;
+
+	if (WARN_ON(atomic_read(&conn->active) != KDBUS_CONN_ACTIVE_NEW))
+		return -EALREADY;
+
+	/* make sure the ep-node is active while we add our connection */
+	if (!kdbus_node_acquire(&ep->node))
+		return -ESHUTDOWN;
+
+	/* lock order: domain -> bus -> ep -> names -> conn */
+	mutex_lock(&ep->lock);
+	down_write(&bus->conn_rwlock);
+
+	/* link into monitor list */
+	if (kdbus_conn_is_monitor(conn))
+		list_add_tail(&conn->monitor_entry, &bus->monitors_list);
+
+	/* link into bus and endpoint */
+	list_add_tail(&conn->ep_entry, &ep->conn_list);
+	hash_add(bus->conn_hash, &conn->hentry, conn->id);
+
+	/* enable lookups and acquire active ref */
+	atomic_set(&conn->active, 1);
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	rwsem_acquire_read(&conn->dep_map, 0, 1, _RET_IP_);
+#endif
+
+	up_write(&bus->conn_rwlock);
+	mutex_unlock(&ep->lock);
+
+	kdbus_node_release(&ep->node);
+
+	/*
+	 * Notify subscribers about the new active connection, unless it is
+	 * a monitor. Monitors are invisible on the bus, can't be addressed
+	 * directly, and won't cause any notifications.
+	 */
+	if (!kdbus_conn_is_monitor(conn)) {
+		ret = kdbus_notify_id_change(conn->ep->bus, KDBUS_ITEM_ID_ADD,
+					     conn->id, conn->flags);
+		if (ret < 0)
+			goto exit_disconnect;
+	}
+
+	if (kdbus_conn_is_activator(conn)) {
+		u64 flags = KDBUS_NAME_ACTIVATOR;
+
+		if (WARN_ON(!name)) {
+			ret = -EINVAL;
+			goto exit_disconnect;
+		}
+
+		ret = kdbus_name_acquire(bus->name_registry, conn, name,
+					 flags, NULL);
+		if (ret < 0)
+			goto exit_disconnect;
+	}
+
+	kdbus_conn_release(conn);
+	kdbus_notify_flush(bus);
+	return 0;
+
+exit_disconnect:
+	kdbus_conn_release(conn);
+	kdbus_conn_disconnect(conn, false);
+	return ret;
+}
+
+/**
+ * kdbus_conn_disconnect() - disconnect a connection
+ * @conn:		The connection to disconnect
+ * @ensure_queue_empty:	Flag to indicate if the call should fail in
+ *			case the connection's message list is not
+ *			empty
+ *
+ * If @ensure_queue_empty is true and the connection has pending messages,
+ * -EBUSY is returned.
+ *
+ * Return: 0 on success, negative errno on failure
+ */
+int kdbus_conn_disconnect(struct kdbus_conn *conn, bool ensure_queue_empty)
+{
+	struct kdbus_queue_entry *entry, *tmp;
+	struct kdbus_bus *bus = conn->ep->bus;
+	struct kdbus_reply *r, *r_tmp;
+	struct kdbus_conn *c;
+	int i, v;
+
+	mutex_lock(&conn->lock);
+	v = atomic_read(&conn->active);
+	if (v == KDBUS_CONN_ACTIVE_NEW) {
+		/* was never connected */
+		mutex_unlock(&conn->lock);
+		return 0;
+	}
+	if (v < 0) {
+		/* already dead */
+		mutex_unlock(&conn->lock);
+		return -ECONNRESET;
+	}
+	if (ensure_queue_empty && !list_empty(&conn->queue.msg_list)) {
+		/* still busy */
+		mutex_unlock(&conn->lock);
+		return -EBUSY;
+	}
+
+	atomic_add(KDBUS_CONN_ACTIVE_BIAS, &conn->active);
+	mutex_unlock(&conn->lock);
+
+	wake_up_interruptible(&conn->wait);
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	rwsem_acquire(&conn->dep_map, 0, 0, _RET_IP_);
+	if (atomic_read(&conn->active) != KDBUS_CONN_ACTIVE_BIAS)
+		lock_contended(&conn->dep_map, _RET_IP_);
+#endif
+
+	wait_event(conn->wait,
+		   atomic_read(&conn->active) == KDBUS_CONN_ACTIVE_BIAS);
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	lock_acquired(&conn->dep_map, _RET_IP_);
+	rwsem_release(&conn->dep_map, 1, _RET_IP_);
+#endif
+
+	cancel_delayed_work_sync(&conn->work);
+	kdbus_policy_remove_owner(&conn->ep->bus->policy_db, conn);
+
+	/* lock order: domain -> bus -> ep -> names -> conn */
+	mutex_lock(&conn->ep->lock);
+	down_write(&bus->conn_rwlock);
+
+	/* remove from bus and endpoint */
+	hash_del(&conn->hentry);
+	list_del(&conn->monitor_entry);
+	list_del(&conn->ep_entry);
+
+	up_write(&bus->conn_rwlock);
+	mutex_unlock(&conn->ep->lock);
+
+	/*
+	 * Remove all names associated with this connection; this possibly
+	 * moves queued messages back to the activator connection.
+	 */
+	kdbus_name_release_all(bus->name_registry, conn);
+
+	/* if we die while other connections wait for our reply, notify them */
+	mutex_lock(&conn->lock);
+	list_for_each_entry_safe(entry, tmp, &conn->queue.msg_list, entry) {
+		if (entry->reply)
+			kdbus_notify_reply_dead(bus,
+						entry->reply->reply_dst->id,
+						entry->reply->cookie);
+		kdbus_queue_entry_free(entry);
+	}
+
+	list_for_each_entry_safe(r, r_tmp, &conn->reply_list, entry)
+		kdbus_reply_unlink(r);
+	mutex_unlock(&conn->lock);
+
+	/* lock order: domain -> bus -> ep -> names -> conn */
+	down_read(&bus->conn_rwlock);
+	hash_for_each(bus->conn_hash, i, c, hentry) {
+		mutex_lock(&c->lock);
+		list_for_each_entry_safe(r, r_tmp, &c->reply_list, entry) {
+			if (r->reply_src == conn) {
+				if (r->sync) {
+					kdbus_sync_reply_wakeup(r, -EPIPE);
+					kdbus_reply_unlink(r);
+					continue;
+				}
+
+				/* send a 'connection dead' notification */
+				kdbus_notify_reply_dead(bus, c->id, r->cookie);
+				kdbus_reply_unlink(r);
+			}
+		}
+		mutex_unlock(&c->lock);
+	}
+	up_read(&bus->conn_rwlock);
+
+	if (!kdbus_conn_is_monitor(conn))
+		kdbus_notify_id_change(bus, KDBUS_ITEM_ID_REMOVE,
+				       conn->id, conn->flags);
+
+	kdbus_notify_flush(bus);
+
+	return 0;
+}
+
+/**
+ * kdbus_conn_has_name() - check if a connection owns a name
+ * @conn:		Connection
+ * @name:		Well-known name to check for
+ *
+ * The caller must hold the registry lock of conn->ep->bus.
+ *
+ * Return: true if the name is currently owned by the connection
+ */
+bool kdbus_conn_has_name(struct kdbus_conn *conn, const char *name)
+{
+	struct kdbus_name_entry *e;
+
+	lockdep_assert_held(&conn->ep->bus->name_registry->rwlock);
+
+	list_for_each_entry(e, &conn->names_list, conn_entry)
+		if (strcmp(e->name, name) == 0)
+			return true;
+
+	return false;
+}
+
+struct kdbus_quota {
+	uint32_t memory;
+	uint16_t msgs;
+	uint8_t fds;
+};
+
+/**
+ * kdbus_conn_quota_inc() - increase quota accounting
+ * @c:		connection owning the quota tracking
+ * @u:		user to account for (or NULL for kernel accounting)
+ * @memory:	size of memory to account for
+ * @fds:	number of FDs to account for
+ *
+ * This call manages the quotas on connection @c. That is, it's used if other
+ * users want to use the resources of connection @c, which so far only concerns
+ * the receive queue of the destination.
+ *
+ * This increases the quota-accounting for user @u by @memory bytes and @fds
+ * file descriptors. If the user has already reached the quota limits, this call
+ * will not do any accounting but return a negative error code indicating the
+ * failure.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int kdbus_conn_quota_inc(struct kdbus_conn *c, struct kdbus_user *u,
+			 size_t memory, size_t fds)
+{
+	struct kdbus_quota *quota;
+	size_t available, accounted;
+	unsigned int id;
+
+	/*
+	 * Pool Layout:
+	 * 50% of a pool is always owned by the connection. It is reserved for
+	 * kernel queries, handling received messages and other tasks that are
+	 * under control of the pool owner. The other 50% of the pool is used
+	 * as the incoming queue.
+	 * As we optionally support user-space based policies, we need fair
+	 * allocation schemes. Furthermore, resource utilization should be
+	 * maximized, so only minimal resources stay reserved. However, we need
+	 * to adapt to a dynamic number of users, as we cannot know how many
+	 * users will talk to a connection. Therefore, the current allocation
+	 * works like this:
+	 * We limit the number of bytes in a destination's pool per sending
+	 * user. The space available for a user is 33% of the unused pool space
+	 * (where the space used by the user itself is also treated as
+	 * 'unused'). This way, we favor users coming first, but keep enough
+	 * pool space available for any following users. Given that messages are
+	 * dequeued in FIFO order, this should balance nicely if the number of
+	 * users grows. At the same time, this algorithm guarantees that the
+	 * space available to a connection is reduced dynamically, the more
+	 * concurrent users talk to a connection.
+	 */
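+
+	/*
+	 * Illustrative example of the computation below (sizes hypothetical):
+	 * if half the pool amounts to 512 KiB, 128 KiB of it is already
+	 * accounted to incoming messages, and this user has nothing accounted
+	 * yet, the user may claim up to (512 KiB - 128 KiB) / 3 = 128 KiB of
+	 * queue memory.
+	 */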
+
+	/* per-user accounting is expensive, so we keep the state small */
+	BUILD_BUG_ON(sizeof(quota->memory) != 4);
+	BUILD_BUG_ON(sizeof(quota->msgs) != 2);
+	BUILD_BUG_ON(sizeof(quota->fds) != 1);
+	BUILD_BUG_ON(KDBUS_CONN_MAX_MSGS > U16_MAX);
+	BUILD_BUG_ON(KDBUS_CONN_MAX_FDS_PER_USER > U8_MAX);
+
+	id = u ? u->id : KDBUS_USER_KERNEL_ID;
+	if (id >= c->n_quota) {
+		unsigned int users;
+
+		users = max(KDBUS_ALIGN8(id) + 8, id);
+		quota = krealloc(c->quota, users * sizeof(*quota),
+				 GFP_KERNEL | __GFP_ZERO);
+		if (!quota)
+			return -ENOMEM;
+
+		c->n_quota = users;
+		c->quota = quota;
+	}
+
+	quota = &c->quota[id];
+	kdbus_pool_accounted(c->pool, &available, &accounted);
+
+	/* half the pool is _always_ reserved for the pool owner */
+	available /= 2;
+
+	/*
+	 * Pool owner slices are un-accounted slices; they can claim more
+	 * than 50% of the queue. However, the slices we're dealing with here
+	 * belong to the incoming queue, hence they are 'accounted' slices
+	 * to which the 50% limit applies.
+	 */
+	if (available < accounted)
+		return -ENOBUFS;
+
+	/* 1/3 of the remaining space (including your own memory) */
+	available = (available - accounted + quota->memory) / 3;
+
+	if (available < quota->memory ||
+	    available - quota->memory < memory ||
+	    quota->memory + memory > U32_MAX)
+		return -ENOBUFS;
+	if (quota->msgs >= KDBUS_CONN_MAX_MSGS)
+		return -ENOBUFS;
+	if (quota->fds + fds < quota->fds ||
+	    quota->fds + fds > KDBUS_CONN_MAX_FDS_PER_USER)
+		return -EMFILE;
+
+	quota->memory += memory;
+	quota->fds += fds;
+	++quota->msgs;
+	return 0;
+}
+
+/**
+ * kdbus_conn_quota_dec() - decrease quota accounting
+ * @c:		connection owning the quota tracking
+ * @u:		user which was accounted for (or NULL for kernel accounting)
+ * @memory:	size of memory which was accounted for
+ * @fds:	number of FDs which were accounted for
+ *
+ * This does the reverse of kdbus_conn_quota_inc(). You have to release any
+ * accounted resources that you called kdbus_conn_quota_inc() for. However, you
+ * must not call kdbus_conn_quota_dec() if the accounting failed (that is,
+ * kdbus_conn_quota_inc() failed).
+ */
+void kdbus_conn_quota_dec(struct kdbus_conn *c, struct kdbus_user *u,
+			  size_t memory, size_t fds)
+{
+	struct kdbus_quota *quota;
+	unsigned int id;
+
+	id = u ? u->id : KDBUS_USER_KERNEL_ID;
+	if (WARN_ON(id >= c->n_quota))
+		return;
+
+	quota = &c->quota[id];
+
+	if (!WARN_ON(quota->msgs == 0))
+		--quota->msgs;
+	if (!WARN_ON(quota->memory < memory))
+		quota->memory -= memory;
+	if (!WARN_ON(quota->fds < fds))
+		quota->fds -= fds;
+}
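+
+/*
+ * Illustrative pairing sketch (not a real call site): whoever accounts
+ * resources via kdbus_conn_quota_inc() must later release the very same
+ * amounts again, unless the accounting call itself failed.
+ *
+ *	ret = kdbus_conn_quota_inc(c, user, memory, fds);
+ *	if (ret < 0)
+ *		return ret;
+ *	...
+ *	kdbus_conn_quota_dec(c, user, memory, fds);
+ */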
+
+/**
+ * kdbus_conn_lost_message() - handle lost messages
+ * @c:		connection that lost a message
+ *
+ * kdbus is reliable. That means we try hard to never lose messages. However,
+ * memory is limited, so we cannot rely on transmissions to never fail.
+ * Therefore, we use quota limits to let callers know if their unicast message
+ * cannot be transmitted to a peer. This works fine for unicasts, but for
+ * broadcasts we cannot make the caller handle the transmission failure.
+ * Instead, we must let the destination know that it couldn't receive a
+ * broadcast.
+ * As this is an unlikely scenario, we keep it simple. A single lost-counter
+ * remembers the number of lost messages since the last call to RECV. The next
+ * message retrieval will notify the connection that it has lost messages in
+ * the meantime and thus should resync its state.
+ */
+void kdbus_conn_lost_message(struct kdbus_conn *c)
+{
+	if (atomic_inc_return(&c->lost_count) == 1)
+		wake_up_interruptible(&c->wait);
+}
+
+/* Callers should take the conn_dst lock */
+static struct kdbus_queue_entry *
+kdbus_conn_entry_make(struct kdbus_conn *conn_dst,
+		      const struct kdbus_kmsg *kmsg,
+		      struct kdbus_user *user)
+{
+	struct kdbus_queue_entry *entry;
+
+	/* The remote connection was disconnected */
+	if (!kdbus_conn_active(conn_dst))
+		return ERR_PTR(-ECONNRESET);
+
+	/*
+	 * If the connection does not accept file descriptors but the message
+	 * has some attached, refuse it.
+	 *
+	 * If this is a monitor connection, accept the message. In that
+	 * case, all file descriptors will be set to -1 at receive time.
+	 */
+	if (!kdbus_conn_is_monitor(conn_dst) &&
+	    !(conn_dst->flags & KDBUS_HELLO_ACCEPT_FD) &&
+	    kmsg->res && kmsg->res->fds_count > 0)
+		return ERR_PTR(-ECOMM);
+
+	entry = kdbus_queue_entry_new(conn_dst, kmsg, user);
+	if (IS_ERR(entry))
+		return entry;
+
+	return entry;
+}
+
+/*
+ * When synchronously responding to a message, allocate a queue entry
+ * and attach it to the reply tracking object.
+ * The destination connection's queue will never get to see it.
+ */
+static int kdbus_conn_entry_sync_attach(struct kdbus_conn *conn_dst,
+					const struct kdbus_kmsg *kmsg,
+					struct kdbus_reply *reply_wake)
+{
+	struct kdbus_queue_entry *entry;
+	int remote_ret;
+	int ret = 0;
+
+	mutex_lock(&reply_wake->reply_dst->lock);
+
+	/*
+	 * If we are still waiting, proceed: allocate a queue
+	 * entry and attach it to the reply object.
+	 */
+	if (reply_wake->waiting) {
+		entry = kdbus_conn_entry_make(conn_dst, kmsg,
+					      reply_wake->reply_src->user);
+		if (IS_ERR(entry))
+			ret = PTR_ERR(entry);
+		else
+			/* Attach the entry to the reply object */
+			reply_wake->queue_entry = entry;
+	} else {
+		ret = -ECONNRESET;
+	}
+
+	/*
+	 * Update the reply object and wake up remote peer only
+	 * on appropriate return codes
+	 *
+	 * * -ECOMM: if the replying connection failed with -ECOMM,
+	 *           then wake up the remote peer with -EREMOTEIO.
+	 *
+	 *           We do this to differentiate between -ECOMM errors
+	 *           from the original sender's perspective:
+	 *           an -ECOMM error during the sync send and
+	 *           an -ECOMM error during the sync reply; the latter
+	 *           is rewritten to -EREMOTEIO.
+	 *
+	 * * Wake up on all other return codes.
+	 */
+	remote_ret = ret;
+
+	if (ret == -ECOMM)
+		remote_ret = -EREMOTEIO;
+
+	kdbus_sync_reply_wakeup(reply_wake, remote_ret);
+	kdbus_reply_unlink(reply_wake);
+	mutex_unlock(&reply_wake->reply_dst->lock);
+
+	return ret;
+}
+
+/**
+ * kdbus_conn_entry_insert() - enqueue a message into the receiver's pool
+ * @conn_src:		The sending connection
+ * @conn_dst:		The connection to queue into
+ * @kmsg:		The kmsg to queue
+ * @reply:		The reply tracker to attach to the queue entry
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int kdbus_conn_entry_insert(struct kdbus_conn *conn_src,
+			    struct kdbus_conn *conn_dst,
+			    const struct kdbus_kmsg *kmsg,
+			    struct kdbus_reply *reply)
+{
+	struct kdbus_queue_entry *entry;
+	int ret;
+
+	kdbus_conn_lock2(conn_src, conn_dst);
+
+	entry = kdbus_conn_entry_make(conn_dst, kmsg,
+				      conn_src ? conn_src->user : NULL);
+	if (IS_ERR(entry)) {
+		ret = PTR_ERR(entry);
+		goto exit_unlock;
+	}
+
+	if (reply) {
+		kdbus_reply_link(reply);
+		if (!reply->sync)
+			schedule_delayed_work(&conn_src->work, 0);
+	}
+
+	kdbus_queue_entry_enqueue(entry, reply);
+	wake_up_interruptible(&conn_dst->wait);
+
+	ret = 0;
+
+exit_unlock:
+	kdbus_conn_unlock2(conn_src, conn_dst);
+	return ret;
+}
+
+static int kdbus_conn_wait_reply(struct kdbus_conn *conn_src,
+				 struct kdbus_cmd_send *cmd_send,
+				 struct file *ioctl_file,
+				 struct file *cancel_fd,
+				 struct kdbus_reply *reply_wait,
+				 ktime_t expire)
+{
+	struct kdbus_queue_entry *entry;
+	struct poll_wqueues pwq = {};
+	int ret;
+
+	if (WARN_ON(!reply_wait))
+		return -EIO;
+
+	/*
+	 * Block until the reply arrives. reply_wait is left untouched
+	 * by the timeout scans that might be conducted for other,
+	 * asynchronous replies of conn_src.
+	 */
+
+	poll_initwait(&pwq);
+	poll_wait(ioctl_file, &conn_src->wait, &pwq.pt);
+
+	for (;;) {
+		/*
+		 * Any of the following conditions will stop our synchronously
+		 * blocking SEND command:
+		 *
+		 * a) The origin sender closed its connection
+		 * b) The remote peer answered, setting reply_wait->waiting = 0
+		 * c) The cancel FD was written to
+		 * d) A signal was received
+		 * e) The specified timeout was reached, and none of the above
+		 *    conditions kicked in.
+		 */
+
+		/*
+		 * We have already acquired an active reference when
+		 * entering here, but another thread may call
+		 * KDBUS_CMD_BYEBYE which does not acquire an active
+		 * reference, therefore kdbus_conn_disconnect() will
+		 * not wait for us.
+		 */
+		if (!kdbus_conn_active(conn_src)) {
+			ret = -ECONNRESET;
+			break;
+		}
+
+		/*
+		 * Once the replying peer has unset the waiting variable,
+		 * it will wake us up.
+		 */
+		if (!reply_wait->waiting) {
+			ret = reply_wait->err;
+			break;
+		}
+
+		if (cancel_fd) {
+			unsigned int r;
+
+			r = cancel_fd->f_op->poll(cancel_fd, &pwq.pt);
+			if (r & POLLIN) {
+				ret = -ECANCELED;
+				break;
+			}
+		}
+
+		if (signal_pending(current)) {
+			ret = -EINTR;
+			break;
+		}
+
+		if (!poll_schedule_timeout(&pwq, TASK_INTERRUPTIBLE,
+					   &expire, 0)) {
+			ret = -ETIMEDOUT;
+			break;
+		}
+
+		/*
+		 * Reset the poll worker func, so the waitqueues are not
+		 * added to the poll table again. We just reuse what we've
+		 * collected earlier for further iterations.
+		 */
+		init_poll_funcptr(&pwq.pt, NULL);
+	}
+
+	poll_freewait(&pwq);
+
+	if (ret == -EINTR) {
+		/*
+		 * Interrupted system call. Unref the reply object, and pass
+		 * the return value down the chain. Mark the reply as
+		 * interrupted, so the cleanup work can remove it, but do not
+		 * unlink it from the list. Once the syscall restarts, we'll
+		 * pick it up and wait on it again.
+		 */
+		mutex_lock(&conn_src->lock);
+		reply_wait->interrupted = true;
+		schedule_delayed_work(&conn_src->work, 0);
+		mutex_unlock(&conn_src->lock);
+
+		return -ERESTARTSYS;
+	}
+
+	mutex_lock(&conn_src->lock);
+	reply_wait->waiting = false;
+	entry = reply_wait->queue_entry;
+	if (entry) {
+		ret = kdbus_queue_entry_install(entry,
+						&cmd_send->reply.return_flags,
+						true);
+		kdbus_pool_slice_publish(entry->slice, &cmd_send->reply.offset,
+					 &cmd_send->reply.msg_size);
+		kdbus_queue_entry_free(entry);
+	}
+	kdbus_reply_unlink(reply_wait);
+	mutex_unlock(&conn_src->lock);
+
+	return ret;
+}
+
+static int kdbus_pin_dst(struct kdbus_bus *bus,
+			 struct kdbus_kmsg *kmsg,
+			 struct kdbus_name_entry **out_name,
+			 struct kdbus_conn **out_dst)
+{
+	struct kdbus_msg_resources *res = kmsg->res;
+	struct kdbus_name_entry *name = NULL;
+	struct kdbus_conn *dst = NULL;
+	struct kdbus_msg *msg = &kmsg->msg;
+	int ret;
+
+	if (WARN_ON(!res))
+		return -EINVAL;
+
+	lockdep_assert_held(&bus->name_registry->rwlock);
+
+	if (!res->dst_name) {
+		dst = kdbus_bus_find_conn_by_id(bus, msg->dst_id);
+		if (!dst)
+			return -ENXIO;
+
+		if (!kdbus_conn_is_ordinary(dst)) {
+			ret = -ENXIO;
+			goto error;
+		}
+	} else {
+		name = kdbus_name_lookup_unlocked(bus->name_registry,
+						  res->dst_name);
+		if (!name)
+			return -ESRCH;
+
+		/*
+		 * If both a name and a connection ID are given as destination
+		 * of a message, check that the currently owning connection of
+		 * the name matches the specified ID.
+		 * This way, we allow userspace to send the message to a
+		 * specific connection by ID only if the connection currently
+		 * owns the given name.
+		 */
+		if (msg->dst_id != KDBUS_DST_ID_NAME &&
+		    msg->dst_id != name->conn->id)
+			return -EREMCHG;
+
+		if (!name->conn && name->activator)
+			dst = kdbus_conn_ref(name->activator);
+		else
+			dst = kdbus_conn_ref(name->conn);
+
+		if ((msg->flags & KDBUS_MSG_NO_AUTO_START) &&
+		    kdbus_conn_is_activator(dst)) {
+			ret = -EADDRNOTAVAIL;
+			goto error;
+		}
+
+		/*
+		 * Record the sequence number of the registered name; it will
+		 * be passed on to the queue, in case messages addressed to a
+		 * name need to be moved from or to an activator.
+		 */
+		kmsg->dst_name_id = name->name_id;
+	}
+
+	*out_name = name;
+	*out_dst = dst;
+	return 0;
+
+error:
+	kdbus_conn_unref(dst);
+	return ret;
+}
+
+static int kdbus_conn_reply(struct kdbus_conn *src, struct kdbus_kmsg *kmsg)
+{
+	struct kdbus_name_entry *name = NULL;
+	struct kdbus_reply *reply, *wake = NULL;
+	struct kdbus_conn *dst = NULL;
+	struct kdbus_bus *bus = src->ep->bus;
+	u64 attach;
+	int ret;
+
+	if (WARN_ON(kmsg->msg.dst_id == KDBUS_DST_ID_BROADCAST) ||
+	    WARN_ON(kmsg->msg.flags & KDBUS_MSG_EXPECT_REPLY) ||
+	    WARN_ON(kmsg->msg.flags & KDBUS_MSG_SIGNAL))
+		return -EINVAL;
+
+	/* name-registry must be locked for lookup *and* collecting data */
+	down_read(&bus->name_registry->rwlock);
+
+	/* find and pin destination */
+
+	ret = kdbus_pin_dst(bus, kmsg, &name, &dst);
+	if (ret < 0)
+		goto exit;
+
+	mutex_lock(&dst->lock);
+	reply = kdbus_reply_find(src, dst, kmsg->msg.cookie_reply);
+	if (reply) {
+		if (reply->sync)
+			wake = kdbus_reply_ref(reply);
+		kdbus_reply_unlink(reply);
+	}
+	mutex_unlock(&dst->lock);
+
+	if (!reply) {
+		ret = -EPERM;
+		goto exit;
+	}
+
+	/* attach metadata */
+
+	attach = kdbus_meta_calc_attach_flags(src, dst);
+
+	if (!src->faked_meta) {
+		ret = kdbus_meta_proc_collect(kmsg->proc_meta, attach);
+		if (ret < 0)
+			goto exit;
+	}
+
+	ret = kdbus_meta_conn_collect(kmsg->conn_meta, kmsg, src, attach);
+	if (ret < 0)
+		goto exit;
+
+	/* send message */
+
+	kdbus_bus_eavesdrop(bus, src, kmsg);
+
+	if (wake)
+		ret = kdbus_conn_entry_sync_attach(dst, kmsg, wake);
+	else
+		ret = kdbus_conn_entry_insert(src, dst, kmsg, NULL);
+
+exit:
+	up_read(&bus->name_registry->rwlock);
+	kdbus_reply_unref(wake);
+	kdbus_conn_unref(dst);
+	return ret;
+}
+
+static struct kdbus_reply *kdbus_conn_call(struct kdbus_conn *src,
+					   struct kdbus_kmsg *kmsg,
+					   ktime_t exp)
+{
+	struct kdbus_name_entry *name = NULL;
+	struct kdbus_reply *wait = NULL;
+	struct kdbus_conn *dst = NULL;
+	struct kdbus_bus *bus = src->ep->bus;
+	u64 attach;
+	int ret;
+
+	if (WARN_ON(kmsg->msg.dst_id == KDBUS_DST_ID_BROADCAST) ||
+	    WARN_ON(kmsg->msg.flags & KDBUS_MSG_SIGNAL) ||
+	    WARN_ON(!(kmsg->msg.flags & KDBUS_MSG_EXPECT_REPLY)))
+		return ERR_PTR(-EINVAL);
+
+	/* resume previous wait-context, if available */
+
+	mutex_lock(&src->lock);
+	wait = kdbus_reply_find(NULL, src, kmsg->msg.cookie);
+	if (wait) {
+		if (wait->interrupted) {
+			kdbus_reply_ref(wait);
+			wait->interrupted = false;
+		} else {
+			wait = NULL;
+		}
+	}
+	mutex_unlock(&src->lock);
+
+	if (wait)
+		return wait;
+
+	if (ktime_compare(ktime_get(), exp) >= 0)
+		return ERR_PTR(-ETIMEDOUT);
+
+	/* name-registry must be locked for lookup *and* collecting data */
+	down_read(&bus->name_registry->rwlock);
+
+	/* find and pin destination */
+
+	ret = kdbus_pin_dst(bus, kmsg, &name, &dst);
+	if (ret < 0)
+		goto exit;
+
+	if (!kdbus_conn_policy_talk(src, current_cred(), dst)) {
+		ret = -EPERM;
+		goto exit;
+	}
+
+	wait = kdbus_reply_new(dst, src, &kmsg->msg, name, true);
+	if (IS_ERR(wait)) {
+		ret = PTR_ERR(wait);
+		wait = NULL;
+		goto exit;
+	}
+
+	/* attach metadata */
+
+	attach = kdbus_meta_calc_attach_flags(src, dst);
+
+	if (!src->faked_meta) {
+		ret = kdbus_meta_proc_collect(kmsg->proc_meta, attach);
+		if (ret < 0)
+			goto exit;
+	}
+
+	ret = kdbus_meta_conn_collect(kmsg->conn_meta, kmsg, src, attach);
+	if (ret < 0)
+		goto exit;
+
+	/* send message */
+
+	kdbus_bus_eavesdrop(bus, src, kmsg);
+
+	ret = kdbus_conn_entry_insert(src, dst, kmsg, wait);
+	if (ret < 0)
+		goto exit;
+
+	ret = 0;
+
+exit:
+	up_read(&bus->name_registry->rwlock);
+	if (ret < 0) {
+		kdbus_reply_unref(wait);
+		wait = ERR_PTR(ret);
+	}
+	kdbus_conn_unref(dst);
+	return wait;
+}
+
+static int kdbus_conn_unicast(struct kdbus_conn *src, struct kdbus_kmsg *kmsg)
+{
+	struct kdbus_name_entry *name = NULL;
+	struct kdbus_reply *wait = NULL;
+	struct kdbus_conn *dst = NULL;
+	struct kdbus_bus *bus = src->ep->bus;
+	bool is_signal = (kmsg->msg.flags & KDBUS_MSG_SIGNAL);
+	u64 attach;
+	int ret = 0;
+
+	if (WARN_ON(kmsg->msg.dst_id == KDBUS_DST_ID_BROADCAST) ||
+	    WARN_ON(!(kmsg->msg.flags & KDBUS_MSG_EXPECT_REPLY) &&
+		    kmsg->msg.cookie_reply != 0))
+		return -EINVAL;
+
+	/* name-registry must be locked for lookup *and* collecting data */
+	down_read(&bus->name_registry->rwlock);
+
+	/* find and pin destination */
+
+	ret = kdbus_pin_dst(bus, kmsg, &name, &dst);
+	if (ret < 0)
+		goto exit;
+
+	if (is_signal) {
+		/* like broadcasts we eavesdrop even if the msg is dropped */
+		kdbus_bus_eavesdrop(bus, src, kmsg);
+
+		/* drop silently if peer is not interested or not privileged */
+		if (!kdbus_match_db_match_kmsg(dst->match_db, src, kmsg) ||
+		    !kdbus_conn_policy_talk(dst, NULL, src))
+			goto exit;
+	} else if (!kdbus_conn_policy_talk(src, current_cred(), dst)) {
+		ret = -EPERM;
+		goto exit;
+	} else if (kmsg->msg.flags & KDBUS_MSG_EXPECT_REPLY) {
+		wait = kdbus_reply_new(dst, src, &kmsg->msg, name, false);
+		if (IS_ERR(wait)) {
+			ret = PTR_ERR(wait);
+			wait = NULL;
+			goto exit;
+		}
+	}
+
+	/* attach metadata */
+
+	attach = kdbus_meta_calc_attach_flags(src, dst);
+
+	if (!src->faked_meta) {
+		ret = kdbus_meta_proc_collect(kmsg->proc_meta, attach);
+		if (ret < 0 && !is_signal)
+			goto exit;
+	}
+
+	ret = kdbus_meta_conn_collect(kmsg->conn_meta, kmsg, src, attach);
+	if (ret < 0 && !is_signal)
+		goto exit;
+
+	/* send message */
+
+	if (!is_signal)
+		kdbus_bus_eavesdrop(bus, src, kmsg);
+
+	ret = kdbus_conn_entry_insert(src, dst, kmsg, wait);
+	if (ret < 0 && !is_signal)
+		goto exit;
+
+	/* signals are treated like broadcasts, recv-errors are ignored */
+	ret = 0;
+
+exit:
+	up_read(&bus->name_registry->rwlock);
+	kdbus_reply_unref(wait);
+	kdbus_conn_unref(dst);
+	return ret;
+}
+
+/**
+ * kdbus_conn_move_messages() - move messages from one connection to another
+ * @conn_dst:		Connection to copy to
+ * @conn_src:		Connection to copy from
+ * @name_id:		Filter for the sequence number of the registered
+ *			name, 0 means no filtering.
+ *
+ * Move all messages from one connection to another. This is used when
+ * an implementer connection is taking over/giving back a well-known name
+ * from/to an activator connection.
+ */
+void kdbus_conn_move_messages(struct kdbus_conn *conn_dst,
+			      struct kdbus_conn *conn_src,
+			      u64 name_id)
+{
+	struct kdbus_queue_entry *e, *e_tmp;
+	struct kdbus_reply *r, *r_tmp;
+	struct kdbus_bus *bus;
+	struct kdbus_conn *c;
+	LIST_HEAD(msg_list);
+	int i, ret = 0;
+
+	if (WARN_ON(conn_src == conn_dst))
+		return;
+
+	bus = conn_src->ep->bus;
+
+	/* lock order: domain -> bus -> ep -> names -> conn */
+	down_read(&bus->conn_rwlock);
+	hash_for_each(bus->conn_hash, i, c, hentry) {
+		if (c == conn_src || c == conn_dst)
+			continue;
+
+		mutex_lock(&c->lock);
+		list_for_each_entry_safe(r, r_tmp, &c->reply_list, entry) {
+			if (r->reply_src != conn_src)
+				continue;
+
+			/* filter messages for a specific name */
+			if (name_id > 0 && r->name_id != name_id)
+				continue;
+
+			kdbus_conn_unref(r->reply_src);
+			r->reply_src = kdbus_conn_ref(conn_dst);
+		}
+		mutex_unlock(&c->lock);
+	}
+	up_read(&bus->conn_rwlock);
+
+	kdbus_conn_lock2(conn_src, conn_dst);
+	list_for_each_entry_safe(e, e_tmp, &conn_src->queue.msg_list, entry) {
+		/* filter messages for a specific name */
+		if (name_id > 0 && e->dst_name_id != name_id)
+			continue;
+
+		if (!(conn_dst->flags & KDBUS_HELLO_ACCEPT_FD) &&
+		    e->msg_res && e->msg_res->fds_count > 0) {
+			kdbus_conn_lost_message(conn_dst);
+			kdbus_queue_entry_free(e);
+			continue;
+		}
+
+		ret = kdbus_queue_entry_move(e, conn_dst);
+		if (ret < 0) {
+			kdbus_conn_lost_message(conn_dst);
+			kdbus_queue_entry_free(e);
+			continue;
+		}
+	}
+	kdbus_conn_unlock2(conn_src, conn_dst);
+
+	/* wake up poll() */
+	wake_up_interruptible(&conn_dst->wait);
+}
+
+/* query the policy-database for all names of @whom */
+static bool kdbus_conn_policy_query_all(struct kdbus_conn *conn,
+					const struct cred *conn_creds,
+					struct kdbus_policy_db *db,
+					struct kdbus_conn *whom,
+					unsigned int access)
+{
+	struct kdbus_name_entry *ne;
+	bool pass = false;
+	int res;
+
+	lockdep_assert_held(&conn->ep->bus->name_registry->rwlock);
+
+	down_read(&db->entries_rwlock);
+	mutex_lock(&whom->lock);
+
+	list_for_each_entry(ne, &whom->names_list, conn_entry) {
+		res = kdbus_policy_query_unlocked(db, conn_creds ? : conn->cred,
+						  ne->name,
+						  kdbus_strhash(ne->name));
+		if (res >= (int)access) {
+			pass = true;
+			break;
+		}
+	}
+
+	mutex_unlock(&whom->lock);
+	up_read(&db->entries_rwlock);
+
+	return pass;
+}
+
+/**
+ * kdbus_conn_policy_own_name() - verify a connection can own the given name
+ * @conn:		Connection
+ * @conn_creds:		Credentials of @conn to use for policy check
+ * @name:		Name
+ *
+ * This verifies that @conn is allowed to acquire the well-known name @name.
+ *
+ * Return: true if allowed, false if not.
+ */
+bool kdbus_conn_policy_own_name(struct kdbus_conn *conn,
+				const struct cred *conn_creds,
+				const char *name)
+{
+	unsigned int hash = kdbus_strhash(name);
+	int res;
+
+	if (!conn_creds)
+		conn_creds = conn->cred;
+
+	if (conn->ep->user) {
+		res = kdbus_policy_query(&conn->ep->policy_db, conn_creds,
+					 name, hash);
+		if (res < KDBUS_POLICY_OWN)
+			return false;
+	}
+
+	if (conn->privileged)
+		return true;
+
+	res = kdbus_policy_query(&conn->ep->bus->policy_db, conn_creds,
+				 name, hash);
+	return res >= KDBUS_POLICY_OWN;
+}
+
+/**
+ * kdbus_conn_policy_talk() - verify a connection can talk to a given peer
+ * @conn:		Connection that tries to talk
+ * @conn_creds:		Credentials of @conn to use for policy check
+ * @to:			Connection that is talked to
+ *
+ * This verifies that @conn is allowed to talk to @to.
+ *
+ * Return: true if allowed, false if not.
+ */
+bool kdbus_conn_policy_talk(struct kdbus_conn *conn,
+			    const struct cred *conn_creds,
+			    struct kdbus_conn *to)
+{
+	if (!conn_creds)
+		conn_creds = conn->cred;
+
+	if (conn->ep->user &&
+	    !kdbus_conn_policy_query_all(conn, conn_creds, &conn->ep->policy_db,
+					 to, KDBUS_POLICY_TALK))
+		return false;
+
+	if (conn->privileged)
+		return true;
+	if (uid_eq(conn_creds->euid, to->cred->uid))
+		return true;
+
+	return kdbus_conn_policy_query_all(conn, conn_creds,
+					   &conn->ep->bus->policy_db, to,
+					   KDBUS_POLICY_TALK);
+}
+
+/**
+ * kdbus_conn_policy_see_name_unlocked() - verify a connection can see a given
+ *					   name
+ * @conn:		Connection
+ * @conn_creds:		Credentials of @conn to use for policy check
+ * @name:		Name
+ *
+ * This verifies that @conn is allowed to see the well-known name @name. Caller
+ * must hold policy-lock.
+ *
+ * Return: true if allowed, false if not.
+ */
+bool kdbus_conn_policy_see_name_unlocked(struct kdbus_conn *conn,
+					 const struct cred *conn_creds,
+					 const char *name)
+{
+	int res;
+
+	/*
+	 * By default, all names are visible on a bus. SEE policies can only be
+	 * installed on custom endpoints, where by default no name is visible.
+	 */
+	if (!conn->ep->user)
+		return true;
+
+	res = kdbus_policy_query_unlocked(&conn->ep->policy_db,
+					  conn_creds ? : conn->cred,
+					  name, kdbus_strhash(name));
+	return res >= KDBUS_POLICY_SEE;
+}
+
+static bool kdbus_conn_policy_see_name(struct kdbus_conn *conn,
+				       const struct cred *conn_creds,
+				       const char *name)
+{
+	bool res;
+
+	down_read(&conn->ep->policy_db.entries_rwlock);
+	res = kdbus_conn_policy_see_name_unlocked(conn, conn_creds, name);
+	up_read(&conn->ep->policy_db.entries_rwlock);
+
+	return res;
+}
+
+static bool kdbus_conn_policy_see(struct kdbus_conn *conn,
+				  const struct cred *conn_creds,
+				  struct kdbus_conn *whom)
+{
+	/*
+	 * By default, all names are visible on a bus, so a connection can
+	 * always see other connections. SEE policies can only be installed on
+	 * custom endpoints, where by default no name is visible and we hide
+	 * peers from each other, unless you see at least _one_ name of the
+	 * peer.
+	 */
+	return !conn->ep->user ||
+	       kdbus_conn_policy_query_all(conn, conn_creds,
+					   &conn->ep->policy_db, whom,
+					   KDBUS_POLICY_SEE);
+}
+
+/**
+ * kdbus_conn_policy_see_notification() - verify a connection is allowed to
+ *					  receive a given kernel notification
+ * @conn:		Connection
+ * @conn_creds:		Credentials of @conn to use for policy check
+ * @kmsg:		The message carrying the notification
+ *
+ * This checks whether @conn is allowed to see the kernel notification @kmsg.
+ *
+ * Return: true if allowed, false if not.
+ */
+bool kdbus_conn_policy_see_notification(struct kdbus_conn *conn,
+					const struct cred *conn_creds,
+					const struct kdbus_kmsg *kmsg)
+{
+	if (WARN_ON(kmsg->msg.src_id != KDBUS_SRC_ID_KERNEL))
+		return false;
+
+	/*
+	 * Depending on the notification type, broadcasted kernel notifications
+	 * have to be filtered:
+	 *
+	 * KDBUS_ITEM_NAME_{ADD,REMOVE,CHANGE}: This notification is forwarded
+	 *     to a peer if, and only if, that peer can see the name this
+	 *     notification is for.
+	 *
+	 * KDBUS_ITEM_ID_{ADD,REMOVE}: As new peers cannot have names, and all
+	 *     names are dropped before a peer is removed, those notifications
+	 *     cannot be seen on custom endpoints. Thus, we only pass them
+	 *     through on default endpoints.
+	 */
+
+	switch (kmsg->notify_type) {
+	case KDBUS_ITEM_NAME_ADD:
+	case KDBUS_ITEM_NAME_REMOVE:
+	case KDBUS_ITEM_NAME_CHANGE:
+		return kdbus_conn_policy_see_name(conn, conn_creds,
+						  kmsg->notify_name);
+
+	case KDBUS_ITEM_ID_ADD:
+	case KDBUS_ITEM_ID_REMOVE:
+		return !conn->ep->user;
+
+	default:
+		WARN(1, "Invalid type for notification broadcast: %llu\n",
+		     (unsigned long long)kmsg->notify_type);
+		return false;
+	}
+}
+
+/**
+ * kdbus_cmd_hello() - handle KDBUS_CMD_HELLO
+ * @ep:			Endpoint to operate on
+ * @privileged:		Whether the caller is privileged
+ * @argp:		Command payload
+ *
+ * Return: Newly created connection on success, ERR_PTR on failure.
+ */
+struct kdbus_conn *kdbus_cmd_hello(struct kdbus_ep *ep, bool privileged,
+				   void __user *argp)
+{
+	struct kdbus_cmd_hello *cmd;
+	struct kdbus_conn *c = NULL;
+	const char *item_name;
+	int ret;
+
+	struct kdbus_arg argv[] = {
+		{ .type = KDBUS_ITEM_NEGOTIATE },
+		{ .type = KDBUS_ITEM_NAME },
+		{ .type = KDBUS_ITEM_CREDS },
+		{ .type = KDBUS_ITEM_PIDS },
+		{ .type = KDBUS_ITEM_SECLABEL },
+		{ .type = KDBUS_ITEM_CONN_DESCRIPTION },
+		{ .type = KDBUS_ITEM_POLICY_ACCESS, .multiple = true },
+	};
+	struct kdbus_args args = {
+		.allowed_flags = KDBUS_FLAG_NEGOTIATE |
+				 KDBUS_HELLO_ACCEPT_FD |
+				 KDBUS_HELLO_ACTIVATOR |
+				 KDBUS_HELLO_POLICY_HOLDER |
+				 KDBUS_HELLO_MONITOR,
+		.argv = argv,
+		.argc = ARRAY_SIZE(argv),
+	};
+
+	ret = kdbus_args_parse(&args, argp, &cmd);
+	if (ret < 0)
+		return ERR_PTR(ret);
+	if (ret > 0)
+		return NULL;
+
+	item_name = argv[1].item ? argv[1].item->str : NULL;
+
+	c = kdbus_conn_new(ep, privileged, cmd, item_name,
+			   argv[2].item ? &argv[2].item->creds : NULL,
+			   argv[3].item ? &argv[3].item->pids : NULL,
+			   argv[4].item ? argv[4].item->str : NULL,
+			   argv[5].item ? argv[5].item->str : NULL);
+	if (IS_ERR(c)) {
+		ret = PTR_ERR(c);
+		c = NULL;
+		goto exit;
+	}
+
+	ret = kdbus_conn_connect(c, item_name);
+	if (ret < 0)
+		goto exit;
+
+	if (kdbus_conn_is_activator(c) || kdbus_conn_is_policy_holder(c)) {
+		ret = kdbus_conn_acquire(c);
+		if (ret < 0)
+			goto exit;
+
+		ret = kdbus_policy_set(&c->ep->bus->policy_db, args.items,
+				       args.items_size, 1,
+				       kdbus_conn_is_policy_holder(c), c);
+		kdbus_conn_release(c);
+		if (ret < 0)
+			goto exit;
+	}
+
+	if (copy_to_user(argp, cmd, sizeof(*cmd)))
+		ret = -EFAULT;
+
+exit:
+	ret = kdbus_args_clear(&args, ret);
+	if (ret < 0) {
+		if (c) {
+			kdbus_conn_disconnect(c, false);
+			kdbus_conn_unref(c);
+		}
+		return ERR_PTR(ret);
+	}
+	return c;
+}
+
+/**
+ * kdbus_cmd_byebye_unlocked() - handle KDBUS_CMD_BYEBYE
+ * @conn:		connection to operate on
+ * @argp:		command payload
+ *
+ * The caller must not hold any active reference to @conn or this will deadlock.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int kdbus_cmd_byebye_unlocked(struct kdbus_conn *conn, void __user *argp)
+{
+	struct kdbus_cmd *cmd;
+	int ret;
+
+	struct kdbus_arg argv[] = {
+		{ .type = KDBUS_ITEM_NEGOTIATE },
+	};
+	struct kdbus_args args = {
+		.allowed_flags = KDBUS_FLAG_NEGOTIATE,
+		.argv = argv,
+		.argc = ARRAY_SIZE(argv),
+	};
+
+	if (!kdbus_conn_is_ordinary(conn))
+		return -EOPNOTSUPP;
+
+	ret = kdbus_args_parse(&args, argp, &cmd);
+	if (ret != 0)
+		return ret;
+
+	ret = kdbus_conn_disconnect(conn, true);
+	return kdbus_args_clear(&args, ret);
+}
+
+/**
+ * kdbus_cmd_conn_info() - handle KDBUS_CMD_CONN_INFO
+ * @conn:		connection to operate on
+ * @argp:		command payload
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int kdbus_cmd_conn_info(struct kdbus_conn *conn, void __user *argp)
+{
+	struct kdbus_meta_conn *conn_meta = NULL;
+	struct kdbus_pool_slice *slice = NULL;
+	struct kdbus_name_entry *entry = NULL;
+	struct kdbus_conn *owner_conn = NULL;
+	struct kdbus_info info = {};
+	struct kdbus_cmd_info *cmd;
+	struct kdbus_bus *bus = conn->ep->bus;
+	struct kvec kvec;
+	size_t meta_size;
+	const char *name;
+	u64 attach_flags;
+	int ret;
+
+	struct kdbus_arg argv[] = {
+		{ .type = KDBUS_ITEM_NEGOTIATE },
+		{ .type = KDBUS_ITEM_NAME },
+	};
+	struct kdbus_args args = {
+		.allowed_flags = KDBUS_FLAG_NEGOTIATE,
+		.argv = argv,
+		.argc = ARRAY_SIZE(argv),
+	};
+
+	ret = kdbus_args_parse(&args, argp, &cmd);
+	if (ret != 0)
+		return ret;
+
+	/* the registry lock must be held throughout lookup *and* collecting data */
+	down_read(&bus->name_registry->rwlock);
+
+	ret = kdbus_sanitize_attach_flags(cmd->attach_flags, &attach_flags);
+	if (ret < 0)
+		goto exit;
+
+	name = argv[1].item ? argv[1].item->str : NULL;
+
+	if (name) {
+		entry = kdbus_name_lookup_unlocked(bus->name_registry, name);
+		if (!entry || !entry->conn ||
+		    !kdbus_conn_policy_see_name(conn, current_cred(), name) ||
+		    (cmd->id != 0 && entry->conn->id != cmd->id)) {
+			/* pretend a name doesn't exist if you cannot see it */
+			ret = -ESRCH;
+			goto exit;
+		}
+
+		owner_conn = kdbus_conn_ref(entry->conn);
+	} else if (cmd->id > 0) {
+		owner_conn = kdbus_bus_find_conn_by_id(bus, cmd->id);
+		if (!owner_conn || !kdbus_conn_policy_see(conn, current_cred(),
+							  owner_conn)) {
+			/* pretend an id doesn't exist if you cannot see it */
+			ret = -ENXIO;
+			goto exit;
+		}
+	} else {
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	info.id = owner_conn->id;
+	info.flags = owner_conn->flags;
+	kdbus_kvec_set(&kvec, &info, sizeof(info), &info.size);
+
+	attach_flags &= atomic64_read(&owner_conn->attach_flags_send);
+
+	conn_meta = kdbus_meta_conn_new();
+	if (IS_ERR(conn_meta)) {
+		ret = PTR_ERR(conn_meta);
+		conn_meta = NULL;
+		goto exit;
+	}
+
+	ret = kdbus_meta_conn_collect(conn_meta, NULL, owner_conn,
+				      attach_flags);
+	if (ret < 0)
+		goto exit;
+
+	ret = kdbus_meta_export_prepare(owner_conn->meta, conn_meta,
+					&attach_flags, &meta_size);
+	if (ret < 0)
+		goto exit;
+
+	slice = kdbus_pool_slice_alloc(conn->pool,
+				       info.size + meta_size, false);
+	if (IS_ERR(slice)) {
+		ret = PTR_ERR(slice);
+		slice = NULL;
+		goto exit;
+	}
+
+	ret = kdbus_meta_export(owner_conn->meta, conn_meta, attach_flags,
+				slice, sizeof(info), &meta_size);
+	if (ret < 0)
+		goto exit;
+
+	info.size += meta_size;
+
+	ret = kdbus_pool_slice_copy_kvec(slice, 0, &kvec, 1, sizeof(info));
+	if (ret < 0)
+		goto exit;
+
+	kdbus_pool_slice_publish(slice, &cmd->offset, &cmd->info_size);
+
+	if (kdbus_member_set_user(&cmd->offset, argp, typeof(*cmd), offset) ||
+	    kdbus_member_set_user(&cmd->info_size, argp,
+				  typeof(*cmd), info_size)) {
+		ret = -EFAULT;
+		goto exit;
+	}
+
+	ret = 0;
+
+exit:
+	up_read(&bus->name_registry->rwlock);
+	kdbus_pool_slice_release(slice);
+	kdbus_meta_conn_unref(conn_meta);
+	kdbus_conn_unref(owner_conn);
+	return kdbus_args_clear(&args, ret);
+}
+
+/**
+ * kdbus_cmd_update() - handle KDBUS_CMD_UPDATE
+ * @conn:		connection to operate on
+ * @argp:		command payload
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int kdbus_cmd_update(struct kdbus_conn *conn, void __user *argp)
+{
+	struct kdbus_bus *bus = conn->ep->bus;
+	struct kdbus_item *item_policy;
+	u64 *item_attach_send = NULL;
+	u64 *item_attach_recv = NULL;
+	struct kdbus_cmd *cmd;
+	u64 attach_send;
+	u64 attach_recv;
+	int ret;
+
+	struct kdbus_arg argv[] = {
+		{ .type = KDBUS_ITEM_NEGOTIATE },
+		{ .type = KDBUS_ITEM_ATTACH_FLAGS_SEND },
+		{ .type = KDBUS_ITEM_ATTACH_FLAGS_RECV },
+		{ .type = KDBUS_ITEM_NAME, .multiple = true },
+		{ .type = KDBUS_ITEM_POLICY_ACCESS, .multiple = true },
+	};
+	struct kdbus_args args = {
+		.allowed_flags = KDBUS_FLAG_NEGOTIATE,
+		.argv = argv,
+		.argc = ARRAY_SIZE(argv),
+	};
+
+	ret = kdbus_args_parse(&args, argp, &cmd);
+	if (ret != 0)
+		return ret;
+
+	item_attach_send = argv[1].item ? &argv[1].item->data64[0] : NULL;
+	item_attach_recv = argv[2].item ? &argv[2].item->data64[0] : NULL;
+	item_policy = argv[3].item ? : argv[4].item;
+
+	if (item_attach_send) {
+		if (!kdbus_conn_is_ordinary(conn) &&
+		    !kdbus_conn_is_monitor(conn)) {
+			ret = -EOPNOTSUPP;
+			goto exit;
+		}
+
+		ret = kdbus_sanitize_attach_flags(*item_attach_send,
+						  &attach_send);
+		if (ret < 0)
+			goto exit;
+
+		if (bus->attach_flags_req & ~attach_send) {
+			ret = -EINVAL;
+			goto exit;
+		}
+	}
+
+	if (item_attach_recv) {
+		if (!kdbus_conn_is_ordinary(conn) &&
+		    !kdbus_conn_is_monitor(conn) &&
+		    !kdbus_conn_is_activator(conn)) {
+			ret = -EOPNOTSUPP;
+			goto exit;
+		}
+
+		ret = kdbus_sanitize_attach_flags(*item_attach_recv,
+						  &attach_recv);
+		if (ret < 0)
+			goto exit;
+	}
+
+	if (item_policy && !kdbus_conn_is_policy_holder(conn)) {
+		ret = -EOPNOTSUPP;
+		goto exit;
+	}
+
+	/* now that we verified the input, update the connection */
+
+	if (item_policy) {
+		ret = kdbus_policy_set(&conn->ep->bus->policy_db, cmd->items,
+				       KDBUS_ITEMS_SIZE(cmd, items),
+				       1, true, conn);
+		if (ret < 0)
+			goto exit;
+	}
+
+	if (item_attach_send)
+		atomic64_set(&conn->attach_flags_send, attach_send);
+
+	if (item_attach_recv)
+		atomic64_set(&conn->attach_flags_recv, attach_recv);
+
+exit:
+	return kdbus_args_clear(&args, ret);
+}
+
+/**
+ * kdbus_cmd_send() - handle KDBUS_CMD_SEND
+ * @conn:		connection to operate on
+ * @f:			file this command was called on
+ * @argp:		command payload
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int kdbus_cmd_send(struct kdbus_conn *conn, struct file *f, void __user *argp)
+{
+	struct kdbus_cmd_send *cmd;
+	struct kdbus_kmsg *kmsg = NULL;
+	struct file *cancel_fd = NULL;
+	int ret;
+
+	struct kdbus_arg argv[] = {
+		{ .type = KDBUS_ITEM_NEGOTIATE },
+		{ .type = KDBUS_ITEM_CANCEL_FD },
+	};
+	struct kdbus_args args = {
+		.allowed_flags = KDBUS_FLAG_NEGOTIATE |
+				 KDBUS_SEND_SYNC_REPLY,
+		.argv = argv,
+		.argc = ARRAY_SIZE(argv),
+	};
+
+	if (!kdbus_conn_is_ordinary(conn))
+		return -EOPNOTSUPP;
+
+	ret = kdbus_args_parse(&args, argp, &cmd);
+	if (ret != 0)
+		return ret;
+
+	cmd->reply.return_flags = 0;
+	kdbus_pool_publish_empty(conn->pool, &cmd->reply.offset,
+				 &cmd->reply.msg_size);
+
+	if (argv[1].item) {
+		cancel_fd = fget(argv[1].item->fds[0]);
+		if (!cancel_fd) {
+			ret = -EBADF;
+			goto exit;
+		}
+
+		if (!cancel_fd->f_op->poll) {
+			ret = -EINVAL;
+			goto exit;
+		}
+	}
+
+	kmsg = kdbus_kmsg_new_from_cmd(conn, cmd);
+	if (IS_ERR(kmsg)) {
+		ret = PTR_ERR(kmsg);
+		kmsg = NULL;
+		goto exit;
+	}
+
+	if (kmsg->msg.dst_id == KDBUS_DST_ID_BROADCAST) {
+		down_read(&conn->ep->bus->name_registry->rwlock);
+		kdbus_bus_broadcast(conn->ep->bus, conn, kmsg);
+		up_read(&conn->ep->bus->name_registry->rwlock);
+	} else if (cmd->flags & KDBUS_SEND_SYNC_REPLY) {
+		struct kdbus_reply *r;
+		ktime_t exp;
+
+		exp = ns_to_ktime(kmsg->msg.timeout_ns);
+		r = kdbus_conn_call(conn, kmsg, exp);
+		if (IS_ERR(r)) {
+			ret = PTR_ERR(r);
+			goto exit;
+		}
+
+		ret = kdbus_conn_wait_reply(conn, cmd, f, cancel_fd, r, exp);
+		kdbus_reply_unref(r);
+		if (ret < 0)
+			goto exit;
+	} else if ((kmsg->msg.flags & KDBUS_MSG_EXPECT_REPLY) ||
+		   kmsg->msg.cookie_reply == 0) {
+		ret = kdbus_conn_unicast(conn, kmsg);
+		if (ret < 0)
+			goto exit;
+	} else {
+		ret = kdbus_conn_reply(conn, kmsg);
+		if (ret < 0)
+			goto exit;
+	}
+
+	if (kdbus_member_set_user(&cmd->reply, argp, typeof(*cmd), reply))
+		ret = -EFAULT;
+
+exit:
+	if (cancel_fd)
+		fput(cancel_fd);
+	kdbus_kmsg_free(kmsg);
+	return kdbus_args_clear(&args, ret);
+}
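Again for illustration (not part of the patch): a userspace sketch of a synchronous
call, assuming the uapi layout used above; 'conn_fd', 'pool' (the mmap()ed receive
pool) and 'msg' are hypothetical, and building 'msg' itself is sketched after
kdbus_kmsg_new_from_cmd() below.

	struct kdbus_cmd_send cmd = {
		.size        = sizeof(cmd),
		.flags       = KDBUS_SEND_SYNC_REPLY,
		.msg_address = (__u64)(uintptr_t)msg,
	};

	msg->flags     |= KDBUS_MSG_EXPECT_REPLY;	/* required for SYNC_REPLY */
	msg->cookie     = 1;				/* ... as is a cookie ...  */
	msg->timeout_ns = 5ULL * 1000000000ULL;		/* ... and a timeout       */

	if (ioctl(conn_fd, KDBUS_CMD_SEND, &cmd) == 0) {
		struct kdbus_msg *reply =
			(struct kdbus_msg *)(pool + cmd.reply.offset);
		/* parse the reply, then release cmd.reply.offset via KDBUS_CMD_FREE */
	}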
+
+/**
+ * kdbus_cmd_recv() - handle KDBUS_CMD_RECV
+ * @conn:		connection to operate on
+ * @argp:		command payload
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int kdbus_cmd_recv(struct kdbus_conn *conn, void __user *argp)
+{
+	struct kdbus_queue_entry *entry;
+	struct kdbus_cmd_recv *cmd;
+	int ret;
+
+	struct kdbus_arg argv[] = {
+		{ .type = KDBUS_ITEM_NEGOTIATE },
+	};
+	struct kdbus_args args = {
+		.allowed_flags = KDBUS_FLAG_NEGOTIATE |
+				 KDBUS_RECV_PEEK |
+				 KDBUS_RECV_DROP |
+				 KDBUS_RECV_USE_PRIORITY,
+		.argv = argv,
+		.argc = ARRAY_SIZE(argv),
+	};
+
+	if (!kdbus_conn_is_ordinary(conn) &&
+	    !kdbus_conn_is_monitor(conn) &&
+	    !kdbus_conn_is_activator(conn))
+		return -EOPNOTSUPP;
+
+	ret = kdbus_args_parse(&args, argp, &cmd);
+	if (ret != 0)
+		return ret;
+
+	cmd->dropped_msgs = 0;
+	cmd->msg.return_flags = 0;
+	kdbus_pool_publish_empty(conn->pool, &cmd->msg.offset,
+				 &cmd->msg.msg_size);
+
+	/* DROP+priority is not reliable, so prevent it */
+	if ((cmd->flags & KDBUS_RECV_DROP) &&
+	    (cmd->flags & KDBUS_RECV_USE_PRIORITY)) {
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	mutex_lock(&conn->lock);
+
+	entry = kdbus_queue_peek(&conn->queue, cmd->priority,
+				 cmd->flags & KDBUS_RECV_USE_PRIORITY);
+	if (!entry) {
+		mutex_unlock(&conn->lock);
+		ret = -EAGAIN;
+	} else if (cmd->flags & KDBUS_RECV_DROP) {
+		struct kdbus_reply *reply = kdbus_reply_ref(entry->reply);
+
+		kdbus_queue_entry_free(entry);
+
+		mutex_unlock(&conn->lock);
+
+		if (reply) {
+			mutex_lock(&reply->reply_dst->lock);
+			if (!list_empty(&reply->entry)) {
+				kdbus_reply_unlink(reply);
+				if (reply->sync)
+					kdbus_sync_reply_wakeup(reply, -EPIPE);
+				else
+					kdbus_notify_reply_dead(conn->ep->bus,
+							reply->reply_dst->id,
+							reply->cookie);
+			}
+			mutex_unlock(&reply->reply_dst->lock);
+			kdbus_notify_flush(conn->ep->bus);
+		}
+
+		kdbus_reply_unref(reply);
+	} else {
+		bool install_fds;
+
+		/*
+		 * PEEK just returns the location of the next message. Do not
+		 * install FDs nor memfds nor anything else. The only
+		 * information of interest should be the message header and
+		 * metadata. Any FD numbers in the payload are undefined for
+		 * PEEK'ed messages.
+		 * Also make sure to never install fds into a connection that
+		 * has refused to receive any. Ordinary connections will not get
+		 * messages with FDs queued (the receiver will get -ECOMM), but
+		 * eavesdroppers might.
+		 */
+		install_fds = (conn->flags & KDBUS_HELLO_ACCEPT_FD) &&
+			      !(cmd->flags & KDBUS_RECV_PEEK);
+
+		ret = kdbus_queue_entry_install(entry,
+						&cmd->msg.return_flags,
+						install_fds);
+		if (ret < 0) {
+			mutex_unlock(&conn->lock);
+			goto exit;
+		}
+
+		kdbus_pool_slice_publish(entry->slice, &cmd->msg.offset,
+					 &cmd->msg.msg_size);
+
+		if (!(cmd->flags & KDBUS_RECV_PEEK))
+			kdbus_queue_entry_free(entry);
+
+		mutex_unlock(&conn->lock);
+	}
+
+	cmd->dropped_msgs = atomic_xchg(&conn->lost_count, 0);
+	if (cmd->dropped_msgs > 0)
+		cmd->return_flags |= KDBUS_RECV_RETURN_DROPPED_MSGS;
+
+	if (kdbus_member_set_user(&cmd->msg, argp, typeof(*cmd), msg) ||
+	    kdbus_member_set_user(&cmd->dropped_msgs, argp, typeof(*cmd),
+				  dropped_msgs))
+		ret = -EFAULT;
+
+exit:
+	return kdbus_args_clear(&args, ret);
+}
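For illustration (not part of the patch): the matching receive side, again assuming
the uapi layout and a hypothetical 'conn_fd'/'pool' pair; note the -EAGAIN/poll()
pattern and the final KDBUS_CMD_FREE that hands the slice back to the pool.

	struct kdbus_cmd_recv recv = { .size = sizeof(recv) };

	if (ioctl(conn_fd, KDBUS_CMD_RECV, &recv) == 0) {
		struct kdbus_msg *msg =
			(struct kdbus_msg *)(pool + recv.msg.offset);

		if (recv.return_flags & KDBUS_RECV_RETURN_DROPPED_MSGS)
			/* recv.dropped_msgs broadcasts were lost since last call */;

		/* ... consume msg ..., then release the slice: */
		struct kdbus_cmd_free f = {
			.size = sizeof(f), .offset = recv.msg.offset,
		};
		ioctl(conn_fd, KDBUS_CMD_FREE, &f);
	} else if (errno == EAGAIN) {
		/* queue empty: poll() the connection fd and retry */
	}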
+
+/**
+ * kdbus_cmd_free() - handle KDBUS_CMD_FREE
+ * @conn:		connection to operate on
+ * @argp:		command payload
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int kdbus_cmd_free(struct kdbus_conn *conn, void __user *argp)
+{
+	struct kdbus_cmd_free *cmd;
+	int ret;
+
+	struct kdbus_arg argv[] = {
+		{ .type = KDBUS_ITEM_NEGOTIATE },
+	};
+	struct kdbus_args args = {
+		.allowed_flags = KDBUS_FLAG_NEGOTIATE,
+		.argv = argv,
+		.argc = ARRAY_SIZE(argv),
+	};
+
+	if (!kdbus_conn_is_ordinary(conn) &&
+	    !kdbus_conn_is_monitor(conn) &&
+	    !kdbus_conn_is_activator(conn))
+		return -EOPNOTSUPP;
+
+	ret = kdbus_args_parse(&args, argp, &cmd);
+	if (ret != 0)
+		return ret;
+
+	ret = kdbus_pool_release_offset(conn->pool, cmd->offset);
+
+	return kdbus_args_clear(&args, ret);
+}
diff --git a/ipc/kdbus/connection.h b/ipc/kdbus/connection.h
new file mode 100644
index 000000000000..d1ffe909cb31
--- /dev/null
+++ b/ipc/kdbus/connection.h
@@ -0,0 +1,257 @@
+/*
+ * Copyright (C) 2013-2015 Kay Sievers
+ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
+ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
+ * Copyright (C) 2013-2015 Linux Foundation
+ * Copyright (C) 2014-2015 Djalal Harouni
+ *
+ * kdbus is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the
+ * Free Software Foundation; either version 2.1 of the License, or (at
+ * your option) any later version.
+ */
+
+#ifndef __KDBUS_CONNECTION_H
+#define __KDBUS_CONNECTION_H
+
+#include <linux/atomic.h>
+#include <linux/kref.h>
+#include <linux/lockdep.h>
+#include <linux/path.h>
+
+#include "limits.h"
+#include "metadata.h"
+#include "pool.h"
+#include "queue.h"
+#include "util.h"
+
+#define KDBUS_HELLO_SPECIAL_CONN	(KDBUS_HELLO_ACTIVATOR | \
+					 KDBUS_HELLO_POLICY_HOLDER | \
+					 KDBUS_HELLO_MONITOR)
+
+struct kdbus_quota;
+struct kdbus_kmsg;
+
+/**
+ * struct kdbus_conn - connection to a bus
+ * @kref:		Reference count
+ * @active:		Active references to the connection
+ * @id:			Connection ID
+ * @flags:		KDBUS_HELLO_* flags
+ * @attach_flags_send:	KDBUS_ATTACH_* flags for sending
+ * @attach_flags_recv:	KDBUS_ATTACH_* flags for receiving
+ * @description:	Human-readable connection description, used for
+ *			debugging. This field is only set when the
+ *			connection is created.
+ * @ep:			The endpoint this connection belongs to
+ * @lock:		Connection data lock
+ * @hentry:		Entry in ID <-> connection map
+ * @ep_entry:		Entry in endpoint
+ * @monitor_entry:	Entry in monitor, if the connection is a monitor
+ * @reply_list:		List of connections this connection should
+ *			reply to
+ * @work:		Delayed work to handle timeouts
+ * @match_db:		Subscription filter for broadcast messages
+ * @meta:		Active connection creator's metadata/credentials,
+ *			either from the handle or from HELLO
+ * @pool:		The user's buffer to receive messages
+ * @user:		Owner of the connection
+ * @cred:		The credentials of the connection at creation time
+ * @name_count:		Number of owned well-known names
+ * @request_count:	Number of pending requests issued by this
+ *			connection that are waiting for replies from
+ *			other peers
+ * @lost_count:		Number of lost broadcast messages
+ * @wait:		Wait queue head used to wake up this connection
+ * @queue:		The message queue associated with this connection
+ * @quota:		Array of per-user quota indexed by user->id
+ * @n_quota:		Number of elements in quota array
+ * @activator_of:	Well-known name entry this connection acts as an
+ *			activator for
+ * @names_list:		List of well-known names
+ * @names_queue_list:	Well-known names this connection waits for
+ * @privileged:		Whether this connection is privileged on the bus
+ * @faked_meta:		Whether the metadata was faked on HELLO
+ */
+struct kdbus_conn {
+	struct kref kref;
+	atomic_t active;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	struct lockdep_map dep_map;
+#endif
+	u64 id;
+	u64 flags;
+	atomic64_t attach_flags_send;
+	atomic64_t attach_flags_recv;
+	const char *description;
+	struct kdbus_ep *ep;
+	struct mutex lock;
+	struct hlist_node hentry;
+	struct list_head ep_entry;
+	struct list_head monitor_entry;
+	struct list_head reply_list;
+	struct delayed_work work;
+	struct kdbus_match_db *match_db;
+	struct kdbus_meta_proc *meta;
+	struct kdbus_pool *pool;
+	struct kdbus_user *user;
+	const struct cred *cred;
+	atomic_t name_count;
+	atomic_t request_count;
+	atomic_t lost_count;
+	wait_queue_head_t wait;
+	struct kdbus_queue queue;
+
+	struct kdbus_quota *quota;
+	unsigned int n_quota;
+
+	/* protected by registry->rwlock */
+	struct kdbus_name_entry *activator_of;
+	struct list_head names_list;
+	struct list_head names_queue_list;
+
+	bool privileged:1;
+	bool faked_meta:1;
+};
+
+struct kdbus_conn *kdbus_conn_ref(struct kdbus_conn *conn);
+struct kdbus_conn *kdbus_conn_unref(struct kdbus_conn *conn);
+bool kdbus_conn_active(const struct kdbus_conn *conn);
+int kdbus_conn_acquire(struct kdbus_conn *conn);
+void kdbus_conn_release(struct kdbus_conn *conn);
+int kdbus_conn_disconnect(struct kdbus_conn *conn, bool ensure_queue_empty);
+bool kdbus_conn_has_name(struct kdbus_conn *conn, const char *name);
+int kdbus_conn_quota_inc(struct kdbus_conn *c, struct kdbus_user *u,
+			 size_t memory, size_t fds);
+void kdbus_conn_quota_dec(struct kdbus_conn *c, struct kdbus_user *u,
+			  size_t memory, size_t fds);
+void kdbus_conn_lost_message(struct kdbus_conn *c);
+int kdbus_conn_entry_insert(struct kdbus_conn *conn_src,
+			    struct kdbus_conn *conn_dst,
+			    const struct kdbus_kmsg *kmsg,
+			    struct kdbus_reply *reply);
+void kdbus_conn_move_messages(struct kdbus_conn *conn_dst,
+			      struct kdbus_conn *conn_src,
+			      u64 name_id);
+
+/* policy */
+bool kdbus_conn_policy_own_name(struct kdbus_conn *conn,
+				const struct cred *conn_creds,
+				const char *name);
+bool kdbus_conn_policy_talk(struct kdbus_conn *conn,
+			    const struct cred *conn_creds,
+			    struct kdbus_conn *to);
+bool kdbus_conn_policy_see_name_unlocked(struct kdbus_conn *conn,
+					 const struct cred *curr_creds,
+					 const char *name);
+bool kdbus_conn_policy_see_notification(struct kdbus_conn *conn,
+					const struct cred *curr_creds,
+					const struct kdbus_kmsg *kmsg);
+
+/* command dispatcher */
+struct kdbus_conn *kdbus_cmd_hello(struct kdbus_ep *ep, bool privileged,
+				   void __user *argp);
+int kdbus_cmd_byebye_unlocked(struct kdbus_conn *conn, void __user *argp);
+int kdbus_cmd_conn_info(struct kdbus_conn *conn, void __user *argp);
+int kdbus_cmd_update(struct kdbus_conn *conn, void __user *argp);
+int kdbus_cmd_send(struct kdbus_conn *conn, struct file *f, void __user *argp);
+int kdbus_cmd_recv(struct kdbus_conn *conn, void __user *argp);
+int kdbus_cmd_free(struct kdbus_conn *conn, void __user *argp);
+
+/**
+ * kdbus_conn_is_ordinary() - Check if connection is ordinary
+ * @conn:		The connection to check
+ *
+ * Return: Non-zero if the connection is an ordinary connection
+ */
+static inline int kdbus_conn_is_ordinary(const struct kdbus_conn *conn)
+{
+	return !(conn->flags & KDBUS_HELLO_SPECIAL_CONN);
+}
+
+/**
+ * kdbus_conn_is_activator() - Check if connection is an activator
+ * @conn:		The connection to check
+ *
+ * Return: Non-zero if the connection is an activator
+ */
+static inline int kdbus_conn_is_activator(const struct kdbus_conn *conn)
+{
+	return conn->flags & KDBUS_HELLO_ACTIVATOR;
+}
+
+/**
+ * kdbus_conn_is_policy_holder() - Check if connection is a policy holder
+ * @conn:		The connection to check
+ *
+ * Return: Non-zero if the connection is a policy holder
+ */
+static inline int kdbus_conn_is_policy_holder(const struct kdbus_conn *conn)
+{
+	return conn->flags & KDBUS_HELLO_POLICY_HOLDER;
+}
+
+/**
+ * kdbus_conn_is_monitor() - Check if connection is a monitor
+ * @conn:		The connection to check
+ *
+ * Return: Non-zero if the connection is a monitor
+ */
+static inline int kdbus_conn_is_monitor(const struct kdbus_conn *conn)
+{
+	return conn->flags & KDBUS_HELLO_MONITOR;
+}
+
+/**
+ * kdbus_conn_lock2() - Lock two connections
+ * @a:		connection A to lock or NULL
+ * @b:		connection B to lock or NULL
+ *
+ * Lock two connections at once. As we need to have a stable locking order, we
+ * always lock the connection with lower memory address first.
+ */
+static inline void kdbus_conn_lock2(struct kdbus_conn *a, struct kdbus_conn *b)
+{
+	if (a < b) {
+		if (a)
+			mutex_lock(&a->lock);
+		if (b && b != a)
+			mutex_lock_nested(&b->lock, !!a);
+	} else {
+		if (b)
+			mutex_lock(&b->lock);
+		if (a && a != b)
+			mutex_lock_nested(&a->lock, !!b);
+	}
+}
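A short illustrative usage of the helper pair above (not part of the patch):

	/* lock both peers in a stable order before touching both queues */
	kdbus_conn_lock2(conn_src, conn_dst);
	/* ... move or inspect queue entries of both connections ... */
	kdbus_conn_unlock2(conn_src, conn_dst);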
+
+/**
+ * kdbus_conn_unlock2() - Unlock two connections
+ * @a:		connection A to unlock or NULL
+ * @b:		connection B to unlock or NULL
+ *
+ * Unlock two connections at once. See kdbus_conn_lock2().
+ */
+static inline void kdbus_conn_unlock2(struct kdbus_conn *a,
+				      struct kdbus_conn *b)
+{
+	if (a)
+		mutex_unlock(&a->lock);
+	if (b && b != a)
+		mutex_unlock(&b->lock);
+}
+
+/**
+ * kdbus_conn_assert_active() - lockdep assert on active lock
+ * @conn:	connection that shall be active
+ *
+ * This verifies via lockdep that the caller holds an active reference to the
+ * given connection.
+ */
+static inline void kdbus_conn_assert_active(struct kdbus_conn *conn)
+{
+	lockdep_assert_held(conn);
+}
+
+#endif
diff --git a/ipc/kdbus/item.c b/ipc/kdbus/item.c
new file mode 100644
index 000000000000..745ad5495096
--- /dev/null
+++ b/ipc/kdbus/item.c
@@ -0,0 +1,339 @@
+/*
+ * Copyright (C) 2013-2015 Kay Sievers
+ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
+ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
+ * Copyright (C) 2013-2015 Linux Foundation
+ * Copyright (C) 2014-2015 Djalal Harouni <tixxdz@opendz.org>
+ *
+ * kdbus is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the
+ * Free Software Foundation; either version 2.1 of the License, or (at
+ * your option) any later version.
+ */
+
+#include <linux/ctype.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+
+#include "item.h"
+#include "limits.h"
+#include "util.h"
+
+/*
+ * This verifies that the string at @str with size @size is properly
+ * zero-terminated and does not contain a 0-byte except at the end.
+ */
+static bool kdbus_str_valid(const char *str, size_t size)
+{
+	return size > 0 && memchr(str, '\0', size) == str + size - 1;
+}
+
+/**
+ * kdbus_item_validate_name() - validate an item containing a name
+ * @item:		Item to validate
+ *
+ * Return: zero on success or a negative error code on failure
+ */
+int kdbus_item_validate_name(const struct kdbus_item *item)
+{
+	const char *name = item->str;
+	unsigned int i;
+	size_t len;
+
+	if (item->size < KDBUS_ITEM_HEADER_SIZE + 2)
+		return -EINVAL;
+
+	if (item->size > KDBUS_ITEM_HEADER_SIZE +
+			 KDBUS_SYSNAME_MAX_LEN + 1)
+		return -ENAMETOOLONG;
+
+	if (!kdbus_str_valid(name, KDBUS_ITEM_PAYLOAD_SIZE(item)))
+		return -EINVAL;
+
+	len = strlen(name);
+	if (len == 0)
+		return -EINVAL;
+
+	for (i = 0; i < len; i++) {
+		if (isalpha(name[i]))
+			continue;
+		if (isdigit(name[i]))
+			continue;
+		if (name[i] == '_')
+			continue;
+		if (i > 0 && i + 1 < len && (name[i] == '-' || name[i] == '.'))
+			continue;
+
+		return -EINVAL;
+	}
+
+	return 0;
+}
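Illustrative outcomes of the checks above (not part of the patch); '-' and '.' are
only accepted in the interior of the name:

	/*   "my_bus-4.scope"                    -> 0
	 *   ".hidden"                           -> -EINVAL        (leading '.')
	 *   "bus-"                              -> -EINVAL        (trailing '-')
	 *   longer than KDBUS_SYSNAME_MAX_LEN   -> -ENAMETOOLONG
	 */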
+
+/**
+ * kdbus_item_validate() - validate a single item
+ * @item:	item to validate
+ *
+ * Return: 0 if item is valid, negative error code if not.
+ */
+int kdbus_item_validate(const struct kdbus_item *item)
+{
+	size_t payload_size = KDBUS_ITEM_PAYLOAD_SIZE(item);
+	size_t l;
+	int ret;
+
+	BUILD_BUG_ON(KDBUS_ITEM_HEADER_SIZE !=
+		     sizeof(struct kdbus_item_header));
+
+	if (item->size < KDBUS_ITEM_HEADER_SIZE)
+		return -EINVAL;
+
+	switch (item->type) {
+	case KDBUS_ITEM_NEGOTIATE:
+		if (payload_size % sizeof(u64) != 0)
+			return -EINVAL;
+		break;
+
+	case KDBUS_ITEM_PAYLOAD_VEC:
+		if (payload_size != sizeof(struct kdbus_vec))
+			return -EINVAL;
+		if (item->vec.size == 0 || item->vec.size > SIZE_MAX)
+			return -EINVAL;
+		break;
+
+	case KDBUS_ITEM_PAYLOAD_OFF:
+		if (payload_size != sizeof(struct kdbus_vec))
+			return -EINVAL;
+		if (item->vec.size == 0 || item->vec.size > SIZE_MAX)
+			return -EINVAL;
+		break;
+
+	case KDBUS_ITEM_PAYLOAD_MEMFD:
+		if (payload_size != sizeof(struct kdbus_memfd))
+			return -EINVAL;
+		if (item->memfd.size == 0 || item->memfd.size > SIZE_MAX)
+			return -EINVAL;
+		if (item->memfd.fd < 0)
+			return -EBADF;
+		break;
+
+	case KDBUS_ITEM_FDS:
+		if (payload_size % sizeof(int) != 0)
+			return -EINVAL;
+		break;
+
+	case KDBUS_ITEM_CANCEL_FD:
+		if (payload_size != sizeof(int))
+			return -EINVAL;
+		break;
+
+	case KDBUS_ITEM_BLOOM_PARAMETER:
+		if (payload_size != sizeof(struct kdbus_bloom_parameter))
+			return -EINVAL;
+		break;
+
+	case KDBUS_ITEM_BLOOM_FILTER:
+		/* followed by the bloom-mask, depends on the bloom-size */
+		if (payload_size < sizeof(struct kdbus_bloom_filter))
+			return -EINVAL;
+		break;
+
+	case KDBUS_ITEM_BLOOM_MASK:
+		/* size depends on bloom-size of bus */
+		break;
+
+	case KDBUS_ITEM_CONN_DESCRIPTION:
+	case KDBUS_ITEM_MAKE_NAME:
+		ret = kdbus_item_validate_name(item);
+		if (ret < 0)
+			return ret;
+		break;
+
+	case KDBUS_ITEM_ATTACH_FLAGS_SEND:
+	case KDBUS_ITEM_ATTACH_FLAGS_RECV:
+	case KDBUS_ITEM_ID:
+		if (payload_size != sizeof(u64))
+			return -EINVAL;
+		break;
+
+	case KDBUS_ITEM_TIMESTAMP:
+		if (payload_size != sizeof(struct kdbus_timestamp))
+			return -EINVAL;
+		break;
+
+	case KDBUS_ITEM_CREDS:
+		if (payload_size != sizeof(struct kdbus_creds))
+			return -EINVAL;
+		break;
+
+	case KDBUS_ITEM_AUXGROUPS:
+		if (payload_size % sizeof(u32) != 0)
+			return -EINVAL;
+		break;
+
+	case KDBUS_ITEM_NAME:
+	case KDBUS_ITEM_DST_NAME:
+	case KDBUS_ITEM_PID_COMM:
+	case KDBUS_ITEM_TID_COMM:
+	case KDBUS_ITEM_EXE:
+	case KDBUS_ITEM_CMDLINE:
+	case KDBUS_ITEM_CGROUP:
+	case KDBUS_ITEM_SECLABEL:
+		if (!kdbus_str_valid(item->str, payload_size))
+			return -EINVAL;
+		break;
+
+	case KDBUS_ITEM_CAPS:
+		if (payload_size < sizeof(u32))
+			return -EINVAL;
+		if (payload_size < sizeof(u32) +
+		    4 * CAP_TO_INDEX(item->caps.last_cap) * sizeof(u32))
+			return -EINVAL;
+		break;
+
+	case KDBUS_ITEM_AUDIT:
+		if (payload_size != sizeof(struct kdbus_audit))
+			return -EINVAL;
+		break;
+
+	case KDBUS_ITEM_POLICY_ACCESS:
+		if (payload_size != sizeof(struct kdbus_policy_access))
+			return -EINVAL;
+		break;
+
+	case KDBUS_ITEM_NAME_ADD:
+	case KDBUS_ITEM_NAME_REMOVE:
+	case KDBUS_ITEM_NAME_CHANGE:
+		if (payload_size < sizeof(struct kdbus_notify_name_change))
+			return -EINVAL;
+		l = payload_size - offsetof(struct kdbus_notify_name_change,
+					    name);
+		if (l > 0 && !kdbus_str_valid(item->name_change.name, l))
+			return -EINVAL;
+		break;
+
+	case KDBUS_ITEM_ID_ADD:
+	case KDBUS_ITEM_ID_REMOVE:
+		if (payload_size != sizeof(struct kdbus_notify_id_change))
+			return -EINVAL;
+		break;
+
+	case KDBUS_ITEM_REPLY_TIMEOUT:
+	case KDBUS_ITEM_REPLY_DEAD:
+		if (payload_size != 0)
+			return -EINVAL;
+		break;
+
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+/**
+ * kdbus_items_validate() - validate items passed by user-space
+ * @items:		items to validate
+ * @items_size:		size of the item stream in bytes
+ *
+ * This verifies that the passed items pointer is consistent and valid.
+ * Furthermore, each item is checked for:
+ *  - valid "size" value
+ *  - payload is of expected type
+ *  - payload is fully included in the item
+ *  - string payloads are zero-terminated
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int kdbus_items_validate(const struct kdbus_item *items, size_t items_size)
+{
+	const struct kdbus_item *item;
+	int ret;
+
+	KDBUS_ITEMS_FOREACH(item, items, items_size) {
+		if (!KDBUS_ITEM_VALID(item, items, items_size))
+			return -EINVAL;
+
+		ret = kdbus_item_validate(item);
+		if (ret < 0)
+			return ret;
+	}
+
+	if (!KDBUS_ITEMS_END(item, items, items_size))
+		return -EINVAL;
+
+	return 0;
+}
+
+static struct kdbus_item *kdbus_items_get(const struct kdbus_item *items,
+					  size_t items_size,
+					  unsigned int item_type)
+{
+	const struct kdbus_item *iter, *found = NULL;
+
+	KDBUS_ITEMS_FOREACH(iter, items, items_size) {
+		if (iter->type == item_type) {
+			if (found)
+				return ERR_PTR(-EEXIST);
+			found = iter;
+		}
+	}
+
+	return (struct kdbus_item *)found ? : ERR_PTR(-EBADMSG);
+}
+
+/**
+ * kdbus_items_get_str() - get string from a list of items
+ * @items:		The items to walk
+ * @items_size:		The size of all items
+ * @item_type:		The item type to look for
+ *
+ * This function walks a list of items and searches for items of type
+ * @item_type. If it finds exactly one such item, its .str member is
+ * returned.
+ *
+ * Return: the string, if the item was found exactly once, ERR_PTR(-EEXIST)
+ * if the item was found more than once, and ERR_PTR(-EBADMSG) if there was
+ * no item of the given type.
+ */
+const char *kdbus_items_get_str(const struct kdbus_item *items,
+				size_t items_size,
+				unsigned int item_type)
+{
+	const struct kdbus_item *item;
+
+	item = kdbus_items_get(items, items_size, item_type);
+	return IS_ERR(item) ? ERR_CAST(item) : item->str;
+}
+
+/**
+ * kdbus_item_set() - Set item content
+ * @item:	The item to modify
+ * @type:	The item type to set (KDBUS_ITEM_*)
+ * @data:	Data to copy to item->data, may be %NULL
+ * @len:	Number of bytes in @data
+ *
+ * This sets type, size and data fields of an item. If @data is NULL, the data
+ * memory is cleared.
+ *
+ * Note that you must align your @data memory to 8 bytes. Trailing padding (in
+ * case @len is not 8-byte aligned) is cleared by this call.
+ *
+ * Returns: Pointer to the following item.
+ */
+struct kdbus_item *kdbus_item_set(struct kdbus_item *item, u64 type,
+				  const void *data, size_t len)
+{
+	item->type = type;
+	item->size = KDBUS_ITEM_HEADER_SIZE + len;
+
+	if (data) {
+		memcpy(item->data, data, len);
+		memset(item->data + len, 0, KDBUS_ALIGN8(len) - len);
+	} else {
+		memset(item->data, 0, KDBUS_ALIGN8(len));
+	}
+
+	return KDBUS_ITEM_NEXT(item);
+}
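A minimal in-kernel sketch of the two helpers working together (illustration only;
the scratch buffer and the item type are arbitrary):

	u64 buf[8];				/* 8-byte aligned scratch space */
	struct kdbus_item *item = (struct kdbus_item *)buf;
	const char *s;

	item = kdbus_item_set(item, KDBUS_ITEM_MAKE_NAME, "mybus", 6);
	/* 'item' now points behind the 8-byte aligned item just written */

	s = kdbus_items_get_str((struct kdbus_item *)buf, (u8 *)item - (u8 *)buf,
				KDBUS_ITEM_MAKE_NAME);
	/* s == "mybus"; ERR_PTR(-EBADMSG) if missing, ERR_PTR(-EEXIST) if repeated */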
diff --git a/ipc/kdbus/item.h b/ipc/kdbus/item.h
new file mode 100644
index 000000000000..eeefd8beac3b
--- /dev/null
+++ b/ipc/kdbus/item.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2013-2015 Kay Sievers
+ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
+ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
+ * Copyright (C) 2013-2015 Linux Foundation
+ * Copyright (C) 2014-2015 Djalal Harouni <tixxdz@opendz.org>
+ *
+ * kdbus is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the
+ * Free Software Foundation; either version 2.1 of the License, or (at
+ * your option) any later version.
+ */
+
+#ifndef __KDBUS_ITEM_H
+#define __KDBUS_ITEM_H
+
+#include <linux/kernel.h>
+#include <uapi/linux/kdbus.h>
+
+#include "util.h"
+
+/* generic access and iterators over a stream of items */
+#define KDBUS_ITEM_NEXT(_i) (typeof(_i))(((u8 *)_i) + KDBUS_ALIGN8((_i)->size))
+#define KDBUS_ITEMS_SIZE(_h, _is) ((_h)->size - offsetof(typeof(*_h), _is))
+#define KDBUS_ITEM_HEADER_SIZE offsetof(struct kdbus_item, data)
+#define KDBUS_ITEM_SIZE(_s) KDBUS_ALIGN8(KDBUS_ITEM_HEADER_SIZE + (_s))
+#define KDBUS_ITEM_PAYLOAD_SIZE(_i) ((_i)->size - KDBUS_ITEM_HEADER_SIZE)
+
+#define KDBUS_ITEMS_FOREACH(_i, _is, _s)				\
+	for (_i = _is;							\
+	     ((u8 *)(_i) < (u8 *)(_is) + (_s)) &&			\
+	       ((u8 *)(_i) >= (u8 *)(_is));				\
+	     _i = KDBUS_ITEM_NEXT(_i))
+
+#define KDBUS_ITEM_VALID(_i, _is, _s)					\
+	((_i)->size >= KDBUS_ITEM_HEADER_SIZE &&			\
+	 (u8 *)(_i) + (_i)->size > (u8 *)(_i) &&			\
+	 (u8 *)(_i) + (_i)->size <= (u8 *)(_is) + (_s) &&		\
+	 (u8 *)(_i) >= (u8 *)(_is))
+
+#define KDBUS_ITEMS_END(_i, _is, _s)					\
+	((u8 *)_i == ((u8 *)(_is) + KDBUS_ALIGN8(_s)))
+
+/**
+ * struct kdbus_item_header - Describes the fixed part of an item
+ * @size:	The total size of the item
+ * @type:	The item type, one of KDBUS_ITEM_*
+ */
+struct kdbus_item_header {
+	u64 size;
+	u64 type;
+};
+
+int kdbus_item_validate_name(const struct kdbus_item *item);
+int kdbus_item_validate(const struct kdbus_item *item);
+int kdbus_items_validate(const struct kdbus_item *items, size_t items_size);
+const char *kdbus_items_get_str(const struct kdbus_item *items,
+				size_t items_size,
+				unsigned int item_type);
+struct kdbus_item *kdbus_item_set(struct kdbus_item *item, u64 type,
+				  const void *data, size_t len);
+
+#endif
diff --git a/ipc/kdbus/message.c b/ipc/kdbus/message.c
new file mode 100644
index 000000000000..80960756a329
--- /dev/null
+++ b/ipc/kdbus/message.c
@@ -0,0 +1,616 @@
+/*
+ * Copyright (C) 2013-2015 Kay Sievers
+ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
+ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
+ * Copyright (C) 2013-2015 Linux Foundation
+ * Copyright (C) 2014-2015 Djalal Harouni <tixxdz@opendz.org>
+ *
+ * kdbus is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the
+ * Free Software Foundation; either version 2.1 of the License, or (at
+ * your option) any later version.
+ */
+
+#include <linux/capability.h>
+#include <linux/cgroup.h>
+#include <linux/cred.h>
+#include <linux/file.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/shmem_fs.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <net/sock.h>
+
+#include "bus.h"
+#include "connection.h"
+#include "domain.h"
+#include "endpoint.h"
+#include "handle.h"
+#include "item.h"
+#include "match.h"
+#include "message.h"
+#include "names.h"
+#include "policy.h"
+
+#define KDBUS_KMSG_HEADER_SIZE offsetof(struct kdbus_kmsg, msg)
+
+static struct kdbus_msg_resources *kdbus_msg_resources_new(void)
+{
+	struct kdbus_msg_resources *r;
+
+	r = kzalloc(sizeof(*r), GFP_KERNEL);
+	if (!r)
+		return ERR_PTR(-ENOMEM);
+
+	kref_init(&r->kref);
+
+	return r;
+}
+
+static void __kdbus_msg_resources_free(struct kref *kref)
+{
+	struct kdbus_msg_resources *r =
+		container_of(kref, struct kdbus_msg_resources, kref);
+	size_t i;
+
+	for (i = 0; i < r->data_count; ++i) {
+		switch (r->data[i].type) {
+		case KDBUS_MSG_DATA_VEC:
+			/* nothing to do */
+			break;
+		case KDBUS_MSG_DATA_MEMFD:
+			if (r->data[i].memfd.file)
+				fput(r->data[i].memfd.file);
+			break;
+		}
+	}
+
+	for (i = 0; i < r->fds_count; i++)
+		if (r->fds[i])
+			fput(r->fds[i]);
+
+	kfree(r->dst_name);
+	kfree(r->data);
+	kfree(r->fds);
+	kfree(r);
+}
+
+/**
+ * kdbus_msg_resources_ref() - Acquire reference to msg resources
+ * @r:		resources to acquire ref to
+ *
+ * Return: The acquired resource
+ */
+struct kdbus_msg_resources *
+kdbus_msg_resources_ref(struct kdbus_msg_resources *r)
+{
+	if (r)
+		kref_get(&r->kref);
+	return r;
+}
+
+/**
+ * kdbus_msg_resources_unref() - Drop reference to msg resources
+ * @r:		resources to drop reference of
+ *
+ * Return: NULL
+ */
+struct kdbus_msg_resources *
+kdbus_msg_resources_unref(struct kdbus_msg_resources *r)
+{
+	if (r)
+		kref_put(&r->kref, __kdbus_msg_resources_free);
+	return NULL;
+}
+
+/**
+ * kdbus_kmsg_free() - free allocated message
+ * @kmsg:		Message
+ */
+void kdbus_kmsg_free(struct kdbus_kmsg *kmsg)
+{
+	if (!kmsg)
+		return;
+
+	kdbus_msg_resources_unref(kmsg->res);
+	kdbus_meta_conn_unref(kmsg->conn_meta);
+	kdbus_meta_proc_unref(kmsg->proc_meta);
+	kfree(kmsg->iov);
+	kfree(kmsg);
+}
+
+/**
+ * kdbus_kmsg_new() - allocate message
+ * @bus:		Bus this message is allocated on
+ * @extra_size:		Additional size to reserve for data
+ *
+ * Return: new kdbus_kmsg on success, ERR_PTR on failure.
+ */
+struct kdbus_kmsg *kdbus_kmsg_new(struct kdbus_bus *bus, size_t extra_size)
+{
+	struct kdbus_kmsg *m;
+	size_t size;
+	int ret;
+
+	size = sizeof(struct kdbus_kmsg) + KDBUS_ITEM_SIZE(extra_size);
+	m = kzalloc(size, GFP_KERNEL);
+	if (!m)
+		return ERR_PTR(-ENOMEM);
+
+	m->seq = atomic64_inc_return(&bus->domain->last_id);
+	m->msg.size = size - KDBUS_KMSG_HEADER_SIZE;
+	m->msg.items[0].size = KDBUS_ITEM_SIZE(extra_size);
+
+	m->proc_meta = kdbus_meta_proc_new();
+	if (IS_ERR(m->proc_meta)) {
+		ret = PTR_ERR(m->proc_meta);
+		m->proc_meta = NULL;
+		goto exit;
+	}
+
+	m->conn_meta = kdbus_meta_conn_new();
+	if (IS_ERR(m->conn_meta)) {
+		ret = PTR_ERR(m->conn_meta);
+		m->conn_meta = NULL;
+		goto exit;
+	}
+
+	return m;
+
+exit:
+	kdbus_kmsg_free(m);
+	return ERR_PTR(ret);
+}
+
+static int kdbus_handle_check_file(struct file *file)
+{
+	struct inode *inode = file_inode(file);
+	struct socket *sock;
+
+	/*
+	 * Don't allow file descriptors in the transport that themselves allow
+	 * file descriptor queueing. This will eventually be allowed once both
+	 * unix domain sockets and kdbus share a generic garbage collector.
+	 */
+
+	if (file->f_op == &kdbus_handle_ops)
+		return -EOPNOTSUPP;
+
+	if (!S_ISSOCK(inode->i_mode))
+		return 0;
+
+	if (file->f_mode & FMODE_PATH)
+		return 0;
+
+	sock = SOCKET_I(inode);
+	if (sock->sk && sock->ops && sock->ops->family == PF_UNIX)
+		return -EOPNOTSUPP;
+
+	return 0;
+}
+
+static const char * const zeros = "\0\0\0\0\0\0\0";
+
+/*
+ * kdbus_msg_scan_items() - validate incoming data and prepare parsing
+ * @kmsg:		Message
+ * @bus:		Bus the message is sent over
+ *
+ * Return: 0 on success, negative errno on failure.
+ *
+ * File references in MEMFD or FDS items are pinned.
+ *
+ * On errors, the caller should drop any taken reference with
+ * kdbus_kmsg_free()
+ */
+static int kdbus_msg_scan_items(struct kdbus_kmsg *kmsg,
+				struct kdbus_bus *bus)
+{
+	struct kdbus_msg_resources *res = kmsg->res;
+	const struct kdbus_msg *msg = &kmsg->msg;
+	const struct kdbus_item *item;
+	size_t n, n_vecs, n_memfds;
+	bool has_bloom = false;
+	bool has_name = false;
+	bool has_fds = false;
+	bool is_broadcast;
+	bool is_signal;
+	u64 vec_size;
+
+	is_broadcast = (msg->dst_id == KDBUS_DST_ID_BROADCAST);
+	is_signal = !!(msg->flags & KDBUS_MSG_SIGNAL);
+
+	/* count data payloads */
+	n_vecs = 0;
+	n_memfds = 0;
+	KDBUS_ITEMS_FOREACH(item, msg->items, KDBUS_ITEMS_SIZE(msg, items)) {
+		switch (item->type) {
+		case KDBUS_ITEM_PAYLOAD_VEC:
+			++n_vecs;
+			break;
+		case KDBUS_ITEM_PAYLOAD_MEMFD:
+			++n_memfds;
+			if (item->memfd.size % 8)
+				++n_vecs;
+			break;
+		default:
+			break;
+		}
+	}
+
+	n = n_vecs + n_memfds;
+	if (n > 0) {
+		res->data = kcalloc(n, sizeof(*res->data), GFP_KERNEL);
+		if (!res->data)
+			return -ENOMEM;
+	}
+
+	if (n_vecs > 0) {
+		kmsg->iov = kcalloc(n_vecs, sizeof(*kmsg->iov), GFP_KERNEL);
+		if (!kmsg->iov)
+			return -ENOMEM;
+	}
+
+	/* import data payloads */
+	n = 0;
+	vec_size = 0;
+	KDBUS_ITEMS_FOREACH(item, msg->items, KDBUS_ITEMS_SIZE(msg, items)) {
+		size_t payload_size = KDBUS_ITEM_PAYLOAD_SIZE(item);
+		struct iovec *iov = kmsg->iov + kmsg->iov_count;
+
+		if (++n > KDBUS_MSG_MAX_ITEMS)
+			return -E2BIG;
+
+		switch (item->type) {
+		case KDBUS_ITEM_PAYLOAD_VEC: {
+			struct kdbus_msg_data *d = res->data + res->data_count;
+			void __force __user *ptr = KDBUS_PTR(item->vec.address);
+			size_t size = item->vec.size;
+
+			if (vec_size + size < vec_size)
+				return -EMSGSIZE;
+			if (vec_size + size > KDBUS_MSG_MAX_PAYLOAD_VEC_SIZE)
+				return -EMSGSIZE;
+
+			d->type = KDBUS_MSG_DATA_VEC;
+			d->size = size;
+
+			if (ptr) {
+				if (unlikely(!access_ok(VERIFY_READ, ptr,
+							size)))
+					return -EFAULT;
+
+				d->vec.off = kmsg->pool_size;
+				iov->iov_base = ptr;
+				iov->iov_len = size;
+			} else {
+				d->vec.off = ~0ULL;
+				iov->iov_base = (char __user *)zeros;
+				iov->iov_len = size % 8;
+			}
+
+			if (kmsg->pool_size + iov->iov_len < kmsg->pool_size)
+				return -EMSGSIZE;
+
+			kmsg->pool_size += iov->iov_len;
+			++kmsg->iov_count;
+			++res->vec_count;
+			++res->data_count;
+			vec_size += size;
+
+			break;
+		}
+
+		case KDBUS_ITEM_PAYLOAD_MEMFD: {
+			struct kdbus_msg_data *d = res->data + res->data_count;
+			u64 start = item->memfd.start;
+			u64 size = item->memfd.size;
+			size_t pad = size % 8;
+			int seals, mask;
+			struct file *f;
+
+			if (kmsg->pool_size + size % 8 < kmsg->pool_size)
+				return -EMSGSIZE;
+			if (start + size < start)
+				return -EMSGSIZE;
+
+			if (item->memfd.fd < 0)
+				return -EBADF;
+
+			if (res->memfd_count >= KDBUS_MSG_MAX_MEMFD_ITEMS)
+				return -E2BIG;
+
+			f = fget(item->memfd.fd);
+			if (!f)
+				return -EBADF;
+
+			if (pad) {
+				iov->iov_base = (char __user *)zeros;
+				iov->iov_len = pad;
+
+				kmsg->pool_size += pad;
+				++kmsg->iov_count;
+			}
+
+			++res->data_count;
+			++res->memfd_count;
+
+			d->type = KDBUS_MSG_DATA_MEMFD;
+			d->size = size;
+			d->memfd.start = start;
+			d->memfd.file = f;
+
+			/*
+			 * We only accept a sealed memfd file whose content
+			 * cannot be altered by the sender or anybody else
+			 * while it is shared or in-flight. Other files need
+			 * to be passed with KDBUS_ITEM_FDS.
+			 */
+			seals = shmem_get_seals(f);
+			if (seals < 0)
+				return -EMEDIUMTYPE;
+
+			mask = F_SEAL_SHRINK | F_SEAL_GROW |
+				F_SEAL_WRITE | F_SEAL_SEAL;
+			if ((seals & mask) != mask)
+				return -ETXTBSY;
+
+			if (start + size > (u64)i_size_read(file_inode(f)))
+				return -EBADF;
+
+			break;
+		}
+
+		case KDBUS_ITEM_FDS: {
+			unsigned int i;
+			unsigned int fds_count = payload_size / sizeof(int);
+
+			/* do not allow multiple fd arrays */
+			if (has_fds)
+				return -EEXIST;
+			has_fds = true;
+
+			/* Do not allow to broadcast file descriptors */
+			if (is_broadcast)
+				return -ENOTUNIQ;
+
+			if (fds_count > KDBUS_CONN_MAX_FDS_PER_USER)
+				return -EMFILE;
+
+			res->fds = kcalloc(fds_count, sizeof(struct file *),
+					   GFP_KERNEL);
+			if (!res->fds)
+				return -ENOMEM;
+
+			for (i = 0; i < fds_count; i++) {
+				int fd = item->fds[i];
+				int ret;
+
+				/*
+				 * Verify the fd and increment the usage count.
+				 * Use fget_raw() to allow passing O_PATH fds.
+				 */
+				if (fd < 0)
+					return -EBADF;
+
+				res->fds[i] = fget_raw(fd);
+				if (!res->fds[i])
+					return -EBADF;
+
+				res->fds_count++;
+
+				ret = kdbus_handle_check_file(res->fds[i]);
+				if (ret < 0)
+					return ret;
+			}
+
+			break;
+		}
+
+		case KDBUS_ITEM_BLOOM_FILTER: {
+			u64 bloom_size;
+
+			/* do not allow multiple bloom filters */
+			if (has_bloom)
+				return -EEXIST;
+			has_bloom = true;
+
+			bloom_size = payload_size -
+				     offsetof(struct kdbus_bloom_filter, data);
+
+			/*
+			 * Allow only bloom filter sizes that are a multiple
+			 * of 64 bits.
+			 */
+			if (!KDBUS_IS_ALIGNED8(bloom_size))
+				return -EFAULT;
+
+			/* do not allow mismatching bloom filter sizes */
+			if (bloom_size != bus->bloom.size)
+				return -EDOM;
+
+			kmsg->bloom_filter = &item->bloom_filter;
+			break;
+		}
+
+		case KDBUS_ITEM_DST_NAME:
+			/* do not allow multiple names */
+			if (has_name)
+				return -EEXIST;
+			has_name = true;
+
+			if (!kdbus_name_is_valid(item->str, false))
+				return -EINVAL;
+
+			res->dst_name = kstrdup(item->str, GFP_KERNEL);
+			if (!res->dst_name)
+				return -ENOMEM;
+			break;
+
+		default:
+			return -EINVAL;
+		}
+	}
+
+	/* name is needed if no ID is given */
+	if (msg->dst_id == KDBUS_DST_ID_NAME && !has_name)
+		return -EDESTADDRREQ;
+
+	if (is_broadcast) {
+		/* Broadcasts can't take names */
+		if (has_name)
+			return -EBADMSG;
+
+		/* All broadcasts have to be signals */
+		if (!is_signal)
+			return -EBADMSG;
+
+		/* Timeouts are not allowed for broadcasts */
+		if (msg->timeout_ns > 0)
+			return -ENOTUNIQ;
+	}
+
+	/*
+	 * Signal messages require a bloom filter, and bloom filters are
+	 * only valid with signals.
+	 */
+	if (is_signal ^ has_bloom)
+		return -EBADMSG;
+
+	return 0;
+}
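For illustration (not part of the patch): how a sender would prepare a memfd that
passes the seal check above, using the memfd_create() and fcntl() sealing interfaces
available since Linux 3.17 ('data' and 'len' are hypothetical):

	int mfd = memfd_create("payload", MFD_ALLOW_SEALING);

	write(mfd, data, len);
	fcntl(mfd, F_ADD_SEALS,
	      F_SEAL_SHRINK | F_SEAL_GROW | F_SEAL_WRITE | F_SEAL_SEAL);

	/* mfd can now be referenced from a KDBUS_ITEM_PAYLOAD_MEMFD item:
	 * memfd.fd = mfd, memfd.size = len, memfd.start = 0 */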
+
+/**
+ * kdbus_kmsg_new_from_cmd() - create kernel message from send payload
+ * @conn:		Connection
+ * @cmd_send:		Payload of KDBUS_CMD_SEND
+ *
+ * Return: a new kdbus_kmsg on success, ERR_PTR on failure.
+ */
+struct kdbus_kmsg *kdbus_kmsg_new_from_cmd(struct kdbus_conn *conn,
+					   struct kdbus_cmd_send *cmd_send)
+{
+	struct kdbus_kmsg *m;
+	u64 size;
+	int ret;
+
+	ret = kdbus_copy_from_user(&size, KDBUS_PTR(cmd_send->msg_address),
+				   sizeof(size));
+	if (ret < 0)
+		return ERR_PTR(ret);
+
+	if (size < sizeof(struct kdbus_msg) || size > KDBUS_MSG_MAX_SIZE)
+		return ERR_PTR(-EINVAL);
+
+	m = kmalloc(size + KDBUS_KMSG_HEADER_SIZE, GFP_KERNEL);
+	if (!m)
+		return ERR_PTR(-ENOMEM);
+
+	memset(m, 0, KDBUS_KMSG_HEADER_SIZE);
+	m->seq = atomic64_inc_return(&conn->ep->bus->domain->last_id);
+
+	m->proc_meta = kdbus_meta_proc_new();
+	if (IS_ERR(m->proc_meta)) {
+		ret = PTR_ERR(m->proc_meta);
+		m->proc_meta = NULL;
+		goto exit_free;
+	}
+
+	m->conn_meta = kdbus_meta_conn_new();
+	if (IS_ERR(m->conn_meta)) {
+		ret = PTR_ERR(m->conn_meta);
+		m->conn_meta = NULL;
+		goto exit_free;
+	}
+
+	if (copy_from_user(&m->msg, KDBUS_PTR(cmd_send->msg_address), size)) {
+		ret = -EFAULT;
+		goto exit_free;
+	}
+
+	if (m->msg.size != size) {
+		ret = -EINVAL;
+		goto exit_free;
+	}
+
+	if (m->msg.flags & ~(KDBUS_MSG_EXPECT_REPLY |
+			     KDBUS_MSG_NO_AUTO_START |
+			     KDBUS_MSG_SIGNAL)) {
+		ret = -EINVAL;
+		goto exit_free;
+	}
+
+	ret = kdbus_items_validate(m->msg.items,
+				   KDBUS_ITEMS_SIZE(&m->msg, items));
+	if (ret < 0)
+		goto exit_free;
+
+	m->res = kdbus_msg_resources_new();
+	if (IS_ERR(m->res)) {
+		ret = PTR_ERR(m->res);
+		m->res = NULL;
+		goto exit_free;
+	}
+
+	/* do not accept kernel-generated messages */
+	if (m->msg.payload_type == KDBUS_PAYLOAD_KERNEL) {
+		ret = -EINVAL;
+		goto exit_free;
+	}
+
+	if (m->msg.flags & KDBUS_MSG_EXPECT_REPLY) {
+		/* requests for replies need timeout and cookie */
+		if (m->msg.timeout_ns == 0 || m->msg.cookie == 0) {
+			ret = -EINVAL;
+			goto exit_free;
+		}
+
+		/* replies may not be expected for broadcasts */
+		if (m->msg.dst_id == KDBUS_DST_ID_BROADCAST) {
+			ret = -ENOTUNIQ;
+			goto exit_free;
+		}
+
+		/* replies may not be expected for signals */
+		if (m->msg.flags & KDBUS_MSG_SIGNAL) {
+			ret = -EINVAL;
+			goto exit_free;
+		}
+	} else {
+		/*
+		 * KDBUS_SEND_SYNC_REPLY is only valid together with
+		 * KDBUS_MSG_EXPECT_REPLY
+		 */
+		if (cmd_send->flags & KDBUS_SEND_SYNC_REPLY) {
+			ret = -EINVAL;
+			goto exit_free;
+		}
+
+		/* replies cannot be signals */
+		if (m->msg.cookie_reply && (m->msg.flags & KDBUS_MSG_SIGNAL)) {
+			ret = -EINVAL;
+			goto exit_free;
+		}
+	}
+
+	ret = kdbus_msg_scan_items(m, conn->ep->bus);
+	if (ret < 0)
+		goto exit_free;
+
+	/* patch-in the source of this message */
+	if (m->msg.src_id > 0 && m->msg.src_id != conn->id) {
+		ret = -EINVAL;
+		goto exit_free;
+	}
+	m->msg.src_id = conn->id;
+
+	return m;
+
+exit_free:
+	kdbus_kmsg_free(m);
+	return ERR_PTR(ret);
+}
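For illustration (not part of the patch): a hedged userspace sketch of the smallest
message that passes the checks above, a unicast with one inline PAYLOAD_VEC.
'peer_id', 'payload' and 'payload_len' are hypothetical, and the field names follow
the kdbus uapi as assumed here:

	__u64 buf[64] = {};
	struct kdbus_msg *msg = (struct kdbus_msg *)buf;
	struct kdbus_item *item = msg->items;

	msg->payload_type = KDBUS_PAYLOAD_DBUS;	/* anything but KDBUS_PAYLOAD_KERNEL */
	msg->dst_id       = peer_id;
	msg->cookie       = 1;

	item->type        = KDBUS_ITEM_PAYLOAD_VEC;
	item->size        = offsetof(struct kdbus_item, vec) +
			    sizeof(struct kdbus_vec);
	item->vec.address = (__u64)(uintptr_t)payload;
	item->vec.size    = payload_len;

	/* item->size is already 8-byte aligned here */
	msg->size = offsetof(struct kdbus_msg, items) + item->size;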
diff --git a/ipc/kdbus/message.h b/ipc/kdbus/message.h
new file mode 100644
index 000000000000..af4775850235
--- /dev/null
+++ b/ipc/kdbus/message.h
@@ -0,0 +1,133 @@
+/*
+ * Copyright (C) 2013-2015 Kay Sievers
+ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
+ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
+ * Copyright (C) 2013-2015 Linux Foundation
+ *
+ * kdbus is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the
+ * Free Software Foundation; either version 2.1 of the License, or (at
+ * your option) any later version.
+ */
+
+#ifndef __KDBUS_MESSAGE_H
+#define __KDBUS_MESSAGE_H
+
+#include "util.h"
+#include "metadata.h"
+
+/**
+ * enum kdbus_msg_data_type - Type of kdbus_msg_data payloads
+ * @KDBUS_MSG_DATA_VEC:		Data vector provided by user-space
+ * @KDBUS_MSG_DATA_MEMFD:	Memfd payload
+ */
+enum kdbus_msg_data_type {
+	KDBUS_MSG_DATA_VEC,
+	KDBUS_MSG_DATA_MEMFD,
+};
+
+/**
+ * struct kdbus_msg_data - Data payload as stored by messages
+ * @type:	Type of payload (KDBUS_MSG_DATA_*)
+ * @size:	Size of the described payload
+ * @off:	The offset, relative to the vec slice
+ * @start:	Offset inside the memfd
+ * @file:	Backing file referenced by the memfd
+ */
+struct kdbus_msg_data {
+	unsigned int type;
+	u64 size;
+
+	union {
+		struct {
+			u64 off;
+		} vec;
+		struct {
+			u64 start;
+			struct file *file;
+		} memfd;
+	};
+};
+
+/**
+ * struct kdbus_msg_resources - resources of a message
+ * @kref:		Reference counter
+ * @dst_name:		Destination well-known name, cached for fast lookup
+ * @fds:		Array of file descriptors to pass
+ * @fds_count:		Number of file descriptors to pass
+ * @data:		Array of data payloads
+ * @vec_count:		Number of VEC entries
+ * @memfd_count:	Number of MEMFD entries in @data
+ * @data_count:		Sum of @vec_count + @memfd_count
+ */
+struct kdbus_msg_resources {
+	struct kref kref;
+	const char *dst_name;
+
+	struct file **fds;
+	unsigned int fds_count;
+
+	struct kdbus_msg_data *data;
+	size_t vec_count;
+	size_t memfd_count;
+	size_t data_count;
+};
+
+struct kdbus_msg_resources *
+kdbus_msg_resources_ref(struct kdbus_msg_resources *r);
+struct kdbus_msg_resources *
+kdbus_msg_resources_unref(struct kdbus_msg_resources *r);
+
+/**
+ * struct kdbus_kmsg - internal message handling data
+ * @seq:		Domain-global message sequence number
+ * @notify_type:	Short-cut for faster lookup
+ * @notify_old_id:	Short-cut for faster lookup
+ * @notify_new_id:	Short-cut for faster lookup
+ * @notify_name:	Short-cut for faster lookup
+ * @dst_name_id:	Short-cut to msg for faster lookup
+ * @bloom_filter:	Bloom filter to match message properties
+ * @bloom_generation:	Generation of bloom element set
+ * @notify_entry:	List of kernel-generated notifications
+ * @iov:		Array of iovec, describing the payload to copy
+ * @iov_count:		Number of array members in @iov
+ * @pool_size:		Overall size of inlined data referenced by @iov
+ * @proc_meta:		Appended SCM-like metadata of the sending process
+ * @conn_meta:		Appended SCM-like metadata of the sending connection
+ * @res:		Message resources
+ * @msg:		Message from or to userspace
+ */
+struct kdbus_kmsg {
+	u64 seq;
+	u64 notify_type;
+	u64 notify_old_id;
+	u64 notify_new_id;
+	const char *notify_name;
+
+	u64 dst_name_id;
+	const struct kdbus_bloom_filter *bloom_filter;
+	u64 bloom_generation;
+	struct list_head notify_entry;
+
+	struct iovec *iov;
+	size_t iov_count;
+	u64 pool_size;
+
+	struct kdbus_meta_proc *proc_meta;
+	struct kdbus_meta_conn *conn_meta;
+	struct kdbus_msg_resources *res;
+
+	/* variable size, must be the last member */
+	struct kdbus_msg msg;
+};
+
+struct kdbus_bus;
+struct kdbus_conn;
+
+struct kdbus_kmsg *kdbus_kmsg_new(struct kdbus_bus *bus, size_t extra_size);
+struct kdbus_kmsg *kdbus_kmsg_new_from_cmd(struct kdbus_conn *conn,
+					   struct kdbus_cmd_send *cmd_send);
+void kdbus_kmsg_free(struct kdbus_kmsg *kmsg);
+
+#endif
diff --git a/ipc/kdbus/queue.c b/ipc/kdbus/queue.c
new file mode 100644
index 000000000000..a449464a3975
--- /dev/null
+++ b/ipc/kdbus/queue.c
@@ -0,0 +1,678 @@
+/*
+ * Copyright (C) 2013-2015 Kay Sievers
+ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
+ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
+ * Copyright (C) 2013-2015 Linux Foundation
+ * Copyright (C) 2014-2015 Djalal Harouni <tixxdz@opendz.org>
+ *
+ * kdbus is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the
+ * Free Software Foundation; either version 2.1 of the License, or (at
+ * your option) any later version.
+ */
+
+#include <linux/audit.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/hashtable.h>
+#include <linux/idr.h>
+#include <linux/init.h>
+#include <linux/math64.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/syscalls.h>
+#include <linux/uio.h>
+
+#include "util.h"
+#include "domain.h"
+#include "connection.h"
+#include "item.h"
+#include "message.h"
+#include "metadata.h"
+#include "queue.h"
+#include "reply.h"
+
+/**
+ * kdbus_queue_init() - initialize data structure related to a queue
+ * @queue:	The queue to initialize
+ */
+void kdbus_queue_init(struct kdbus_queue *queue)
+{
+	INIT_LIST_HEAD(&queue->msg_list);
+	queue->msg_prio_queue = RB_ROOT;
+}
+
+/**
+ * kdbus_queue_peek() - Retrieves an entry from a queue
+ * @queue:		The queue
+ * @priority:		Only entries with a priority not exceeding this value
+ *			are considered (lower values mean higher priority)
+ * @use_priority:	Boolean flag whether or not to peek by priority
+ *
+ * Look for an entry in a queue, either by priority or the oldest one (FIFO).
+ * The entry is neither freed nor removed from the queue's lists.
+ *
+ * Return: the peeked queue entry on success, NULL if no suitable msg is found
+ */
+struct kdbus_queue_entry *kdbus_queue_peek(struct kdbus_queue *queue,
+					   s64 priority, bool use_priority)
+{
+	struct kdbus_queue_entry *e;
+
+	if (list_empty(&queue->msg_list))
+		return NULL;
+
+	if (use_priority) {
+		/* get next entry with highest priority */
+		e = rb_entry(queue->msg_prio_highest,
+			     struct kdbus_queue_entry, prio_node);
+
+		/* no entry with the requested priority */
+		if (e->priority > priority)
+			return NULL;
+	} else {
+		/* ignore the priority, return the next entry in FIFO order */
+		e = list_first_entry(&queue->msg_list,
+				     struct kdbus_queue_entry, entry);
+	}
+
+	return e;
+}
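Illustrative behaviour of the priority threshold (not part of the patch); lower
numerical values mean higher priority:

	/* with entries queued at priorities -5 and 3: */
	e = kdbus_queue_peek(&conn->queue, 0, true);	/* returns the -5 entry  */
	e = kdbus_queue_peek(&conn->queue, -10, true);	/* NULL, nothing <= -10  */
	e = kdbus_queue_peek(&conn->queue, 0, false);	/* oldest entry (FIFO)   */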
+
+static void kdbus_queue_entry_link(struct kdbus_queue_entry *entry)
+{
+	struct kdbus_queue *queue = &entry->conn->queue;
+	struct rb_node **n, *pn = NULL;
+	bool highest = true;
+
+	lockdep_assert_held(&entry->conn->lock);
+	if (WARN_ON(!list_empty(&entry->entry)))
+		return;
+
+	/* sort into priority entry tree */
+	n = &queue->msg_prio_queue.rb_node;
+	while (*n) {
+		struct kdbus_queue_entry *e;
+
+		pn = *n;
+		e = rb_entry(pn, struct kdbus_queue_entry, prio_node);
+
+		/* existing node for this priority, add to its list */
+		if (likely(entry->priority == e->priority)) {
+			list_add_tail(&entry->prio_entry, &e->prio_entry);
+			goto prio_done;
+		}
+
+		if (entry->priority < e->priority) {
+			n = &pn->rb_left;
+		} else {
+			n = &pn->rb_right;
+			highest = false;
+		}
+	}
+
+	/* cache highest-priority entry */
+	if (highest)
+		queue->msg_prio_highest = &entry->prio_node;
+
+	/* new node for this priority */
+	rb_link_node(&entry->prio_node, pn, n);
+	rb_insert_color(&entry->prio_node, &queue->msg_prio_queue);
+	INIT_LIST_HEAD(&entry->prio_entry);
+
+prio_done:
+	/* add to unsorted fifo list */
+	list_add_tail(&entry->entry, &queue->msg_list);
+}
+
+static void kdbus_queue_entry_unlink(struct kdbus_queue_entry *entry)
+{
+	struct kdbus_queue *queue = &entry->conn->queue;
+
+	lockdep_assert_held(&entry->conn->lock);
+	if (list_empty(&entry->entry))
+		return;
+
+	list_del_init(&entry->entry);
+
+	if (list_empty(&entry->prio_entry)) {
+		/*
+		 * Single entry for this priority, update cached
+		 * highest-priority entry, remove the tree node.
+		 */
+		if (queue->msg_prio_highest == &entry->prio_node)
+			queue->msg_prio_highest = rb_next(&entry->prio_node);
+
+		rb_erase(&entry->prio_node, &queue->msg_prio_queue);
+	} else {
+		struct kdbus_queue_entry *q;
+
+		/*
+		 * Multiple entries for this priority; get the next one in
+		 * the list, update the cached highest-priority entry and
+		 * make the next entry the new tree node.
+		 */
+		q = list_first_entry(&entry->prio_entry,
+				     struct kdbus_queue_entry, prio_entry);
+		list_del(&entry->prio_entry);
+
+		if (queue->msg_prio_highest == &entry->prio_node)
+			queue->msg_prio_highest = &q->prio_node;
+
+		rb_replace_node(&entry->prio_node, &q->prio_node,
+				&queue->msg_prio_queue);
+	}
+}
+
+/**
+ * kdbus_queue_entry_new() - allocate a queue entry
+ * @conn_dst:	destination connection
+ * @kmsg:	kmsg object the queue entry should track
+ * @user:	user to account message on (or NULL for kernel messages)
+ *
+ * Allocates a queue entry based on a given kmsg and allocates space for
+ * the message payload and the requested metadata in the connection's pool.
+ * The entry is not actually added to the queue's lists at this point.
+ *
+ * Return: the allocated entry on success, or an ERR_PTR on failures.
+ */
+struct kdbus_queue_entry *kdbus_queue_entry_new(struct kdbus_conn *conn_dst,
+						const struct kdbus_kmsg *kmsg,
+						struct kdbus_user *user)
+{
+	struct kdbus_msg_resources *res = kmsg->res;
+	const struct kdbus_msg *msg = &kmsg->msg;
+	struct kdbus_queue_entry *entry;
+	size_t memfd_cnt = 0;
+	struct kvec kvec[2];
+	size_t meta_size;
+	size_t msg_size;
+	u64 payload_off;
+	u64 size = 0;
+	int ret = 0;
+
+	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry)
+		return ERR_PTR(-ENOMEM);
+
+	INIT_LIST_HEAD(&entry->entry);
+	entry->priority = msg->priority;
+	entry->dst_name_id = kmsg->dst_name_id;
+	entry->msg_res = kdbus_msg_resources_ref(res);
+	entry->proc_meta = kdbus_meta_proc_ref(kmsg->proc_meta);
+	entry->conn_meta = kdbus_meta_conn_ref(kmsg->conn_meta);
+	entry->conn = kdbus_conn_ref(conn_dst);
+
+	if (kmsg->msg.src_id == KDBUS_SRC_ID_KERNEL)
+		msg_size = msg->size;
+	else
+		msg_size = offsetof(struct kdbus_msg, items);
+
+	/* sum up the size of the needed slice */
+	size = msg_size;
+
+	if (res) {
+		size += res->vec_count *
+			KDBUS_ITEM_SIZE(sizeof(struct kdbus_vec));
+
+		if (res->memfd_count) {
+			entry->memfd_offset =
+				kcalloc(res->memfd_count, sizeof(size_t),
+					GFP_KERNEL);
+			if (!entry->memfd_offset) {
+				ret = -ENOMEM;
+				goto exit_free_entry;
+			}
+
+			size += res->memfd_count *
+				KDBUS_ITEM_SIZE(sizeof(struct kdbus_memfd));
+		}
+
+		if (res->fds_count)
+			size += KDBUS_ITEM_SIZE(sizeof(int) * res->fds_count);
+
+		if (res->dst_name)
+			size += KDBUS_ITEM_SIZE(strlen(res->dst_name) + 1);
+	}
+
+	/*
+	 * Remember the offset of the metadata part, so we can override
+	 * this part later during kdbus_queue_entry_install().
+	 */
+	entry->meta_offset = size;
+
+	if (entry->proc_meta || entry->conn_meta) {
+		entry->attach_flags =
+			atomic64_read(&conn_dst->attach_flags_recv);
+
+		ret = kdbus_meta_export_prepare(entry->proc_meta,
+						entry->conn_meta,
+						&entry->attach_flags,
+						&meta_size);
+		if (ret < 0)
+			goto exit_free_entry;
+
+		size += meta_size;
+	}
+
+	payload_off = size;
+	size += kmsg->pool_size;
+	size = KDBUS_ALIGN8(size);
+
+	ret = kdbus_conn_quota_inc(conn_dst, user, size,
+				   res ? res->fds_count : 0);
+	if (ret < 0)
+		goto exit_free_entry;
+
+	entry->slice = kdbus_pool_slice_alloc(conn_dst->pool, size, true);
+	if (IS_ERR(entry->slice)) {
+		ret = PTR_ERR(entry->slice);
+		entry->slice = NULL;
+		kdbus_conn_quota_dec(conn_dst, user, size,
+				     res ? res->fds_count : 0);
+		goto exit_free_entry;
+	}
+
+	/* we accounted for exactly 'size' bytes, make sure it didn't grow */
+	WARN_ON(kdbus_pool_slice_size(entry->slice) != size);
+	entry->user = kdbus_user_ref(user);
+
+	/* copy message header */
+	kvec[0].iov_base = (char *)msg;
+	kvec[0].iov_len = msg_size;
+
+	ret = kdbus_pool_slice_copy_kvec(entry->slice, 0, kvec, 1, msg_size);
+	if (ret < 0)
+		goto exit_free_entry;
+
+	/* 'size' will now track the write position */
+	size = msg_size;
+
+	/* create message payload items */
+	if (res) {
+		size_t dst_name_len = 0;
+		unsigned int i;
+		size_t sz = 0;
+
+		if (res->dst_name) {
+			dst_name_len = strlen(res->dst_name) + 1;
+			sz += KDBUS_ITEM_SIZE(dst_name_len);
+		}
+
+		for (i = 0; i < res->data_count; ++i) {
+			struct kdbus_vec v;
+			struct kdbus_memfd m;
+
+			switch (res->data[i].type) {
+			case KDBUS_MSG_DATA_VEC:
+				sz += KDBUS_ITEM_SIZE(sizeof(v));
+				break;
+
+			case KDBUS_MSG_DATA_MEMFD:
+				sz += KDBUS_ITEM_SIZE(sizeof(m));
+				break;
+			}
+		}
+
+		if (sz) {
+			struct kdbus_item *items, *item;
+
+			items = kmalloc(sz, GFP_KERNEL);
+			if (!items) {
+				ret = -ENOMEM;
+				goto exit_free_entry;
+			}
+
+			item = items;
+
+			if (res->dst_name)
+				item = kdbus_item_set(item, KDBUS_ITEM_DST_NAME,
+						      res->dst_name,
+						      dst_name_len);
+
+			for (i = 0; i < res->data_count; ++i) {
+				struct kdbus_msg_data *d = res->data + i;
+				struct kdbus_memfd m = {};
+				struct kdbus_vec v = {};
+
+				switch (d->type) {
+				case KDBUS_MSG_DATA_VEC:
+					v.size = d->size;
+					v.offset = d->vec.off;
+					if (v.offset != ~0ULL)
+						v.offset += payload_off;
+
+					item = kdbus_item_set(item,
+							KDBUS_ITEM_PAYLOAD_OFF,
+							&v, sizeof(v));
+					break;
+
+				case KDBUS_MSG_DATA_MEMFD:
+					/*
+					 * Remember the location of memfds, so
+					 * we can override the content from
+					 * kdbus_queue_entry_install().
+					 */
+					entry->memfd_offset[memfd_cnt++] =
+						msg_size +
+						(char *)item - (char *)items +
+						offsetof(struct kdbus_item,
+							 memfd);
+
+					item = kdbus_item_set(item,
+						       KDBUS_ITEM_PAYLOAD_MEMFD,
+						       &m, sizeof(m));
+					break;
+				}
+			}
+
+			kvec[0].iov_base = items;
+			kvec[0].iov_len = sz;
+
+			ret = kdbus_pool_slice_copy_kvec(entry->slice, size,
+							 kvec, 1, sz);
+			kfree(items);
+
+			if (ret < 0)
+				goto exit_free_entry;
+
+			size += sz;
+		}
+
+		/*
+		 * Remember the location of the FD part, so we can override the
+		 * content in kdbus_queue_entry_install().
+		 */
+		if (res->fds_count) {
+			entry->fds_offset = size;
+			size += KDBUS_ITEM_SIZE(sizeof(int) * res->fds_count);
+		}
+	}
+
+	/* finally, copy over the actual message payload */
+	if (kmsg->iov_count) {
+		ret = kdbus_pool_slice_copy_iovec(entry->slice, payload_off,
+						  kmsg->iov,
+						  kmsg->iov_count,
+						  kmsg->pool_size);
+		if (ret < 0)
+			goto exit_free_entry;
+	}
+
+	return entry;
+
+exit_free_entry:
+	kdbus_queue_entry_free(entry);
+	return ERR_PTR(ret);
+}
+
+/**
+ * kdbus_queue_entry_free() - free resources of an entry
+ * @entry:	The entry to free
+ *
+ * Removes resources allocated by a queue entry, along with the entry itself.
+ * Note that the entry's slice is not freed at this point.
+ */
+void kdbus_queue_entry_free(struct kdbus_queue_entry *entry)
+{
+	if (!entry)
+		return;
+
+	lockdep_assert_held(&entry->conn->lock);
+
+	kdbus_queue_entry_unlink(entry);
+	kdbus_reply_unref(entry->reply);
+
+	if (entry->slice) {
+		kdbus_conn_quota_dec(entry->conn, entry->user,
+				     kdbus_pool_slice_size(entry->slice),
+				     entry->msg_res ?
+						entry->msg_res->fds_count : 0);
+		kdbus_pool_slice_release(entry->slice);
+		kdbus_user_unref(entry->user);
+	}
+
+	kdbus_msg_resources_unref(entry->msg_res);
+	kdbus_meta_conn_unref(entry->conn_meta);
+	kdbus_meta_proc_unref(entry->proc_meta);
+	kdbus_conn_unref(entry->conn);
+	kfree(entry->memfd_offset);
+	kfree(entry);
+}
+
+/**
+ * kdbus_queue_entry_install() - install message components into the
+ *				 receiver's process
+ * @entry:		The queue entry to install
+ * @return_flags:	Pointer to store the return flags for userspace
+ * @install_fds:	Whether or not to install associated file descriptors
+ *
+ * This function exports the process and connection metadata of @entry into
+ * its pool slice, updates the overall message size stored at offset 0 of the
+ * slice, and fills in the FD and memfd items whose offsets were reserved when
+ * the entry was created.
+ *
+ * If @install_fds is %true, the file descriptors carried by the message are
+ * installed into the receiving task as well. This function must always be
+ * called from the task context of the receiver.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int kdbus_queue_entry_install(struct kdbus_queue_entry *entry,
+			      u64 *return_flags, bool install_fds)
+{
+	u64 msg_size = entry->meta_offset;
+	struct kdbus_conn *conn_dst = entry->conn;
+	struct kdbus_msg_resources *res;
+	bool incomplete_fds = false;
+	struct kvec kvec[2];
+	size_t memfds = 0;
+	int i, ret;
+
+	lockdep_assert_held(&conn_dst->lock);
+
+	if (entry->proc_meta || entry->conn_meta) {
+		size_t meta_size;
+
+		ret = kdbus_meta_export(entry->proc_meta,
+					entry->conn_meta,
+					entry->attach_flags,
+					entry->slice,
+					entry->meta_offset,
+					&meta_size);
+		if (ret < 0)
+			return ret;
+
+		msg_size += meta_size;
+	}
+
+	/* Update message size at offset 0 */
+	kvec[0].iov_base = &msg_size;
+	kvec[0].iov_len = sizeof(msg_size);
+
+	ret = kdbus_pool_slice_copy_kvec(entry->slice, 0, kvec, 1,
+					 sizeof(msg_size));
+	if (ret < 0)
+		return ret;
+
+	res = entry->msg_res;
+
+	if (!res)
+		return 0;
+
+	if (res->fds_count) {
+		struct kdbus_item_header hdr;
+		size_t off;
+		int *fds;
+
+		fds = kmalloc_array(res->fds_count, sizeof(int), GFP_KERNEL);
+		if (!fds)
+			return -ENOMEM;
+
+		for (i = 0; i < res->fds_count; i++) {
+			if (install_fds) {
+				fds[i] = get_unused_fd_flags(O_CLOEXEC);
+				if (fds[i] >= 0)
+					fd_install(fds[i],
+						   get_file(res->fds[i]));
+				else
+					incomplete_fds = true;
+			} else {
+				fds[i] = -1;
+			}
+		}
+
+		off = entry->fds_offset;
+
+		hdr.type = KDBUS_ITEM_FDS;
+		hdr.size = KDBUS_ITEM_HEADER_SIZE +
+			   sizeof(int) * res->fds_count;
+
+		kvec[0].iov_base = &hdr;
+		kvec[0].iov_len = sizeof(hdr);
+
+		kvec[1].iov_base = fds;
+		kvec[1].iov_len = sizeof(int) * res->fds_count;
+
+		ret = kdbus_pool_slice_copy_kvec(entry->slice, off,
+						 kvec, 2, hdr.size);
+		kfree(fds);
+
+		if (ret < 0)
+			return ret;
+	}
+
+	for (i = 0; i < res->data_count; ++i) {
+		struct kdbus_msg_data *d = res->data + i;
+		struct kdbus_memfd m;
+
+		if (d->type != KDBUS_MSG_DATA_MEMFD)
+			continue;
+
+		m.start = d->memfd.start;
+		m.size = d->size;
+		m.fd = -1;
+
+		if (install_fds) {
+			m.fd = get_unused_fd_flags(O_CLOEXEC);
+			if (m.fd < 0) {
+				m.fd = -1;
+				incomplete_fds = true;
+			} else {
+				fd_install(m.fd,
+					   get_file(d->memfd.file));
+			}
+		}
+
+		kvec[0].iov_base = &m;
+		kvec[0].iov_len = sizeof(m);
+
+		ret = kdbus_pool_slice_copy_kvec(entry->slice,
+						 entry->memfd_offset[memfds++],
+						 kvec, 1, sizeof(m));
+		if (ret < 0)
+			return ret;
+	}
+
+	if (incomplete_fds)
+		*return_flags |= KDBUS_RECV_RETURN_INCOMPLETE_FDS;
+
+	return 0;
+}
+
+/**
+ * kdbus_queue_entry_enqueue() - enqueue an entry
+ * @entry:		entry to enqueue
+ * @reply:		reply to link to this entry (or NULL if none)
+ *
+ * This enqueues an unqueued entry into the message queue of the linked
+ * connection. It also binds a reply object to the entry so we can remember it
+ * when the message is moved.
+ *
+ * Once this call returns (and the connection lock is released), this entry can
+ * be dequeued by the target connection. Note that the entry will not be removed
+ * from the queue until it is destroyed.
+ */
+void kdbus_queue_entry_enqueue(struct kdbus_queue_entry *entry,
+			       struct kdbus_reply *reply)
+{
+	lockdep_assert_held(&entry->conn->lock);
+
+	if (WARN_ON(entry->reply) || WARN_ON(!list_empty(&entry->entry)))
+		return;
+
+	entry->reply = kdbus_reply_ref(reply);
+	kdbus_queue_entry_link(entry);
+}
+
+/**
+ * kdbus_queue_entry_move() - move queue entry
+ * @e:		queue entry to move
+ * @dst:	destination connection to queue the entry on
+ *
+ * This moves a queue entry onto a different connection. It allocates a new
+ * slice in the target connection's pool and copies the message over. If the
+ * copy succeeds, the entry is moved from its current (source) connection to
+ * @dst.
+ *
+ * On failure, the entry is left untouched.
+ *
+ * The entry must be queued when this is called; after a successful call, it
+ * is queued on @dst and no longer on the source connection.
+ *
+ * The caller must hold the connection locks of both the source and the
+ * destination.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int kdbus_queue_entry_move(struct kdbus_queue_entry *e,
+			   struct kdbus_conn *dst)
+{
+	struct kdbus_pool_slice *slice = NULL;
+	struct kdbus_conn *src = e->conn;
+	size_t size, fds;
+	int ret;
+
+	lockdep_assert_held(&src->lock);
+	lockdep_assert_held(&dst->lock);
+
+	if (WARN_ON(IS_ERR(e->user)) || WARN_ON(list_empty(&e->entry)))
+		return -EINVAL;
+	if (src == dst)
+		return 0;
+
+	size = kdbus_pool_slice_size(e->slice);
+	fds = e->msg_res ? e->msg_res->fds_count : 0;
+
+	ret = kdbus_conn_quota_inc(dst, e->user, size, fds);
+	if (ret < 0)
+		return ret;
+
+	slice = kdbus_pool_slice_alloc(dst->pool, size, true);
+	if (IS_ERR(slice)) {
+		ret = PTR_ERR(slice);
+		slice = NULL;
+		goto error;
+	}
+
+	ret = kdbus_pool_slice_copy(slice, e->slice);
+	if (ret < 0)
+		goto error;
+
+	kdbus_queue_entry_unlink(e);
+	kdbus_conn_quota_dec(src, e->user, size, fds);
+	kdbus_pool_slice_release(e->slice);
+	kdbus_conn_unref(e->conn);
+
+	e->slice = slice;
+	e->conn = kdbus_conn_ref(dst);
+	kdbus_queue_entry_link(e);
+
+	return 0;
+
+error:
+	kdbus_pool_slice_release(slice);
+	kdbus_conn_quota_dec(dst, e->user, size, fds);
+	return ret;
+}
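
To make the interplay of the helpers above easier to review, here is a
minimal receive-path sketch. It is illustrative only and not part of the
patch: it assumes the connection embeds its queue as conn->queue and that
conn->lock protects it (as the lockdep assertions above imply), and it
elides error reporting and the step that publishes the slice offset to
userspace.

/* Illustrative sketch only -- not part of the patch.
 * Assumes "connection.h" and "queue.h" are included.
 */
static int example_recv_one(struct kdbus_conn *conn, u64 *return_flags)
{
	struct kdbus_queue_entry *entry;
	int ret;

	mutex_lock(&conn->lock);

	/* no priority filtering: take the oldest queued entry */
	entry = kdbus_queue_peek(&conn->queue, 0, false);
	if (!entry) {
		mutex_unlock(&conn->lock);
		return -EAGAIN;
	}

	/* write metadata, FD and memfd items and install the FDs */
	ret = kdbus_queue_entry_install(entry, return_flags, true);
	if (ret == 0)
		kdbus_queue_entry_free(entry);

	mutex_unlock(&conn->lock);
	return ret;
}
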
diff --git a/ipc/kdbus/queue.h b/ipc/kdbus/queue.h
new file mode 100644
index 000000000000..7f2db96fe308
--- /dev/null
+++ b/ipc/kdbus/queue.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2013-2015 Kay Sievers
+ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
+ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
+ * Copyright (C) 2013-2015 Linux Foundation
+ * Copyright (C) 2014-2015 Djalal Harouni <tixxdz@opendz.org>
+ *
+ * kdbus is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the
+ * Free Software Foundation; either version 2.1 of the License, or (at
+ * your option) any later version.
+ */
+
+#ifndef __KDBUS_QUEUE_H
+#define __KDBUS_QUEUE_H
+
+struct kdbus_user;
+
+/**
+ * struct kdbus_queue - a connection's message queue
+ * @msg_list:		List head for kdbus_queue_entry objects
+ * @msg_prio_queue:	RB tree root for messages, sorted by priority
+ * @msg_prio_highest:	Link to the RB node referencing the message with the
+ *			highest priority in the tree.
+ */
+struct kdbus_queue {
+	struct list_head msg_list;
+	struct rb_root msg_prio_queue;
+	struct rb_node *msg_prio_highest;
+};
+
+/**
+ * struct kdbus_queue_entry - messages waiting to be read
+ * @entry:		Entry in the connection's list
+ * @prio_node:		Entry in the priority queue tree
+ * @prio_entry:		Queue tree node entry in the list of one priority
+ * @slice:		Slice in the receiver's pool for the message
+ * @attach_flags:	Attach flags used during slice allocation
+ * @meta_offset:	Offset of first metadata item in slice
+ * @fds_offset:		Offset of FD item in slice
+ * @memfd_offset:	Array of slice-offsets for all memfd items
+ * @priority:		Message priority
+ * @dst_name_id:	The sequence number of the name this message is
+ *			addressed to, 0 for messages sent to an ID
+ * @msg_res:		Message resources
+ * @proc_meta:		Process metadata, captured at message arrival
+ * @conn_meta:		Connection metadata, captured at message arrival
+ * @reply:		The reply block if a reply to this message is expected
+ * @conn:		The connection this entry is queued on
+ * @user:		The user used for quota accounting
+ */
+struct kdbus_queue_entry {
+	struct list_head entry;
+	struct rb_node prio_node;
+	struct list_head prio_entry;
+
+	struct kdbus_pool_slice *slice;
+
+	u64 attach_flags;
+	size_t meta_offset;
+	size_t fds_offset;
+	size_t *memfd_offset;
+
+	s64 priority;
+	u64 dst_name_id;
+
+	struct kdbus_msg_resources *msg_res;
+	struct kdbus_meta_proc *proc_meta;
+	struct kdbus_meta_conn *conn_meta;
+	struct kdbus_reply *reply;
+	struct kdbus_conn *conn;
+	struct kdbus_user *user;
+};
+
+struct kdbus_kmsg;
+
+void kdbus_queue_init(struct kdbus_queue *queue);
+struct kdbus_queue_entry *kdbus_queue_peek(struct kdbus_queue *queue,
+					   s64 priority, bool use_priority);
+
+struct kdbus_queue_entry *kdbus_queue_entry_new(struct kdbus_conn *conn_dst,
+						const struct kdbus_kmsg *kmsg,
+						struct kdbus_user *user);
+void kdbus_queue_entry_free(struct kdbus_queue_entry *entry);
+int kdbus_queue_entry_install(struct kdbus_queue_entry *entry,
+			      u64 *return_flags, bool install_fds);
+void kdbus_queue_entry_enqueue(struct kdbus_queue_entry *entry,
+			       struct kdbus_reply *reply);
+int kdbus_queue_entry_move(struct kdbus_queue_entry *entry,
+			   struct kdbus_conn *dst);
+
+#endif /* __KDBUS_QUEUE_H */
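
The send side is the mirror image. The sketch below is again illustrative
only and not part of the patch; it assumes @kmsg has already passed message
validation and that conn_dst->wait is the receiver's wait queue. The entry
is created from the incoming kmsg, charged to @user by
kdbus_queue_entry_new(), linked into the receiver's queue, and the receiver
is woken up.

/* Illustrative sketch only -- not part of the patch.
 * Assumes "connection.h" and "queue.h" are included.
 */
static int example_send_one(struct kdbus_conn *conn_dst,
			    const struct kdbus_kmsg *kmsg,
			    struct kdbus_user *user,
			    struct kdbus_reply *reply)
{
	struct kdbus_queue_entry *entry;

	mutex_lock(&conn_dst->lock);

	entry = kdbus_queue_entry_new(conn_dst, kmsg, user);
	if (IS_ERR(entry)) {
		mutex_unlock(&conn_dst->lock);
		return PTR_ERR(entry);
	}

	/* bind @reply (may be NULL) and link the entry into the queue */
	kdbus_queue_entry_enqueue(entry, reply);
	wake_up_interruptible(&conn_dst->wait);

	mutex_unlock(&conn_dst->lock);
	return 0;
}
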
diff --git a/ipc/kdbus/reply.c b/ipc/kdbus/reply.c
new file mode 100644
index 000000000000..6b3bd81bbb4d
--- /dev/null
+++ b/ipc/kdbus/reply.c
@@ -0,0 +1,259 @@
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/uio.h>
+
+#include "bus.h"
+#include "connection.h"
+#include "endpoint.h"
+#include "message.h"
+#include "metadata.h"
+#include "names.h"
+#include "domain.h"
+#include "item.h"
+#include "notify.h"
+#include "policy.h"
+#include "reply.h"
+#include "util.h"
+
+/**
+ * kdbus_reply_new() - Allocate and set up a new kdbus_reply object
+ * @reply_src:		The connection a reply is expected from
+ * @reply_dst:		The connection this reply object belongs to
+ * @msg:		Message associated with the reply
+ * @name_entry:		Name entry used to send the message
+ * @sync:		Whether or not to make this reply synchronous
+ *
+ * Allocate and fill a new kdbus_reply object.
+ *
+ * Return: New kdbus_reply object on success, ERR_PTR on error.
+ */
+struct kdbus_reply *kdbus_reply_new(struct kdbus_conn *reply_src,
+				    struct kdbus_conn *reply_dst,
+				    const struct kdbus_msg *msg,
+				    struct kdbus_name_entry *name_entry,
+				    bool sync)
+{
+	struct kdbus_reply *r;
+	int ret = 0;
+
+	if (atomic_inc_return(&reply_dst->request_count) >
+	    KDBUS_CONN_MAX_REQUESTS_PENDING) {
+		ret = -EMLINK;
+		goto exit_dec_request_count;
+	}
+
+	r = kzalloc(sizeof(*r), GFP_KERNEL);
+	if (!r) {
+		ret = -ENOMEM;
+		goto exit_dec_request_count;
+	}
+
+	kref_init(&r->kref);
+	INIT_LIST_HEAD(&r->entry);
+	r->reply_src = kdbus_conn_ref(reply_src);
+	r->reply_dst = kdbus_conn_ref(reply_dst);
+	r->cookie = msg->cookie;
+	r->name_id = name_entry ? name_entry->name_id : 0;
+	r->deadline_ns = msg->timeout_ns;
+
+	if (sync) {
+		r->sync = true;
+		r->waiting = true;
+	}
+
+exit_dec_request_count:
+	if (ret < 0) {
+		atomic_dec(&reply_dst->request_count);
+		return ERR_PTR(ret);
+	}
+
+	return r;
+}
+
+static void __kdbus_reply_free(struct kref *kref)
+{
+	struct kdbus_reply *reply =
+		container_of(kref, struct kdbus_reply, kref);
+
+	atomic_dec(&reply->reply_dst->request_count);
+	kdbus_conn_unref(reply->reply_src);
+	kdbus_conn_unref(reply->reply_dst);
+	kfree(reply);
+}
+
+/**
+ * kdbus_reply_ref() - Increase reference on kdbus_reply
+ * @r:		The reply, may be %NULL
+ *
+ * Return: The reply object with an extra reference
+ */
+struct kdbus_reply *kdbus_reply_ref(struct kdbus_reply *r)
+{
+	if (r)
+		kref_get(&r->kref);
+	return r;
+}
+
+/**
+ * kdbus_reply_unref() - Decrease reference on kdbus_reply
+ * @r:		The reply, may be %NULL
+ *
+ * Return: NULL
+ */
+struct kdbus_reply *kdbus_reply_unref(struct kdbus_reply *r)
+{
+	if (r)
+		kref_put(&r->kref, __kdbus_reply_free);
+	return NULL;
+}
+
+/**
+ * kdbus_reply_link() - Link reply object into target connection
+ * @r:		Reply to link
+ */
+void kdbus_reply_link(struct kdbus_reply *r)
+{
+	if (WARN_ON(!list_empty(&r->entry)))
+		return;
+
+	list_add(&r->entry, &r->reply_dst->reply_list);
+	kdbus_reply_ref(r);
+}
+
+/**
+ * kdbus_reply_unlink() - Unlink reply object from target connection
+ * @r:		Reply to unlink
+ */
+void kdbus_reply_unlink(struct kdbus_reply *r)
+{
+	if (!list_empty(&r->entry)) {
+		list_del_init(&r->entry);
+		kdbus_reply_unref(r);
+	}
+}
+
+/**
+ * kdbus_sync_reply_wakeup() - Wake a synchronously blocking reply
+ * @reply:	The reply object
+ * @err:	Error code to set on the remote side
+ *
+ * Wake up the remote peer (the method origin) that is blocked waiting for
+ * this synchronous reply and hand it the given error code. Note that the
+ * reply object is not unlinked from the connection's reply_list here.
+ */
+void kdbus_sync_reply_wakeup(struct kdbus_reply *reply, int err)
+{
+	if (WARN_ON(!reply->sync))
+		return;
+
+	reply->waiting = false;
+	reply->err = err;
+	wake_up_interruptible(&reply->reply_dst->wait);
+}
+
+/**
+ * kdbus_reply_find() - Find the corresponding reply object
+ * @replying:	The replying connection or NULL
+ * @reply_dst:	The connection the reply will be sent to
+ *		(method origin)
+ * @cookie:	The cookie of the requesting message
+ *
+ * Look up a reply object that is expected to be sent as a reply by
+ * @replying to @reply_dst with the given cookie.
+ *
+ * Callers must take the @reply_dst lock.
+ *
+ * Return: the corresponding reply object or NULL if not found
+ */
+struct kdbus_reply *kdbus_reply_find(struct kdbus_conn *replying,
+				     struct kdbus_conn *reply_dst,
+				     u64 cookie)
+{
+	struct kdbus_reply *r, *reply = NULL;
+
+	list_for_each_entry(r, &reply_dst->reply_list, entry) {
+		if (r->cookie == cookie &&
+		    (!replying || r->reply_src == replying)) {
+			reply = r;
+			break;
+		}
+	}
+
+	return reply;
+}
+
+/**
+ * kdbus_reply_list_scan_work() - Worker callback to scan the replies of a
+ *				  connection for exceeded timeouts
+ * @work:		Work struct of the connection to scan
+ *
+ * Walk the list of replies stored with a connection and look for entries
+ * that have exceeded their timeout. If such an entry is found, a timeout
+ * notification is sent to the waiting peer, and the reply is removed from
+ * the list.
+ *
+ * The work is rescheduled to the nearest timeout found during the list
+ * iteration.
+ */
+void kdbus_reply_list_scan_work(struct work_struct *work)
+{
+	struct kdbus_conn *conn =
+		container_of(work, struct kdbus_conn, work.work);
+	struct kdbus_reply *reply, *reply_tmp;
+	u64 deadline = ~0ULL;
+	struct timespec64 ts;
+	u64 now;
+
+	ktime_get_ts64(&ts);
+	now = timespec64_to_ns(&ts);
+
+	mutex_lock(&conn->lock);
+	if (!kdbus_conn_active(conn)) {
+		mutex_unlock(&conn->lock);
+		return;
+	}
+
+	list_for_each_entry_safe(reply, reply_tmp, &conn->reply_list, entry) {
+		/*
+		 * If the reply block is waiting for synchronous I/O,
+		 * the timeout is handled by wait_event_*_timeout(),
+		 * so we don't have to care for it here.
+		 */
+		if (reply->sync && !reply->interrupted)
+			continue;
+
+		WARN_ON(reply->reply_dst != conn);
+
+		if (reply->deadline_ns > now) {
+			/* remember next timeout */
+			if (deadline > reply->deadline_ns)
+				deadline = reply->deadline_ns;
+
+			continue;
+		}
+
+		/*
+		 * A zero deadline means the connection died, was
+		 * cleaned up already and the notification was sent.
+		 * Don't send notifications for reply trackers that were
+		 * left in an interrupted syscall state.
+		 */
+		if (reply->deadline_ns != 0 && !reply->interrupted)
+			kdbus_notify_reply_timeout(conn->ep->bus, conn->id,
+						   reply->cookie);
+
+		kdbus_reply_unlink(reply);
+	}
+
+	/* rearm delayed work with next timeout */
+	if (deadline != ~0ULL)
+		schedule_delayed_work(&conn->work,
+				      nsecs_to_jiffies(deadline - now));
+
+	mutex_unlock(&conn->lock);
+
+	kdbus_notify_flush(conn->ep->bus);
+}
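
For orientation, a minimal sketch of how the replying side might resolve a
tracker once its answer has been queued; illustrative only and not part of
the patch. The matching reply object is looked up under the method origin's
lock (as required by kdbus_reply_find()), the origin is woken up if it is
blocking synchronously, and the tracker is dropped from the list.

/* Illustrative sketch only -- not part of the patch.
 * Assumes "connection.h" and "reply.h" are included.
 */
static void example_resolve_reply(struct kdbus_conn *replying,
				  struct kdbus_conn *reply_dst,
				  u64 cookie)
{
	struct kdbus_reply *r;

	mutex_lock(&reply_dst->lock);

	r = kdbus_reply_find(replying, reply_dst, cookie);
	if (r) {
		if (r->sync)
			kdbus_sync_reply_wakeup(r, 0);
		kdbus_reply_unlink(r);
	}

	mutex_unlock(&reply_dst->lock);
}
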
diff --git a/ipc/kdbus/reply.h b/ipc/kdbus/reply.h
new file mode 100644
index 000000000000..68d52321a917
--- /dev/null
+++ b/ipc/kdbus/reply.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2013-2015 Kay Sievers
+ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
+ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
+ * Copyright (C) 2013-2015 Linux Foundation
+ * Copyright (C) 2014-2015 Djalal Harouni <tixxdz@opendz.org>
+ *
+ * kdbus is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the
+ * Free Software Foundation; either version 2.1 of the License, or (at
+ * your option) any later version.
+ */
+
+#ifndef __KDBUS_REPLY_H
+#define __KDBUS_REPLY_H
+
+/**
+ * struct kdbus_reply - an entry of kdbus_conn's list of replies
+ * @kref:		Ref-count of this object
+ * @entry:		The entry of the connection's reply_list
+ * @reply_src:		The connection the reply will be sent from
+ * @reply_dst:		The connection the reply will be sent to
+ * @queue_entry:	The queue entry item that is prepared by the replying
+ *			connection
+ * @deadline_ns:	The deadline of the reply, in nanoseconds
+ * @cookie:		The cookie of the requesting message
+ * @name_id:		ID of the well-known name the original msg was sent to
+ * @sync:		The reply block is waiting for synchronous I/O
+ * @waiting:		The condition to synchronously wait for
+ * @interrupted:	The sync reply was left in an interrupted state
+ * @err:		The error code for the synchronous reply
+ */
+struct kdbus_reply {
+	struct kref kref;
+	struct list_head entry;
+	struct kdbus_conn *reply_src;
+	struct kdbus_conn *reply_dst;
+	struct kdbus_queue_entry *queue_entry;
+	u64 deadline_ns;
+	u64 cookie;
+	u64 name_id;
+	bool sync:1;
+	bool waiting:1;
+	bool interrupted:1;
+	int err;
+};
+
+struct kdbus_reply *kdbus_reply_new(struct kdbus_conn *reply_src,
+				    struct kdbus_conn *reply_dst,
+				    const struct kdbus_msg *msg,
+				    struct kdbus_name_entry *name_entry,
+				    bool sync);
+
+struct kdbus_reply *kdbus_reply_ref(struct kdbus_reply *r);
+struct kdbus_reply *kdbus_reply_unref(struct kdbus_reply *r);
+
+void kdbus_reply_link(struct kdbus_reply *r);
+void kdbus_reply_unlink(struct kdbus_reply *r);
+
+struct kdbus_reply *kdbus_reply_find(struct kdbus_conn *replying,
+				     struct kdbus_conn *reply_dst,
+				     u64 cookie);
+
+void kdbus_sync_reply_wakeup(struct kdbus_reply *reply, int err);
+void kdbus_reply_list_scan_work(struct work_struct *work);
+
+#endif /* __KDBUS_REPLY_H */
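
The requesting side, sketched under the same caveats (illustrative only,
not part of the patch): a synchronous method call allocates a tracker with
kdbus_reply_new() and links it into its own reply_list under its connection
lock; the caller would then block on its wait queue until
kdbus_sync_reply_wakeup() clears ->waiting. The wait loop and timeout
handling are elided here.

/* Illustrative sketch only -- not part of the patch.
 * Assumes "connection.h" and "reply.h" are included.
 */
static struct kdbus_reply *example_track_call(struct kdbus_conn *src,
					      struct kdbus_conn *dst,
					      const struct kdbus_msg *msg,
					      struct kdbus_name_entry *name)
{
	struct kdbus_reply *r;

	/* @dst is the method origin that will wait for the reply */
	r = kdbus_reply_new(src, dst, msg, name, true);
	if (IS_ERR(r))
		return r;

	mutex_lock(&dst->lock);
	kdbus_reply_link(r);
	mutex_unlock(&dst->lock);

	return r;
}
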
diff --git a/ipc/kdbus/util.h b/ipc/kdbus/util.h
index 9caadb337912..740b19880985 100644
--- a/ipc/kdbus/util.h
+++ b/ipc/kdbus/util.h
@@ -18,7 +18,7 @@
 #include <linux/dcache.h>
 #include <linux/ioctl.h>
 
-#include "kdbus.h"
+#include <uapi/linux/kdbus.h>
 
 /* all exported addresses are 64 bit */
 #define KDBUS_PTR(addr) ((void __user *)(uintptr_t)(addr))
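
The context lines of this hunk show the convention the header enforces:
every address crossing the ABI is carried as a 64-bit integer and converted
back to a user pointer with KDBUS_PTR(). A hypothetical illustration (the
struct and field names below are made up for this example and are not taken
from the uapi header):

/* Hypothetical example only -- not part of the patch.
 * Assumes <linux/uaccess.h> and "util.h" are included.
 */
struct example_cmd {
	__u64 items_addr;	/* user pointer, transported as a u64 */
	__u64 items_size;
};

static int example_copy_items(const struct example_cmd *cmd, void *buf)
{
	if (copy_from_user(buf, KDBUS_PTR(cmd->items_addr), cmd->items_size))
		return -EFAULT;
	return 0;
}
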
-- 
2.3.1

