{
struct kdbus_bus *b;
int ret;
- attach_flags_t attach_flags;
+ ulong attach_flags;
if (bloom->size < 8 || bloom->size > KDBUS_BUS_BLOOM_MAX_SIZE || !KDBUS_IS_ALIGNED8(bloom->size) || bloom->n_hash < 1)
return ERR_PTR(-EINVAL);
{
struct kdbus_meta_payload *meta_payload = kdbus_meta_payload_new(0);
- unsigned meta_size;
+ ulong meta_size;
if (!meta_payload)
return ERR_PTR(-ENOMEM);
ret = kdbus_meta_proc_collect(&meta_payload, attach_flags);
struct kdbus_staging_ptr staging)
{
struct kdbus_conn *conn_dst;
- unsigned int i;
+ ulong i;
lockdep_assert_held(&bus->name_registry.rwlock);
struct kdbus_info info;
struct kdbus_item_header item_hdr;
} header;
- unsigned name_len, cnt = 0;
+ ulong name_len, cnt = 0;
struct kvec kvec[3 + MAX_META_EMIT_VECS_BUS];
- unsigned size;
+ ulong size;
int ret;
- attach_flags_t attach_flags;
+ ulong attach_flags;
struct kdbus_meta_stack meta_stack;
struct kdbus_meta_payload *pay = NULL;
cnt += kdbus_kvec_pad(&kvec[cnt], size);
cnt += kdbus_meta_emit_stack_kvec(&bus->meta, conn, &kvec[cnt], &meta_stack, attach_flags, &size);
if (attach_flags & KDBUS_ATTACH_AUXGROUPS) {
- unsigned pay_size;
+ ulong pay_size;
if (!(pay = kdbus_meta_payload_new(bus->meta.auxgroups_alloc_order))) {
ret = -ENOMEM;
goto exit;
struct kdbus_bus *bus = ep->bus;
struct kdbus_conn *conn;
struct kvec kvec;
- attach_flags_t attach_flags_send;
- attach_flags_t attach_flags_recv;
+ ulong attach_flags_send;
+ ulong attach_flags_recv;
bool is_policy_holder;
bool is_activator;
bool is_monitor;
have_meta_fake = creds || pids || seclabel;
{
- unsigned seclabel_len;
- unsigned seclabel_item_size = 0;
- unsigned conn_description_len;
- unsigned conn_description_item_size = 0;
- unsigned meta_items_offset = have_meta_fake
+ ulong seclabel_len;
+ ulong seclabel_item_size = 0;
+ ulong conn_description_len;
+ ulong conn_description_item_size = 0;
+ ulong meta_items_offset = have_meta_fake
? offsetof(typeof(*conn), meta_fake.meta_items)
: offsetof(typeof(*conn), meta.conn_description);
BUILD_BUG_ON(offsetof(typeof(*conn), meta_fake.meta_items) % 8);
*
- * Return: true if the name is currently owned by the connection
+ * Return: true if all @name_count names are currently owned by the connection
*/
-bool kdbus_conn_has_names(struct kdbus_conn *conn, unsigned name_count, u8 const *names)
+bool kdbus_conn_has_names(struct kdbus_conn *conn, ulong name_count, u8 const *names)
{
struct kdbus_name_owner *owner;
- unsigned to_go = name_count;
+ ulong to_go = name_count;
kdbus_assert(conn);
kdbus_assert(conn->ep);
kdbus_assert(conn->ep->bus);
if (!(owner->flags & KDBUS_NAME_IN_QUEUE)) {
char const *owned_name = owner->name->name;
u8 const *iter = names;
- unsigned n = name_count;
+ ulong n = name_count;
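+ /* @names appears to be a packed sequence of (u8 length, bytes) records, not NUL-terminated strings */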
for (;;) {
u8 len = *iter++;
if (!memcmp(owned_name, iter, len) && !owned_name[len])
* Return: 0 on success, negative error code on failure.
*/
int kdbus_conn_quota_inc(struct kdbus_conn *c, struct kdbus_user *u,
- unsigned memory, unsigned fds)
+ ulong memory, ulong fds)
{
struct kdbus_quota *quota;
- unsigned available, accounted;
- unsigned id, memory_used;
+ ulong available, accounted;
+ ulong id, memory_used;
int fds_left, msgs_left;
typeof(c->n_quota) n_quota;
kdbus_assert(c);
id = u ? u->id : KDBUS_USER_KERNEL_ID;
if (id >= (n_quota = c->n_quota)) {
- unsigned users;
+ ulong users;
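+ /* grow to an 8-entry boundary past @id; max() keeps the size sane should the aligned sum wrap */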
users = max(KDBUS_ALIGN8(id) + 8, id);
if (!(quota = krealloc(c->quota, users * sizeof(*quota), GFP_KERNEL)))
* kdbus_conn_quota_inc() failed).
*/
void kdbus_conn_quota_dec(struct kdbus_conn *c, struct kdbus_user *u,
- unsigned memory, unsigned fds)
+ ulong memory, ulong fds)
{
struct kdbus_quota *quota;
- unsigned int id;
+ ulong id;
kdbus_assert(c);
kdbus_assert(c->quota);
const struct cred *conn_creds,
struct kdbus_policy_db *db,
struct kdbus_conn *whom,
- unsigned int access)
+ ulong access)
{
struct kdbus_name_owner *owner;
bool pass = false;
conn_creds ? : conn->cred,
owner->name->name,
kdbus_strhash(owner->name->name));
- if (res >= (int)access) {
+ if (res >= (long)access) {
pass = true;
break;
}
const struct cred *conn_creds,
const char *name)
{
- unsigned int hash = kdbus_strhash(name);
+ ulong hash = kdbus_strhash(name);
int res;
#ifdef DISABLE_KDBUS_POLICY
struct kdbus_meta_payload *meta_payload = NULL;
struct kdbus_conn_meta *meta = NULL;
struct kvec kvec[1/*info*/ + MAX_META_EMIT_VECS_CONN];
- unsigned cnt = 0;
- unsigned size;
+ ulong cnt = 0;
+ ulong size;
int ret;
- attach_flags_t attach_flags, valid;
+ ulong attach_flags, valid;
bool fake;
struct kdbus_arg argv[] = {
} else {
cnt += kdbus_meta_emit_stack_kvec(&meta->meta, conn, &kvec[cnt], &meta_stack, attach_flags, &size);
if (attach_flags & (KDBUS_ATTACH_AUXGROUPS|KDBUS_ATTACH_NAMES)) {
- unsigned pay_size;
+ ulong pay_size;
if (!(meta_payload = kdbus_meta_payload_new(attach_flags & KDBUS_ATTACH_AUXGROUPS ? meta->meta.auxgroups_alloc_order : 0))) {
ret = -ENOMEM;
goto exit;
u64 *item_attach_send = NULL;
u64 *item_attach_recv = NULL;
struct kdbus_cmd *cmd;
- attach_flags_t attach_send = 0;
- attach_flags_t attach_recv = 0;
+ ulong attach_send = 0;
+ ulong attach_recv = 0;
int ret;
struct kdbus_arg argv[] = {
if (!(meta_payload = kdbus_meta_payload_new(0)))
return -ENOMEM;
if (0 <= (ret = kdbus_meta_proc_collect(&meta_payload, KDBUS_ATTACH_MASK_ALL))) {
- unsigned pay_size = kdbus_meta_payload_size(meta_payload);
+ ulong pay_size = kdbus_meta_payload_size(meta_payload);
if (!(meta = kmalloc(sizeof(*meta) + pay_size, GFP_KERNEL)))
ret = -ENOMEM;
else {
struct kdbus_staging_user *staging;
struct file *cancel_fd = NULL;
struct kdbus_bus *bus = conn->ep->bus;
- unsigned scratch_offset;
+ ulong scratch_offset;
int ret, ret2;
/* command arguments */
struct kdbus_queue queue;
struct kdbus_quota *quota;
- unsigned int n_quota;
+ unsigned n_quota;
/* protected by registry->rwlock */
struct list_head names_list;
- unsigned int name_count;
+ unsigned name_count;
bool privileged;
bool owner;
wur int kdbus_conn_acquire(struct kdbus_conn *conn);
void kdbus_conn_release(struct kdbus_conn *conn);
wur int kdbus_conn_disconnect(struct kdbus_conn *conn, bool ensure_queue_empty);
-wur bool kdbus_conn_has_names(struct kdbus_conn *conn, unsigned name_count, u8 const *names);
+wur bool kdbus_conn_has_names(struct kdbus_conn *conn, ulong name_count, u8 const *names);
wur int kdbus_conn_quota_inc(struct kdbus_conn *c, struct kdbus_user *u,
- unsigned memory, unsigned fds);
+ ulong memory, ulong fds);
void kdbus_conn_quota_dec(struct kdbus_conn *c, struct kdbus_user *u,
- unsigned memory, unsigned fds);
+ ulong memory, ulong fds);
void kdbus_conn_lost_message(struct kdbus_conn *c);
wur int kdbus_conn_entry_insert(struct kdbus_conn *conn_src,
struct kdbus_conn *conn_dst,
}
static struct kdbus_node *kdbus_domain_control_new(struct kdbus_domain *domain,
- unsigned int access)
+ ulong access)
{
struct kdbus_node *node;
int ret;
*
* Return: a new kdbus_domain on success, ERR_PTR on failure
*/
-struct kdbus_domain *kdbus_domain_new(unsigned int access)
+struct kdbus_domain *kdbus_domain_new(ulong access)
{
struct kdbus_domain *d;
int ret;
*
* Return: 0 on success, negative error code on failure.
*/
-int kdbus_domain_populate(struct kdbus_domain *domain, unsigned int access)
+int kdbus_domain_populate(struct kdbus_domain *domain, ulong access)
{
struct kdbus_node *control;
struct kdbus_user {
struct kref kref;
struct kdbus_domain *domain;
- unsigned int id;
+ unsigned id;
kuid_t uid;
atomic_t buses;
atomic_t connections;
#define kdbus_domain_from_node(_node) \
container_of((_node), struct kdbus_domain, node)
-wur struct kdbus_domain *kdbus_domain_new(unsigned int access);
+wur struct kdbus_domain *kdbus_domain_new(ulong access);
void kdbus_domain_ref(struct kdbus_domain *domain);
void kdbus_domain_unref(struct kdbus_domain *domain);
-wur int kdbus_domain_populate(struct kdbus_domain *domain, unsigned int access);
+wur int kdbus_domain_populate(struct kdbus_domain *domain, ulong access);
#define KDBUS_USER_KERNEL_ID 0 /* ID 0 is reserved for kernel accounting */
* Return: a new kdbus_ep on success, ERR_PTR on failure.
*/
struct kdbus_ep *kdbus_ep_new(struct kdbus_bus *bus, const char *name,
- unsigned int access, kuid_t uid, kgid_t gid,
+ ulong access, kuid_t uid, kgid_t gid,
bool is_custom)
{
struct kdbus_ep *e;
container_of((_node), struct kdbus_ep, node)
wur struct kdbus_ep *kdbus_ep_new(struct kdbus_bus *bus, const char *name,
- unsigned int access, kuid_t uid, kgid_t gid,
+ ulong access, kuid_t uid, kgid_t gid,
bool policy);
wur struct kdbus_ep *kdbus_ep_ref(struct kdbus_ep *ep);
void kdbus_ep_unref(struct kdbus_ep *ep);
static struct dentry *fs_dir_iop_lookup(struct inode *dir,
struct dentry *dentry,
- unsigned int flags)
+ unsigned flags)
{
struct dentry *dnew = NULL;
struct kdbus_node *parent;
* Superblock Management
*/
-static int fs_super_dop_revalidate(struct dentry *dentry, unsigned int flags)
+static int fs_super_dop_revalidate(struct dentry *dentry, unsigned flags)
{
struct kdbus_node *node;
#include "policy.h"
static bool kdbus_invalid_item_size_type(
- unsigned items_size,
+ ulong items_size,
struct kdbus_item const *__restrict__ items,
struct kdbus_item const *__restrict__ item,
- unsigned *__restrict__ payload_size,
+ ulong *__restrict__ payload_size,
kdbus_item_type_t *__restrict__ type)
{
var(type_big, item->type);
return true;
BUILD_BUG_ON(KDBUS_ITEM_HEADER_SIZE != sizeof(struct kdbus_item_header));
- return (int)(*payload_size = (unsigned)size_big - (unsigned)KDBUS_ITEM_HEADER_SIZE) < 0;
+ return (long)(*payload_size = (ulong)size_big - (ulong)KDBUS_ITEM_HEADER_SIZE) < 0;
}
static int kdbus_args_verify_and_negotiate(struct kdbus_args *args)
var(argv, args->argv);
var(items, args->items);
var(argc, args->argc);
- unsigned items_size = args->items_size;
- unsigned i;
+ ulong items_size = args->items_size;
+ ulong i;
int ret;
if (args->cmd->flags & ~(KDBUS_FLAG_NEGOTIATE | args->allowed_flags))
KDBUS_ITEMS_FOREACH(item, items, items_size) {
struct kdbus_arg *arg;
- unsigned payload_size;
+ ulong payload_size;
kdbus_item_type_t type;
if (kdbus_invalid_item_size_type(items_size, items, item, &payload_size, &type))
if (negotiation) {
struct kdbus_item __user *user;
- unsigned j, num;
+ ulong j, num;
user = (struct kdbus_item __user *)
((u8 __user *)args->user +
return 0;
}
-static int __kdbus_user_size(unsigned *__restrict__ user_size, void __user * __restrict__ argp, int min_size)
+static int __kdbus_user_size(ulong *__restrict__ user_size, void __user * __restrict__ argp, int min_size)
{
u64 user_size_big;
int ret;
* returned if negotiation was requested, 0 if not.
*/
int __kdbus_args_parse(struct kdbus_args *args, void __user *argp,
- unsigned type_size, unsigned items_offset, void **out)
+ ulong type_size, ulong items_offset, void **out)
{
- unsigned user_size, aligned_user_size;
+ ulong user_size, aligned_user_size;
int ret;
if ((ret = __kdbus_user_size(&user_size, argp, type_size)))
}
static int kdbus_msg_examine(struct kdbus_msg __user *puser, struct kdbus_msg *__restrict__ msg, struct kdbus_bus *__restrict__ bus,
- struct kdbus_cmd_send const *__restrict__ cmd, unsigned *__restrict__ p_scratch_offset, unsigned scratch_offset)
+ struct kdbus_cmd_send const *__restrict__ cmd, ulong *__restrict__ p_scratch_offset, ulong scratch_offset)
{
var(flags, msg->flags);
var(dst_id, msg->dst_id);
var(staging, (struct kdbus_staging_user *__restrict__)((uintptr_t)msg - offsetof(struct kdbus_staging_user, msg)));
struct iovec *parts = kdbus_parts_of_staging_user(staging);
var(scratch, (struct kdbus_msg_parse_scratch *__restrict__)((uintptr_t)staging + scratch_offset));
- unsigned items_size;
- unsigned bloom_filter_off, iov_1_len, iov_2_off = 0, active_iov_len = offsetof(typeof(*msg), items);
- unsigned dst_name_off = 0;
- unsigned n_payload = 0;
- unsigned n_parts = N_RESERVED_STAGING_USER_PARTS_BEFORE_PAYLOAD;
- unsigned n_memfds = 0;
- unsigned fd_offset = 0;
- unsigned padding;
- kdbus_ondbg(unsigned dbg_staging_size = PAGE_SIZE << staging->meta_payload.alloc_order);
+ ulong items_size;
+ ulong bloom_filter_off, iov_1_len, iov_2_off = 0, active_iov_len = offsetof(typeof(*msg), items);
+ ulong dst_name_off = 0;
+ ulong n_payload = 0;
+ ulong n_parts = N_RESERVED_STAGING_USER_PARTS_BEFORE_PAYLOAD;
+ ulong n_memfds = 0;
+ ulong fd_offset = 0;
+ ulong padding;
+ kdbus_ondbg(ulong dbg_staging_size = PAGE_SIZE << staging->meta_payload.alloc_order);
/*
* Step 1:
items_size = KDBUS_ITEMS_SIZE(msg, items);
KDBUS_ITEMS_FOREACH(item, msg->items, items_size) {
- unsigned payload_size;
+ ulong payload_size;
kdbus_item_type_t type;
if (kdbus_invalid_item_size_type(items_size, msg->items, item, &payload_size, &type))
break;
case KDBUS_ITEM_PAYLOAD_VEC: {
void __user *ptr;
- unsigned size;
+ ulong size;
if (payload_size != sizeof(struct kdbus_vec))
return -EINVAL;
{
typeof(item->vec.size) size_big = item->vec.size;
/* size is bounded by a 32-bit constant */
BUILD_BUG_ON(KDBUS_MSG_MAX_PAYLOAD_VEC_SIZE != (unsigned)KDBUS_MSG_MAX_PAYLOAD_VEC_SIZE);
if (size_big > KDBUS_MSG_MAX_PAYLOAD_VEC_SIZE)
return -EMSGSIZE;
if (!(size = size_big))
vec->iov_base = ptr;
n_payload += vec->iov_len = size;
} else {
- unsigned padding;
+ ulong padding;
item->vec.offset = ~0ULL;
if ((padding = size % 8)) {
var(vec, &parts[n_parts++]);
case KDBUS_ITEM_PAYLOAD_MEMFD: {
typeof(item->memfd.start) memfd_start;
typeof(item->memfd.size) memfd_size;
- unsigned padding;
+ ulong padding;
if (payload_size != sizeof(struct kdbus_memfd))
return -EINVAL;
BUILD_BUG_ON(sizeof(memfd_size) != sizeof(s64));
break;
}
case KDBUS_ITEM_FDS: {
- unsigned n_fds;
+ ulong n_fds;
BUILD_BUG_ON((int)KDBUS_CONN_MAX_FDS_PER_USER != KDBUS_CONN_MAX_FDS_PER_USER);
/* cannot send file-descriptors attached to broadcasts */
if (KDBUS_DST_ID_BROADCAST == dst_id)
return -EINVAL;
if (iov_2_off)
return -EEXIST;
- if ((unsigned)(payload_size - offsetof(struct kdbus_bloom_filter, data)) != (unsigned)bus->bloom.size)
+ if ((ulong)(payload_size - offsetof(struct kdbus_bloom_filter, data)) != (ulong)bus->bloom.size)
return -EDOM;
bloom_filter_off = (uintptr_t)&item->bloom_filter - (uintptr_t)staging;
iov_1_len = KDBUS_ALIGN8(active_iov_len);
*/
if (negotiation) {
- unsigned i, num;
+ ulong i, num;
struct kdbus_item __user *user = (struct kdbus_item __user *)
((u8 __user *)puser +
((uintptr_t)negotiation - (uintptr_t)msg));
kdbus_assert(items_size + offsetof(typeof(*msg), items) == msg->size);
{
- unsigned emit_size = active_iov_len;
+ ulong emit_size = active_iov_len;
if (iov_2_off) { /* bloom filter present */
emit_size += iov_1_len; /* already aligned */
active_iov_len = KDBUS_ALIGN8(active_iov_len);
} else { /* no bloom filter */
bloom_filter_off = 0;
iov_1_len = KDBUS_ALIGN8(active_iov_len);
- iov_2_off = KDBUS_ALIGN8(offsetof(typeof(*staging), msg) + (unsigned)msg->size);
+ iov_2_off = KDBUS_ALIGN8(offsetof(typeof(*staging), msg) + (ulong)msg->size);
active_iov_len = 0;
}
assign_and_assert(staging->emit_size, emit_size);
void __user *__restrict__ argp,
struct kdbus_staging_user *__restrict__ *__restrict__ pptr,
struct kdbus_bus *__restrict__ bus,
- unsigned *__restrict__ p_scratch_offset)
+ ulong *__restrict__ p_scratch_offset)
{
struct kdbus_meta_payload *pay;
struct kdbus_msg *msg;
- unsigned user_size, aligned_user_size;
- unsigned alloc_size_prediction;
+ ulong user_size, aligned_user_size;
+ ulong alloc_size_prediction;
int ret;
kdbus_assert(cmd);
return 0;
}
-static long kdbus_handle_ioctl_control(struct file *file, unsigned int cmd,
+static long kdbus_handle_ioctl_control(struct file *file, ulong cmd,
void __user *argp)
{
struct kdbus_handle *handle = file->private_data;
return ret;
}
-static long kdbus_handle_ioctl_ep(struct file *file, unsigned int cmd,
+static long kdbus_handle_ioctl_ep(struct file *file, ulong cmd,
void __user *buf)
{
struct kdbus_handle *handle = file->private_data;
return ret;
}
-static long kdbus_handle_ioctl_ep_owner(struct file *file, unsigned int command,
+static long kdbus_handle_ioctl_ep_owner(struct file *file, ulong command,
void __user *buf)
{
struct kdbus_handle *handle = file->private_data;
}
static long kdbus_handle_ioctl_connected(struct file *file,
- unsigned int command, void __user *buf)
+ ulong command, void __user *buf)
{
struct kdbus_handle *handle = file->private_data;
struct kdbus_conn *conn = handle->conn;
return ret;
}
-static long kdbus_handle_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
+static long kdbus_handle_ioctl(struct file *file, unsigned cmd,
+ ulong arg)
{
struct kdbus_handle *handle = file->private_data;
struct kdbus_node *node = kdbus_node_from_inode(file_inode(file));
return ret < 0 ? ret : 0;
}
-static unsigned int kdbus_handle_poll(struct file *file,
+static unsigned kdbus_handle_poll(struct file *file,
struct poll_table_struct *wait)
{
struct kdbus_handle *handle = file->private_data;
enum kdbus_handle_type type;
- unsigned int mask = POLLOUT | POLLWRNORM;
+ ulong mask = POLLOUT | POLLWRNORM;
/*
* This pairs with smp_wmb() during handle setup. It guarantees that
};
wur int __kdbus_args_parse(struct kdbus_args *args, void __user *argp,
- unsigned type_size, unsigned items_offset, void **out);
+ ulong type_size, ulong items_offset, void **out);
wur int kdbus_args_clear(struct kdbus_args *args, int ret);
#define kdbus_args_parse(_args, _argp, _v) \
void __user *__restrict__ argp,
struct kdbus_staging_user *__restrict__ *__restrict__ pptr,
struct kdbus_bus *__restrict__ bus,
- unsigned *p_scratch_offset);
+ ulong *p_scratch_offset);
-wur struct kdbus_msg_parse_scratch {
+struct kdbus_msg_parse_scratch {
unsigned fd_offset;
unsigned n_memfds;
unsigned memfd_offsets[0]; /* [n_memfds] */
int kdbus_item_validate_name(const struct kdbus_item *item)
{
const char *name = item->str;
- unsigned int i;
- unsigned len;
+ ulong i;
+ ulong len;
if (item->size < KDBUS_ITEM_HEADER_SIZE + 2)
return -EINVAL;
#include "util.h"
/* generic access and iterators over a stream of items */
-#define KDBUS_ITEM_NEXT_NOALIGN(_i) ((typeof(_i))((uintptr_t)(_i) + (unsigned)(_i)->size))
-#define KDBUS_ITEM_NEXT(_i) ((typeof(_i))((uintptr_t)(_i) + KDBUS_ALIGN8((unsigned)(_i)->size)))
-#define KDBUS_ITEMS_SIZE(_h, _is) ((unsigned)(_h)->size - offsetof(typeof(*(_h)), _is))
+#define KDBUS_ITEM_NEXT_NOALIGN(_i) ((typeof(_i))((uintptr_t)(_i) + (ulong)(_i)->size))
+#define KDBUS_ITEM_NEXT(_i) ((typeof(_i))((uintptr_t)(_i) + KDBUS_ALIGN8((ulong)(_i)->size)))
+#define KDBUS_ITEMS_SIZE(_h, _is) ((ulong)(_h)->size - offsetof(typeof(*(_h)), _is))
#define KDBUS_ITEM_HEADER_SIZE offsetof(struct kdbus_item, data)
#define KDBUS_ITEM_SIZE(_s) KDBUS_ALIGN8(KDBUS_ITEM_HEADER_SIZE + (_s))
-#define KDBUS_ITEM_PAYLOAD_SIZE(_i) ((unsigned)(_i)->size - KDBUS_ITEM_HEADER_SIZE)
+#define KDBUS_ITEM_PAYLOAD_SIZE(_i) ((ulong)(_i)->size - KDBUS_ITEM_HEADER_SIZE)
#define KDBUS_ITEMS_FOREACH(_i, _is, _s) \
for ((_i) = (_is); \
#define _KDBUS_ITEM_TYPE_LAST_ KDBUS_ITEM_REPLY_DEAD
wur int kdbus_item_validate_name(const struct kdbus_item *item);
-wur int kdbus_item_validate(unsigned size, kdbus_item_type_t type, const struct kdbus_item *item);
+wur int kdbus_item_validate(ulong size, kdbus_item_type_t type, const struct kdbus_item *item);
/*
- * This verifies the string at position @str with size @size is properly
- * zero-terminated and does not contain a 0-byte but at the end.
+ * This verifies that the string at position @str with size @size is
+ * properly zero-terminated and contains no 0-byte except at the end.
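+ * E.g. ("ab\0", 3) is valid; ("ab", 2), ("a\0b\0", 4) and ("", 0) are not.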
*/
-static inline wur bool kdbus_str_valid(const char *str, unsigned size)
+static inline wur bool kdbus_str_valid(const char *str, ulong size)
{
return size && memchr(str, '\0', size) == str + size - 1;
}
-static inline void kdbus_copy_string(char * __restrict__ dest, char const * __restrict__ src, unsigned src_len) {
+static inline void kdbus_copy_string(char * __restrict__ dest, char const * __restrict__ src, ulong src_len) {
/* trailing '\0' and zero padding */
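+ /*
+ * The aligned u64 store below zeroes, in one write, the word that will
+ * hold the terminator and any pad bytes up to the next 8-byte boundary;
+ * memcpy() then overwrites its leading bytes. This assumes @dest is
+ * 8-byte aligned with capacity padded to a multiple of 8.
+ */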
*((u64*)((uintptr_t)(dest + src_len) & (uintptr_t)~7ULL)) = 0;
memcpy(dest, src, src_len);
const struct kdbus_staging_user *s)
{
u8 const *ptr;
- unsigned long const *bloom_mask;
+ ulong const *bloom_mask;
struct kdbus_bloom_filter const *filter;
- unsigned filter_generation_off;
- unsigned name_count;
- unsigned bus_bloom_size;
- unsigned bloom_blocks;
- unsigned n;
+ ulong filter_generation_off;
+ ulong name_count;
+ ulong bus_bloom_size;
+ ulong bloom_blocks;
+ ulong n;
if (kdbus_id_mismatch(entry->src_id, c->id) ||
kdbus_id_mismatch(entry->dst_id, s->msg->dst_id))
goto match;
} else {
struct kdbus_item *item = staging.kernel->msg->items;
- unsigned index = (kdbus_item_type_t)item->type - _KDBUS_ITEM_KERNEL_BASE;
+ ulong index = (kdbus_item_type_t)item->type - _KDBUS_ITEM_KERNEL_BASE;
struct list_head *head = &mdb->kernel_match_list[index];
switch (index) {
}
static u8 kdbus_name_len(u64 size, char const *name) {
- unsigned len;
+ ulong len;
char const *ptr;
kdbus_assert(size);
BUILD_BUG_ON(KDBUS_NAME_MAX_LEN > 255);
struct kdbus_cmd_match *cmd;
struct kdbus_item *item;
struct kdbus_item *kernel_item = NULL;
- unsigned bus_bloom_size = conn->ep->bus->bloom.size;
- unsigned user_bloom_count = 0;
- unsigned items_size;
+ ulong bus_bloom_size = conn->ep->bus->bloom.size;
+ ulong user_bloom_count = 0;
+ ulong items_size;
int user_size = 0; /* 0 == no match detected yet, <0 == match impossible, >0 == valid match of given size */
int kernel_size = 0; /* 0 == no match detected yet, <0 == match impossible, >0 == valid match of given size */
int ret;
} else { /* with items */
KDBUS_ITEMS_FOREACH(item, cmd->items, items_size) {
kdbus_item_type_t type = item->type;
- unsigned size = item->size - offsetof(struct kdbus_item, data);
+ ulong size = item->size - offsetof(struct kdbus_item, data);
switch (type) {
case KDBUS_ITEM_ID_REMOVE:
item = cmd->items;
for (;;) {
if (KDBUS_ITEM_BLOOM_MASK == (kdbus_item_type_t)item->type) {
- unsigned size = item->size - offsetof(struct kdbus_item, data);
+ ulong size = item->size - offsetof(struct kdbus_item, data);
*(s32*)out = size;
memcpy(out+sizeof(u64), item->data, size);
out += sizeof(u64) + size;
#include "names.h"
#include "policy.h"
-static struct kdbus_gaps *kdbus_gaps_new(unsigned n_memfds, unsigned n_fds)
+static struct kdbus_gaps *kdbus_gaps_new(ulong n_memfds, ulong n_fds)
{
struct kdbus_gaps *gaps;
- unsigned n_total_fds = n_memfds + n_fds;
+ ulong n_total_fds = n_memfds + n_fds;
/*
* fds are at the end (files[n-n_fds..n-1], single offset at gaps->offset[n-n_fds])
{
bool incomplete_fds = false;
struct kvec kvec;
- unsigned order = 0; /* initialization unnecessary, just to appease -Wmaybe-uninitialized */
- unsigned first_fd_idx;
+ ulong order = 0; /* initialization unnecessary, just to appease -Wmaybe-uninitialized */
+ ulong first_fd_idx;
int n_fds, n_total_fds;
int ret, i, *__restrict__ fds;
unsigned const *__restrict__ offsets;
put_unused_fd(fd);
} while (++first_fd_idx < n_total_fds);
if (n_total_fds > sizeof(fds_stack_buffer)/sizeof(fds_stack_buffer[0]))
- free_pages((unsigned long)fds, order);
+ free_pages((ulong)fds, order);
*out_incomplete = incomplete_fds;
return ret;
}
struct kdbus_staging_kernel *kdbus_staging_kernel_new(struct kdbus_bus *bus,
u64 dst, u64 cookie_timeout,
- unsigned it_size, kdbus_item_type_t it_type)
+ ulong it_size, kdbus_item_type_t it_type)
{
struct kdbus_staging_kernel *staging;
struct kdbus_item *ts;
- unsigned item_size = KDBUS_ITEM_HEADER_SIZE + it_size;
- unsigned aligned_item_size = KDBUS_ALIGN8(item_size);
- unsigned size_but_item = offsetof(struct kdbus_msg, items) +
+ ulong item_size = KDBUS_ITEM_HEADER_SIZE + it_size;
+ ulong aligned_item_size = KDBUS_ALIGN8(item_size);
+ ulong size_but_item = offsetof(struct kdbus_msg, items) +
KDBUS_ITEM_SIZE(sizeof(struct kdbus_timestamp));
staging = kmalloc(sizeof(*staging) + size_but_item + aligned_item_size, GFP_TEMPORARY);
int kdbus_staging_user_init(struct kdbus_bus *__restrict__ bus,
struct kdbus_cmd_send *__restrict__ cmd,
struct kdbus_staging_user *__restrict__ staging,
- unsigned scratch_offset)
+ ulong scratch_offset)
{
struct kdbus_gaps *__restrict__ gaps;
int ret;
int i;
- kdbus_ondbg(unsigned dbg_staging_size = PAGE_SIZE << staging->meta_payload.alloc_order);
+ kdbus_ondbg(ulong dbg_staging_size = PAGE_SIZE << staging->meta_payload.alloc_order);
kdbus_assert(scratch_offset + sizeof(struct kdbus_msg_parse_scratch) < dbg_staging_size);
/*
var(n_memfds, scratch->n_memfds);
struct kdbus_item *__restrict__ fd_item;
unsigned *__restrict__ offsets;
- unsigned n_fds = 0;
+ ulong n_fds = 0;
kdbus_assert(fd_offset || n_memfds);
kdbus_assert((uintptr_t)&scratch->memfd_offsets[n_memfds] - (uintptr_t)staging <= dbg_staging_size);
if (fd_offset) {
i = 0;
while (i < n_memfds) {
- unsigned off = scratch->memfd_offsets[i];
+ ulong off = scratch->memfd_offsets[i];
struct kdbus_memfd *memfd = (struct kdbus_memfd *)((uintptr_t)staging->msg + off);
struct file *f = kdbus_get_memfd(memfd);
if (IS_ERR(f)) {
kdbus_ondbg(staging->auxgroup_off = 0;)
{ /* the final step - create timestamp skeleton after msg ends, store message number in it */
- var(ts, (struct kdbus_item *__restrict__)KDBUS_ALIGN8((uintptr_t)staging->msg + (unsigned)staging->msg->size));
+ var(ts, (struct kdbus_item *__restrict__)KDBUS_ALIGN8((uintptr_t)staging->msg + (ulong)staging->msg->size));
ts->size = KDBUS_ITEM_SIZE(sizeof(struct kdbus_timestamp));
ts->type = KDBUS_ITEM_TIMESTAMP;
ts->timestamp.seqnum = atomic64_inc_return(&bus->last_message_id);
struct kdbus_pool_slice *slice;
struct iovec *parts;
struct kdbus_staging_user *__restrict__ staging = *pstaging;
- unsigned size, n_fds, cnt = 0;
+ ulong size, n_fds, cnt = 0;
int ret;
- attach_flags_t attach = kdbus_meta_msg_mask(src, dst) & (
+ ulong attach = kdbus_meta_msg_mask(src, dst) & (
src->have_meta_fake
? ~(KDBUS_ATTACH_MASK_PROC_DST_AGNOSTIC|KDBUS_ATTACH_MASK_PROC_DST_SENSITIVE) /* fake meta circumvent emission of process-related metadata */
: KDBUS_ATTACH_MASK_ALL);
- attach_flags_t const missing = attach & ~staging->meta_collected & ~KDBUS_ATTACH_CONN_DESCRIPTION;
+ ulong const missing = attach & ~staging->meta_collected & ~KDBUS_ATTACH_CONN_DESCRIPTION;
struct kdbus_meta_stack meta_stack;
if (missing) {
- attach_flags_t gathered = 0;
+ ulong gathered = 0;
BUILD_BUG_ON(offsetof(typeof(*staging), meta_payload));
/*
* Step 1:
cnt += kdbus_meta_emit_stack_iovec(&staging->meta, dst, &parts[cnt], &meta_stack, attach, &size);
cnt += kdbus_meta_emit_iovecs(&parts[cnt], kdbus_meta_payload_ptr(kdbus_meta_payload_of_staging_user(staging)), attach & (KDBUS_ATTACH_MASK_PROC_DST_AGNOSTIC|KDBUS_ATTACH_AUXGROUPS|KDBUS_ATTACH_NAMES), &size, staging->total_meta_name_size);
if (attach & KDBUS_ATTACH_CONN_DESCRIPTION && (item = src->description)) {
- unsigned item_size = item->size;
+ ulong item_size = item->size;
parts[cnt].iov_base = item;
parts[cnt++].iov_len = KDBUS_ALIGN8(item_size);
size = KDBUS_ALIGN8(size) + item_size;
size = KDBUS_ALIGN8(size);
{
- unsigned n_payload = staging->n_payload;
+ ulong n_payload = staging->n_payload;
kdbus_assert(!(n_payload & 7));
if (n_payload) {
int diff = size - staging->i_payload;
*/
if (diff) {
- unsigned items_size = KDBUS_ITEMS_SIZE(staging->msg, items);
+ ulong items_size = KDBUS_ITEMS_SIZE(staging->msg, items);
KDBUS_ITEMS_FOREACH(item, staging->msg->items, items_size)
if (KDBUS_ITEM_PAYLOAD_OFF == (kdbus_item_type_t)item->type && ~0ULL != item->vec.offset)
item->vec.offset += diff;
#ifdef KDBUS_DBG
{
- unsigned dbg_i = 0;
- unsigned dbg_size = parts[0].iov_len;
+ ulong dbg_i = 0;
+ ulong dbg_size = parts[0].iov_len;
bool dbg_seen_stack_iovec = false;
bool dbg_seen_description = false;
void *dbg_prev_ptr = NULL;
- unsigned dbg_prev_len = 0;
+ ulong dbg_prev_len = 0;
kdbus_assert(dbg_size);
kdbus_assert(dbg_size < size);
while (++dbg_i < N_RESERVED_STAGING_USER_PARTS_BEFORE_PAYLOAD) {
void * const dbg_ptr = parts[dbg_i].iov_base;
- unsigned const dbg_len = parts[dbg_i].iov_len;
+ ulong const dbg_len = parts[dbg_i].iov_len;
kdbus_assert((uintptr_t)&parts[dbg_i+1] <= (uintptr_t)staging + staging->meta_payload.meta_offset);
if (!dbg_len)
continue;
/* payload pointers point to payload or zero (must be user pointers or zero) */
while (dbg_i < staging->n_parts) {
void * const dbg_ptr = parts[dbg_i].iov_base;
- unsigned const dbg_len = parts[dbg_i].iov_len;
+ ulong const dbg_len = parts[dbg_i].iov_len;
++dbg_i;
kdbus_assert((uintptr_t)&parts[dbg_i] <= (uintptr_t)staging + staging->meta_payload.meta_offset);
kdbus_assert(dbg_len);
struct kdbus_conn *dst)
{
struct kdbus_pool_slice *slice = NULL;
- unsigned size = KDBUS_ALIGN8(staging->msg->size);
+ ulong size = KDBUS_ALIGN8(staging->msg->size);
struct iovec iov;
int ret;
/* timestamp item is stored directly after message proper, iovecs directly after timestamp */
static inline wur struct kdbus_item *kdbus_timestamp_of_staging_user(struct kdbus_staging_user *staging) {
- return (struct kdbus_item *)(KDBUS_ALIGN8((uintptr_t)staging->msg + (unsigned)staging->msg->size));
+ return (struct kdbus_item *)(KDBUS_ALIGN8((uintptr_t)staging->msg + (ulong)staging->msg->size));
}
static inline wur struct iovec *kdbus_parts_of_staging_user(struct kdbus_staging_user *staging) {
return (struct iovec *)((uintptr_t)kdbus_timestamp_of_staging_user(staging) + KDBUS_ITEM_SIZE(sizeof(struct kdbus_timestamp)));
wur struct kdbus_staging_kernel *kdbus_staging_kernel_new(struct kdbus_bus *bus,
u64 dst, u64 cookie_timeout,
- unsigned it_size, kdbus_item_type_t it_type);
+ ulong it_size, kdbus_item_type_t it_type);
wur int kdbus_staging_user_init(struct kdbus_bus *__restrict__ bus,
struct kdbus_cmd_send *__restrict__ cmd,
struct kdbus_staging_user *__restrict__ staging,
- unsigned scratch_offset);
+ ulong scratch_offset);
void kdbus_staging_kernel_free(struct kdbus_staging_kernel *staging);
void kdbus_staging_user_destroy(struct kdbus_staging_user *staging);
wur struct kdbus_pool_slice *kdbus_staging_user_emit(struct kdbus_staging_user *__restrict__ *__restrict__ staging,
#include "metadata.h"
#include "names.h"
-void kdbus_dst_sensitive_meta_common_destroy(struct kdbus_dst_sensitive_meta_common *common, attach_flags_t valid)
+void kdbus_dst_sensitive_meta_common_destroy(struct kdbus_dst_sensitive_meta_common *common, ulong valid)
{
if (valid & KDBUS_ATTACH_PIDS) {
put_pid(common->pid);
}
}
-void kdbus_dst_sensitive_meta_real_destroy(struct kdbus_dst_sensitive_meta *meta, attach_flags_t valid)
+void kdbus_dst_sensitive_meta_real_destroy(struct kdbus_dst_sensitive_meta *meta, ulong valid)
{
/* fake_creds are a locally allocated structure - nothing to do */
if (valid & (KDBUS_ATTACH_CREDS|KDBUS_ATTACH_AUXGROUPS|KDBUS_ATTACH_CAPS)) {
}
}
-void kdbus_dst_sensitive_meta_destroy(struct kdbus_dst_sensitive_meta *meta, attach_flags_t valid)
+void kdbus_dst_sensitive_meta_destroy(struct kdbus_dst_sensitive_meta *meta, ulong valid)
{
kdbus_dst_sensitive_meta_common_destroy(&meta->common, valid);
kdbus_dst_sensitive_meta_real_destroy(meta, valid);
}
/** Return: number of iovecs emitted */
-wur int _kdbus_meta_emit_stack_(struct kdbus_dst_sensitive_meta_common const *__restrict__ meta, struct kdbus_conn const *__restrict__ dst, struct k_or_io_vec vec, struct kdbus_meta_stack *__restrict__ buf, attach_flags_t what, unsigned *__restrict__ size, bool fake)
+wur int _kdbus_meta_emit_stack_(struct kdbus_dst_sensitive_meta_common const *__restrict__ meta, struct kdbus_conn const *__restrict__ dst, struct k_or_io_vec vec, struct kdbus_meta_stack *__restrict__ buf, ulong what, ulong *__restrict__ size, bool fake)
{
struct kdbus_item *__restrict__ item;
struct user_namespace *__restrict__ user_ns = dst->cred->user_ns;
- unsigned outsize=0;
+ ulong outsize=0;
BUILD_BUG_ON(sizeof(*buf) != KDBUS_ITEM_SIZE(sizeof(struct kdbus_meta_caps)) + KDBUS_ITEM_SIZE(sizeof(struct kdbus_creds)) + KDBUS_ITEM_SIZE(sizeof(struct kdbus_pids)) + KDBUS_ITEM_SIZE(sizeof(struct kdbus_audit)));
BUILD_BUG_ON(offsetof(struct kdbus_dst_sensitive_meta, common));
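+ /* offsetof(..., common) == 0 keeps the containing metas and their common header pointer-interchangeable (see the emit_stack wrappers) */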
item->size = KDBUS_ITEM_SIZE(sizeof(struct kdbus_##TYPE_STRUCT));\
item->type = KDBUS_ITEM_##TYPE_ENUM;\
if (sizeof(struct kdbus_##TYPE_STRUCT) % 8)\
- *((unsigned long*)((uintptr_t)buf + outsize) - 1) = 0; /* clear padding */\
+ *((ulong*)((uintptr_t)buf + outsize) - 1) = 0; /* clear padding */\
} while (0)
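+/* the conditional store above wipes the item's final 8-byte word so that struct padding is never emitted uninitialized (presumably an infoleak guard) */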
if (what & KDBUS_ATTACH_PIDS) { /* common - no need to check fake */
#undef WRITE_HEAD
}
-wur attach_flags_t kdbus_dst_sensitive_meta_collect(struct kdbus_dst_sensitive_meta *meta, attach_flags_t what)
+wur ulong kdbus_dst_sensitive_meta_collect(struct kdbus_dst_sensitive_meta *meta, ulong what)
{
/* creds, auxgrps and caps share "struct cred" as context */
if (what & (KDBUS_ATTACH_CREDS|KDBUS_ATTACH_AUXGROUPS|KDBUS_ATTACH_CAPS)) {
return valid;
}
-static wur struct kdbus_meta_payload *kdbus_meta_payload_expand(struct kdbus_meta_payload *__restrict__ *__restrict__ pptr, unsigned missing_space)
+static wur struct kdbus_meta_payload *kdbus_meta_payload_expand(struct kdbus_meta_payload *__restrict__ *__restrict__ pptr, ulong missing_space)
{
var(old_ptr, *pptr);
- unsigned old_alloc_order = old_ptr->alloc_order;
- unsigned old_full_size = PAGE_SIZE << old_alloc_order;
- unsigned new_alloc_order = kdbus_page_alloc_order(old_full_size + missing_space);
- unsigned new_full_size = PAGE_SIZE << new_alloc_order;
- unsigned tail_offset = old_ptr->tail_offset;
+ ulong old_alloc_order = old_ptr->alloc_order;
+ ulong old_full_size = PAGE_SIZE << old_alloc_order;
+ ulong new_alloc_order = kdbus_page_alloc_order(old_full_size + missing_space);
+ ulong new_full_size = PAGE_SIZE << new_alloc_order;
+ ulong tail_offset = old_ptr->tail_offset;
var(new_ptr, (struct kdbus_meta_payload *__restrict__)__get_free_pages(GFP_TEMPORARY, new_alloc_order));
if (!new_ptr)
return NULL;
kdbus_assert(tail_offset <= old_full_size);
memcpy(new_ptr, old_ptr, tail_offset);
- free_pages((unsigned long)old_ptr, old_alloc_order);
+ free_pages((ulong)old_ptr, old_alloc_order);
new_ptr->alloc_order = new_alloc_order;
new_ptr->meta_space = new_full_size - tail_offset;
*pptr = new_ptr;
return new_ptr;
}
-static wur int kdbus_meta_payload_write_head_and_pad(struct kdbus_meta_payload * __restrict__ * __restrict__ pptr, kdbus_item_type_t type, unsigned unpadded_item_size)
+static wur int kdbus_meta_payload_write_head_and_pad(struct kdbus_meta_payload * __restrict__ * __restrict__ pptr, kdbus_item_type_t type, ulong unpadded_item_size)
{
var(pay, *pptr);
struct kdbus_item * __restrict__ item;
return 0;
}
-static void kdbus_meta_payload_write_head_preallocated(struct kdbus_meta_payload *__restrict__ pay, kdbus_item_type_t type, unsigned item_size)
+static void kdbus_meta_payload_write_head_preallocated(struct kdbus_meta_payload *__restrict__ pay, kdbus_item_type_t type, ulong item_size)
{
struct kdbus_item * __restrict__ item = kdbus_meta_payload_item(pay);
item->size = item_size;
kdbus_meta_payload_advance(pay, item_size);
}
-static wur int kdbus_meta_payload_write_head(struct kdbus_meta_payload * __restrict__ * __restrict__ pptr, kdbus_item_type_t type, unsigned item_size)
+static wur int kdbus_meta_payload_write_head(struct kdbus_meta_payload * __restrict__ * __restrict__ pptr, kdbus_item_type_t type, ulong item_size)
{
var(pay, *pptr);
int missing_space = item_size - kdbus_meta_payload_space(pay);
return 0;
}
-void kdbus_write_string_item(struct kdbus_item *__restrict__ item, kdbus_item_type_t type, char const *__restrict__ str, unsigned len)
+void kdbus_write_string_item(struct kdbus_item *__restrict__ item, kdbus_item_type_t type, char const *__restrict__ str, ulong len)
{
- unsigned item_size = KDBUS_ITEM_HEADER_SIZE + len+1;
+ ulong item_size = KDBUS_ITEM_HEADER_SIZE + len+1;
item->size = item_size;
item->type = type;
kdbus_copy_string(item->str, str, len);
}
-static void kdbus_meta_payload_write_string_item_preallocated(struct kdbus_meta_payload *__restrict__ pay, kdbus_item_type_t type, char const *__restrict__ str, unsigned len)
+static void kdbus_meta_payload_write_string_item_preallocated(struct kdbus_meta_payload *__restrict__ pay, kdbus_item_type_t type, char const *__restrict__ str, ulong len)
{
struct kdbus_item * __restrict__ item = kdbus_meta_payload_item(pay);
kdbus_write_string_item(item, type, str, len);
{
var(pay, *pptr);
int missing_space;
- unsigned len;
+ ulong len;
len = strlen(str);
missing_space = KDBUS_ITEM_SIZE(len+1) - kdbus_meta_payload_space(pay);
if (unlikely(missing_space > 0) && unlikely(!(pay = kdbus_meta_payload_expand(pptr, missing_space))))
*
* Return: valid flags on success, negative error code on failure.
*/
-wur int kdbus_meta_proc_collect(struct kdbus_meta_payload * __restrict__ * __restrict__ pptr, attach_flags_t what)
+wur int kdbus_meta_proc_collect(struct kdbus_meta_payload * __restrict__ * __restrict__ pptr, ulong what)
{
typeof(*pptr) pay;
void *tmp_page = NULL;
end_page:
if (tmp_page)
- free_page((unsigned long)tmp_page);
+ free_page((ulong)tmp_page);
return valid;
}
list_for_each_entry(owner, &conn->names_list, conn_entry) {
char const *__restrict__ name;
- unsigned len;
- unsigned item_size;
+ ulong len;
+ ulong item_size;
if (owner->flags & KDBUS_NAME_IN_QUEUE)
continue;
len = strlen(name = owner->name->name);
return total_size;
}
-unsigned _kdbus_meta_emit_(struct k_or_io_vec vec, struct kdbus_item *__restrict__ meta_item, attach_flags_t what, unsigned *__restrict__ size, unsigned total_meta_name_size)
+ulong _kdbus_meta_emit_(struct k_or_io_vec vec, struct kdbus_item *__restrict__ meta_item, ulong what, ulong *__restrict__ size, ulong total_meta_name_size)
{
- unsigned cnt = 0;
+ ulong cnt = 0;
struct kdbus_item *__restrict__ item = NULL;
- unsigned chain_size;
- unsigned total_size = 0;
- kdbus_ondbg(attach_flags_t seen = 0;)
+ ulong chain_size;
+ ulong total_size = 0;
+ kdbus_ondbg(ulong seen = 0;)
#define ASSERT_ITEM_ATTACH_VALUES(WHAT) BUILD_BUG_ON(KDBUS_ATTACH_##WHAT != (1ULL << (KDBUS_ITEM_##WHAT-_KDBUS_ITEM_ATTACH_BASE)))
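+/* compile-time proof that each KDBUS_ATTACH_* flag is exactly bit (KDBUS_ITEM_* - _KDBUS_ITEM_ATTACH_BASE); the shift below relies on it */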
ASSERT_ITEM_ATTACH_VALUES(AUXGROUPS);
if (!what)
return 0;
for (;;) {
- attach_flags_t flag;
- unsigned item_size;
+ ulong flag;
+ ulong item_size;
kdbus_assert(_KDBUS_ITEM_ATTACH_BASE <= meta_item->type);
kdbus_assert(meta_item->type < _KDBUS_ITEM_ATTACH_BASE+8*sizeof(flag));
- flag = 1 << ((kdbus_item_type_t)meta_item->type - _KDBUS_ITEM_ATTACH_BASE);
+ flag = 1UL << ((kdbus_item_type_t)meta_item->type - _KDBUS_ITEM_ATTACH_BASE); /* ulong-wide shift: the assert above now admits bits past 31 */
kdbus_assert(flag);
kdbus_assert(!(flag & seen));
kdbus_ondbg(seen |= flag);
- item_size = (flag == KDBUS_ATTACH_NAMES) ? total_meta_name_size : (unsigned)meta_item->size;
+ item_size = (flag == KDBUS_ATTACH_NAMES) ? total_meta_name_size : (ulong)meta_item->size;
if (flag & what) {
if (!item) { /* first in chain */
chain_size = 0;
*
* Return: Set of metadata flags the requestor can see (limited by @wanted).
*/
-static attach_flags_t kdbus_meta_proc_mask(struct pid *prv_pid,
+static ulong kdbus_meta_proc_mask(struct pid *prv_pid,
struct pid *req_pid,
const struct cred *req_cred,
- attach_flags_t wanted)
+ ulong wanted)
{
struct pid_namespace *req_ns = ns_of_pid(req_pid);
KDBUS_ATTACH_EXE);
}
-attach_flags_t kdbus_sanitize_exe_flag(attach_flags_t flags, struct kdbus_conn *dst)
+ulong kdbus_sanitize_exe_flag(ulong flags, struct kdbus_conn *dst)
{
if (flags & KDBUS_ATTACH_EXE) {
struct path p;
*
* Return: Mask of metadata that is granted.
*/
-static attach_flags_t kdbus_meta_get_mask(struct pid *prv_pid, attach_flags_t prv_mask,
+static ulong kdbus_meta_get_mask(struct pid *prv_pid, ulong prv_mask,
struct pid *req_pid,
- const struct cred *req_cred, attach_flags_t req_mask)
+ const struct cred *req_cred, ulong req_mask)
{
- attach_flags_t missing;
+ ulong missing;
/*
* Connection metadata and basic unix process credentials are
void kdbus_meta_payload_collect_auxgroups_preallocated(struct kdbus_meta_payload *__restrict__ pay, struct kdbus_dst_sensitive_meta const *__restrict__ meta, struct kdbus_conn *__restrict__ dst)
{
- unsigned tail_offset = kdbus_meta_payload_tail_offset(pay);
+ ulong tail_offset = kdbus_meta_payload_tail_offset(pay);
int ngroups;
kdbus_assert(meta);
kdbus_assert(meta->cred);
kdbus_fill_auxgroups_meta_item((struct kdbus_item *__restrict__)((uintptr_t)pay + tail_offset), meta, dst);
}
-attach_flags_t kdbus_meta_msg_mask(const struct kdbus_conn *snd,
+ulong kdbus_meta_msg_mask(const struct kdbus_conn *snd,
const struct kdbus_conn *rcv)
{
return kdbus_meta_get_mask(task_pid(current),
struct kdbus_conn;
struct kdbus_pool_slice;
-typedef unsigned attach_flags_t; /* u16 would have been enough but slower */
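+/* storage-only type: attach flags fit in 16 bits; computation paths now take ulong */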
+typedef u16 attach_flags_t;
typedef atomic_t atomic_attach_flags_t;
#define atomic_attach_flags_set atomic_set
#define atomic_attach_flags_read atomic_read
*
* Return: 0 on success, negative error on failure.
*/
-static inline wur int kdbus_sanitize_attach_flags(u64 flags, attach_flags_t *attach_flags)
+static inline wur int kdbus_sanitize_attach_flags(u64 flags, ulong *attach_flags)
{
/* 'any' degrades to 'all' for compatibility */
if (flags == _KDBUS_ATTACH_ANY)
kgid_t gid, egid, sgid, fsgid;
};
-wur attach_flags_t kdbus_dst_sensitive_meta_collect(struct kdbus_dst_sensitive_meta *, attach_flags_t);
-void kdbus_dst_sensitive_meta_destroy(struct kdbus_dst_sensitive_meta *, attach_flags_t valid);
-void kdbus_dst_sensitive_meta_common_destroy(struct kdbus_dst_sensitive_meta_common *common, attach_flags_t valid);
-void kdbus_dst_sensitive_meta_real_destroy(struct kdbus_dst_sensitive_meta *meta, attach_flags_t valid);
+wur ulong kdbus_dst_sensitive_meta_collect(struct kdbus_dst_sensitive_meta *, ulong);
+void kdbus_dst_sensitive_meta_destroy(struct kdbus_dst_sensitive_meta *, ulong valid);
+void kdbus_dst_sensitive_meta_common_destroy(struct kdbus_dst_sensitive_meta_common *common, ulong valid);
+void kdbus_dst_sensitive_meta_real_destroy(struct kdbus_dst_sensitive_meta *meta, ulong valid);
wur int kdbus_dst_sensitive_meta_fake_collect(
struct kdbus_dst_sensitive_meta_fake * __restrict__ meta,
unsigned meta_space;
} __attribute__((aligned(8)));
-static inline void kdbus_meta_payload_set_meta_offset(struct kdbus_meta_payload *pay, unsigned offset) {
+static inline void kdbus_meta_payload_set_meta_offset(struct kdbus_meta_payload *pay, ulong offset) {
pay->meta_offset = pay->tail_offset = offset;
pay->meta_space = (PAGE_SIZE << pay->alloc_order) - offset;
}
-static inline wur struct kdbus_meta_payload *kdbus_meta_payload_new_offset_later(unsigned order) {
+static inline wur struct kdbus_meta_payload *kdbus_meta_payload_new_offset_later(ulong order) {
var(p, (struct kdbus_meta_payload *)__get_free_pages(GFP_TEMPORARY, order));
if (p)
p->alloc_order = order;
return p;
}
-static inline wur struct kdbus_meta_payload *kdbus_meta_payload_new(unsigned order) {
+static inline wur struct kdbus_meta_payload *kdbus_meta_payload_new(ulong order) {
var(p, kdbus_meta_payload_new_offset_later(order));
if (p)
kdbus_meta_payload_set_meta_offset(p, sizeof(*p));
return p;
}
static inline void kdbus_meta_payload_free(struct kdbus_meta_payload *p) {
- free_pages((unsigned long)p, p->alloc_order);
+ free_pages((ulong)p, p->alloc_order);
}
-static inline wur unsigned kdbus_meta_payload_tail_offset(struct kdbus_meta_payload *p) {
+static inline wur ulong kdbus_meta_payload_tail_offset(struct kdbus_meta_payload *p) {
kdbus_assert(!(p->tail_offset % 8));
return p->tail_offset;
}
-static inline wur unsigned kdbus_meta_payload_space(struct kdbus_meta_payload *p) { return p->meta_space; }
+static inline wur ulong kdbus_meta_payload_space(struct kdbus_meta_payload *p) { return p->meta_space; }
static inline wur struct kdbus_item *kdbus_meta_payload_item(struct kdbus_meta_payload *p) { return (struct kdbus_item *)((uintptr_t)p + kdbus_meta_payload_tail_offset(p)); }
-static inline void kdbus_meta_payload_rewind(struct kdbus_meta_payload *pay, unsigned tail_offset) {
+static inline void kdbus_meta_payload_rewind(struct kdbus_meta_payload *pay, ulong tail_offset) {
var(old_offset, pay->tail_offset);
kdbus_assert(tail_offset <= old_offset);
kdbus_assert(tail_offset <= (PAGE_SIZE << pay->alloc_order));
pay->meta_space += old_offset-tail_offset;
kdbus_assert(pay->meta_space + pay->tail_offset == (PAGE_SIZE << pay->alloc_order));
}
-static inline void kdbus_meta_payload_advance(struct kdbus_meta_payload *pay, unsigned bytes) {
+static inline void kdbus_meta_payload_advance(struct kdbus_meta_payload *pay, ulong bytes) {
kdbus_assert(pay->meta_space >= bytes);
kdbus_assert(pay->meta_space + pay->tail_offset == (PAGE_SIZE << pay->alloc_order));
pay->tail_offset += bytes;
kdbus_assert(pay->meta_space + pay->tail_offset == (PAGE_SIZE << pay->alloc_order));
}
/* total size of collected metadata (valid until the first addition) */
-static inline wur unsigned kdbus_meta_payload_size(struct kdbus_meta_payload *load) {
+static inline wur ulong kdbus_meta_payload_size(struct kdbus_meta_payload *load) {
kdbus_assert(load->tail_offset >= load->meta_offset);
return load->tail_offset - load->meta_offset;
}
void *_vec_;
};
-wur attach_flags_t kdbus_sanitize_exe_flag(attach_flags_t flags, struct kdbus_conn *dst);
+wur ulong kdbus_sanitize_exe_flag(ulong flags, struct kdbus_conn *dst);
#define IOVEC_KVEC_BINARY_COMPATIBLE (\
sizeof(struct kvec) == sizeof(struct iovec) &&\
BUILD_BUG_ON(_Alignof(struct iovec) <= 1);
return (struct k_or_io_vec){ vec };
}
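+/*
+ * Sketch of the scheme, as read from ADVANCE_K_OR_IO_VEC below: a
+ * k_or_io_vec holds either a struct kvec * or a struct iovec *; when the
+ * two layouts are not binary compatible, the low pointer bit (free, since
+ * alignment > 1 per the BUILD_BUG_ON above) tags which one it is, and is
+ * masked off before dereferencing.
+ */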
-static inline void kdbus_push_vec(struct k_or_io_vec * __restrict__ vec, void * __restrict__ ptr, unsigned size) {
+static inline void kdbus_push_vec(struct k_or_io_vec * __restrict__ vec, void * __restrict__ ptr, ulong size) {
uintptr_t tagged_vec = (uintptr_t)vec->_vec_;
#define ADVANCE_K_OR_IO_VEC(TYPE,TAG) do {\
struct TYPE *v = (struct TYPE *)(tagged_vec & ~(uintptr_t)TAG);\
#undef IOVEC_KVEC_BINARY_COMPATIBLE
-wur int _kdbus_meta_emit_stack_(struct kdbus_dst_sensitive_meta_common const *meta, struct kdbus_conn const *dst, struct k_or_io_vec vec, struct kdbus_meta_stack *buf, attach_flags_t what, unsigned *size, bool fake);
-static inline wur int kdbus_meta_emit_stack_iovec(struct kdbus_dst_sensitive_meta const *meta, struct kdbus_conn const *dst, struct iovec *vec, struct kdbus_meta_stack *buf, attach_flags_t what, unsigned *size) {
+wur int _kdbus_meta_emit_stack_(struct kdbus_dst_sensitive_meta_common const *meta, struct kdbus_conn const *dst, struct k_or_io_vec vec, struct kdbus_meta_stack *buf, ulong what, ulong *size, bool fake);
+static inline wur int kdbus_meta_emit_stack_iovec(struct kdbus_dst_sensitive_meta const *meta, struct kdbus_conn const *dst, struct iovec *vec, struct kdbus_meta_stack *buf, ulong what, ulong *size) {
BUILD_BUG_ON(offsetof(typeof(*meta), common));
return _kdbus_meta_emit_stack_(&meta->common, dst, kdbus_wrap_iovec(vec), buf, what, size, false);
}
-static inline wur int kdbus_meta_emit_stack_kvec(struct kdbus_dst_sensitive_meta const *meta, struct kdbus_conn const *dst, struct kvec *vec, struct kdbus_meta_stack *buf, attach_flags_t what, unsigned *size) {
+static inline wur int kdbus_meta_emit_stack_kvec(struct kdbus_dst_sensitive_meta const *meta, struct kdbus_conn const *dst, struct kvec *vec, struct kdbus_meta_stack *buf, ulong what, ulong *size) {
BUILD_BUG_ON(offsetof(typeof(*meta), common));
return _kdbus_meta_emit_stack_(&meta->common, dst, kdbus_wrap_kvec(vec), buf, what, size, false);
}
-static inline wur int kdbus_meta_emit_stack_fake_kvec(struct kdbus_dst_sensitive_meta_fake const *meta, struct kdbus_conn const *dst, struct kvec *vec, struct kdbus_meta_stack *buf, attach_flags_t what, unsigned *size) {
+static inline wur int kdbus_meta_emit_stack_fake_kvec(struct kdbus_dst_sensitive_meta_fake const *meta, struct kdbus_conn const *dst, struct kvec *vec, struct kdbus_meta_stack *buf, ulong what, ulong *size) {
BUILD_BUG_ON(offsetof(typeof(*meta), common));
return _kdbus_meta_emit_stack_(&meta->common, dst, kdbus_wrap_kvec(vec), buf, what, size, true);
}
return (struct kdbus_item *)((uintptr_t)load + load->meta_offset);
}
-void kdbus_write_string_item(struct kdbus_item *__restrict__ item, kdbus_item_type_t type, char const *__restrict__ str, unsigned len);
+void kdbus_write_string_item(struct kdbus_item *__restrict__ item, kdbus_item_type_t type, char const *__restrict__ str, ulong len);
-wur int kdbus_meta_proc_collect(struct kdbus_meta_payload *__restrict__ *__restrict__ pptr, attach_flags_t what);
+wur int kdbus_meta_proc_collect(struct kdbus_meta_payload *__restrict__ *__restrict__ pptr, ulong what);
-wur unsigned _kdbus_meta_emit_(struct k_or_io_vec vec, struct kdbus_item *meta_item, attach_flags_t what, unsigned *size, unsigned total_meta_name_size);
+wur ulong _kdbus_meta_emit_(struct k_or_io_vec vec, struct kdbus_item *meta_item, ulong what, ulong *size, ulong total_meta_name_size);
-static inline wur unsigned kdbus_meta_emit_kvecs(struct kvec *vec, struct kdbus_item *meta_item, attach_flags_t what, unsigned *size) {
+static inline wur ulong kdbus_meta_emit_kvecs(struct kvec *vec, struct kdbus_item *meta_item, ulong what, ulong *size) {
return _kdbus_meta_emit_(kdbus_wrap_kvec(vec), meta_item, what, size, 0);
}
-static inline wur unsigned kdbus_meta_emit_iovecs(struct iovec *vec, struct kdbus_item *meta_item, attach_flags_t what, unsigned *size, unsigned total_meta_name_size) {
+static inline wur ulong kdbus_meta_emit_iovecs(struct iovec *vec, struct kdbus_item *meta_item, ulong what, ulong *size, ulong total_meta_name_size) {
return _kdbus_meta_emit_(kdbus_wrap_iovec(vec), meta_item, what, size, total_meta_name_size);
}
wur int kdbus_meta_payload_collect_names(struct kdbus_meta_payload *__restrict__ *__restrict__ pptr, struct kdbus_conn const *__restrict__ owner_conn);
-wur attach_flags_t kdbus_meta_msg_mask(const struct kdbus_conn *snd,
+wur ulong kdbus_meta_msg_mask(const struct kdbus_conn *snd,
const struct kdbus_conn *rcv);
void kdbus_collect_timestamp(struct kdbus_timestamp *ts);
const char *name_str)
{
struct kdbus_name_entry *name;
- unsigned namelen;
+ ulong namelen;
lockdep_assert_held(&r->rwlock);
static int kdbus_list_write(struct kdbus_conn *conn,
struct kdbus_conn *c,
struct kdbus_pool_slice *slice,
- unsigned *pos,
+ ulong *pos,
struct kdbus_name_owner *o)
{
struct kvec kvec[4];
- unsigned cnt = 0;
- unsigned size;
+ ulong cnt = 0;
+ ulong size;
int ret;
/* info header */
/* append name */
if (o) {
- unsigned slen = strlen(o->name->name) + 1;
+ ulong slen = strlen(o->name->name) + 1;
h.size = offsetof(struct kdbus_item, name.name) + slen;
h.type = KDBUS_ITEM_OWNED_NAME;
static int kdbus_list_all(struct kdbus_conn *conn, u64 flags,
struct kdbus_pool_slice *slice,
- unsigned *pos)
+ ulong *pos)
{
struct kdbus_conn *c;
var(bus, conn->ep->bus);
- unsigned p = *pos;
+ ulong p = *pos;
int ret, i;
hash_for_each(bus->conn_hash, i, c, hentry) {
struct kdbus_bus *bus = ep->bus;
struct kdbus_pool_slice *slice = NULL;
struct kdbus_cmd_list *cmd;
- unsigned pos, size;
+ ulong pos, size;
int ret;
struct kdbus_args args = {
*
* Return: hash value of the passed string
*/
-static unsigned int kdbus_node_name_hash(const char *name)
+static ulong kdbus_node_name_hash(const char *name)
{
- unsigned int hash;
-
/* reserve hash numbers 0, 1 and >=INT_MAX for magic directories */
- hash = kdbus_strhash(name) & INT_MAX;
+ ulong hash = kdbus_strhash(name) & INT_MAX;
if (hash < 2)
hash += 2;
if (hash >= INT_MAX)
* an integer less than or greater than zero if @name is found, respectively,
* to be less than or be greater than the string stored in @node.
*/
-static int kdbus_node_name_compare(unsigned int hash, const char *name,
+static long kdbus_node_name_compare(ulong hash, const char *name,
const struct kdbus_node *node)
{
if (hash != node->hash)
* Once this call returns, you must use the node_ref() and node_unref()
* functions to manage this node.
*/
-void kdbus_node_init(struct kdbus_node *node, unsigned int type)
+void kdbus_node_init(struct kdbus_node *node, unsigned type)
{
atomic_set(&node->refcnt, 1);
mutex_init(&node->lock);
{
struct kdbus_node *child;
struct rb_node *rb;
- unsigned int hash;
+ ulong hash;
int ret;
hash = kdbus_node_name_hash(name);
}
static struct kdbus_node *node_find_closest_unlocked(struct kdbus_node *node,
- unsigned int hash,
+ ulong hash,
const char *name)
{
struct kdbus_node *n, *pos = NULL;
* Return: Reference to acquired child, or NULL if none found.
*/
struct kdbus_node *kdbus_node_find_closest(struct kdbus_node *node,
- unsigned int hash)
+ ulong hash)
{
struct kdbus_node *child;
struct rb_node *rb;
wait_queue_head_t waitq;
/* static members */
- unsigned int type;
+ unsigned type;
kdbus_node_free_t free_cb;
kdbus_node_release_t release_cb;
umode_t mode;
/* valid once linked */
char *name;
- unsigned hash;
+ ulong hash;
unsigned id;
struct kdbus_node *parent; /* may be NULL */
extern struct ida kdbus_node_ida;
-void kdbus_node_init(struct kdbus_node *node, unsigned int type);
+void kdbus_node_init(struct kdbus_node *node, unsigned type);
wur int kdbus_node_link(struct kdbus_node *node, struct kdbus_node *parent,
const char *name);
wur struct kdbus_node *kdbus_node_find_child(struct kdbus_node *node,
const char *name);
wur struct kdbus_node *kdbus_node_find_closest(struct kdbus_node *node,
- unsigned int hash);
+ ulong hash);
wur struct kdbus_node *kdbus_node_next_child(struct kdbus_node *node,
struct kdbus_node *prev);
u64 old_flags, u64 new_flags,
const char *name)
{
- unsigned name_len;
+ ulong name_len;
struct kdbus_staging_kernel *s;
name_len = strlen(name);
kfree(e);
}
-static unsigned int kdbus_strnhash(const char *str, unsigned len)
+static ulong kdbus_strnhash(const char *str, ulong len)
{
- unsigned long hash = init_name_hash();
+ ulong hash = init_name_hash();
while (len--)
hash = partial_name_hash(*str++, hash);
{
struct kdbus_policy_db_entry *e;
const char *dot;
- unsigned len;
+ ulong len;
/* find exact match */
hash_for_each_possible(db->entries_hash, e, hentry, hash)
{
struct kdbus_policy_db_entry *e;
struct hlist_node *tmp;
- unsigned int i;
+ ulong i;
/* purge entries */
down_write(&db->entries_rwlock);
*/
int kdbus_policy_query_unlocked(struct kdbus_policy_db *db,
const struct cred *cred, const char *name,
- unsigned int hash)
+ ulong hash)
{
struct kdbus_policy_db_entry_access *a;
const struct kdbus_policy_db_entry *e;
* Return: The highest KDBUS_POLICY_* access type found, or -EPERM if none.
*/
int kdbus_policy_query(struct kdbus_policy_db *db, const struct cred *cred,
- const char *name, unsigned int hash)
+ const char *name, ulong hash)
{
int ret;
*/
int kdbus_policy_set(struct kdbus_policy_db *db,
const struct kdbus_item *items,
- unsigned items_size,
- unsigned max_policies,
+ ulong items_size,
+ ulong max_policies,
bool allow_wildcards,
const void *owner)
{
struct hlist_node *tmp;
HLIST_HEAD(entries);
HLIST_HEAD(restore);
- unsigned count = 0;
+ ulong count = 0;
int i, ret = 0;
u32 hash;
KDBUS_ITEMS_FOREACH(item, items, items_size) {
switch ((kdbus_item_type_t)item->type) {
case KDBUS_ITEM_NAME: {
- unsigned len;
+ ulong len;
if (max_policies && ++count > max_policies) {
ret = -E2BIG;
wur int kdbus_policy_query_unlocked(struct kdbus_policy_db *db,
const struct cred *cred, const char *name,
- unsigned int hash);
+ ulong hash);
wur int kdbus_policy_query(struct kdbus_policy_db *db, const struct cred *cred,
- const char *name, unsigned int hash);
+ const char *name, ulong hash);
void kdbus_policy_remove_owner(struct kdbus_policy_db *db,
const void *owner);
wur int kdbus_policy_set(struct kdbus_policy_db *db,
const struct kdbus_item *items,
- unsigned items_size,
- unsigned max_policies,
+ ulong items_size,
+ ulong max_policies,
bool allow_wildcards,
const void *owner);
#include "util.h"
static struct kdbus_pool_slice *kdbus_pool_slice_new(struct kdbus_pool *pool,
- unsigned off, unsigned size)
+ ulong off, ulong size)
{
struct kdbus_pool_slice *slice;
}
static struct kdbus_pool_slice *kdbus_pool_find_slice(struct kdbus_pool *pool,
- unsigned off)
+ ulong off)
{
struct rb_node *n;
* Return: the allocated slice on success, ERR_PTR on failure.
*/
struct kdbus_pool_slice *kdbus_pool_slice_alloc(struct kdbus_pool *pool,
- unsigned size, bool accounted)
+ ulong size, bool accounted)
{
- unsigned slice_size = KDBUS_ALIGN8(size);
+ ulong slice_size = KDBUS_ALIGN8(size);
struct rb_node *n, *found = NULL;
struct kdbus_pool_slice *s;
int ret = 0;
*
- * Return: 0 on success, ENXIO if the offset is invalid or not public.
+ * Return: 0 on success, -ENXIO if the offset is invalid or not public.
*/
-int kdbus_pool_release_offset(struct kdbus_pool *pool, unsigned off)
+int kdbus_pool_release_offset(struct kdbus_pool *pool, ulong off)
{
struct kdbus_pool_slice *slice;
int ret = 0;
*
* Return: size of the given slice
*/
-unsigned kdbus_pool_slice_size(const struct kdbus_pool_slice *slice)
+ulong kdbus_pool_slice_size(const struct kdbus_pool_slice *slice)
{
return slice->size;
}
*
* Return: 0 on success, -error on failure
*/
-int kdbus_pool_init(struct kdbus_pool *p, const char *name, unsigned size)
+int kdbus_pool_init(struct kdbus_pool *p, const char *name, ulong size)
{
struct kdbus_pool_slice *s;
struct file *f;
* change after the function returns, as the pool lock is dropped. You need to
* protect the data via other means, if you need reliable accounting.
*/
-void kdbus_pool_accounted(struct kdbus_pool *pool, unsigned *size, unsigned *acc)
+void kdbus_pool_accounted(struct kdbus_pool *pool, ulong *size, ulong *acc)
{
*size = pool->size;
mutex_lock(&pool->lock);
*/
int
kdbus_pool_slice_copy_iovec(struct kdbus_pool *pool, const struct kdbus_pool_slice *slice,
- struct iovec *iov, unsigned iov_len, unsigned total_len)
+ struct iovec *iov, ulong iov_len, ulong total_len)
{
struct iov_iter iter;
int len;
* Return: 0 on success, negative errno on failure.
*/
int kdbus_pool_slice_copy_kvec(struct kdbus_pool *pool, const struct kdbus_pool_slice *slice,
- unsigned off, struct kvec *kvec,
- unsigned kvec_len, unsigned total_len)
+ ulong off, struct kvec *kvec,
+ ulong kvec_len, ulong total_len)
{
struct iov_iter iter;
mm_segment_t old_fs;
struct inode *i_dst = file_inode(f_dst);
struct address_space *mapping_dst = f_dst->f_mapping;
const struct address_space_operations *aops = mapping_dst->a_ops;
- unsigned long len = slice_src->size;
+ ulong len = slice_src->size;
loff_t off_src = slice_src->off;
- unsigned off_dst = slice_dst->off;
+ ulong off_dst = slice_dst->off;
mm_segment_t old_fs;
int ret = 0;
old_fs = get_fs();
set_fs(get_ds());
while (len) {
- unsigned long page_off;
- unsigned long copy_len;
+ ulong page_off;
+ ulong copy_len;
char __user *kaddr;
struct page *page;
int n_read;
long status;
page_off = off_dst & (PAGE_CACHE_SIZE - 1);
- copy_len = min_t(unsigned long,
- PAGE_CACHE_SIZE - page_off, len);
+ copy_len = min_t(ulong, PAGE_CACHE_SIZE - page_off, len);
status = aops->write_begin(f_dst, mapping_dst, off_dst,
copy_len, 0, &page, &fsdata);
*/
struct kdbus_pool {
struct file *f;
- unsigned size;
- unsigned accounted_size;
+ ulong size;
+ ulong accounted_size;
struct mutex lock;
struct list_head slices;
* offset.
*/
struct kdbus_pool_slice {
- unsigned off;
- unsigned size;
+ ulong off;
+ ulong size;
struct list_head entry;
struct rb_node rb_node;
bool ref_user;
};
-wur int kdbus_pool_init(struct kdbus_pool *p, const char *name, unsigned size);
+wur int kdbus_pool_init(struct kdbus_pool *p, const char *name, ulong size);
void kdbus_pool_destroy(struct kdbus_pool *pool);
-void kdbus_pool_accounted(struct kdbus_pool *pool, unsigned *size, unsigned *acc);
+void kdbus_pool_accounted(struct kdbus_pool *pool, ulong *size, ulong *acc);
wur int kdbus_pool_mmap(const struct kdbus_pool *pool, struct vm_area_struct *vma);
-wur int kdbus_pool_release_offset(struct kdbus_pool *pool, unsigned off);
+wur int kdbus_pool_release_offset(struct kdbus_pool *pool, ulong off);
void kdbus_pool_publish_empty(struct kdbus_pool *pool, u64 *off, u64 *size);
wur struct kdbus_pool_slice *kdbus_pool_slice_alloc(struct kdbus_pool *pool,
- unsigned size, bool accounted);
+ ulong size, bool accounted);
void kdbus_pool_slice_release(struct kdbus_pool *pool, struct kdbus_pool_slice *slice);
void kdbus_pool_slice_publish(struct kdbus_pool *pool, struct kdbus_pool_slice *slice,
u64 *out_offset, u64 *out_size);
wur off_t kdbus_pool_slice_offset(const struct kdbus_pool_slice *slice);
-wur unsigned kdbus_pool_slice_size(const struct kdbus_pool_slice *slice);
+wur ulong kdbus_pool_slice_size(const struct kdbus_pool_slice *slice);
wur int kdbus_pool_slice_copy(struct kdbus_pool *dst_pool, const struct kdbus_pool_slice *slice_dst,
struct kdbus_pool *src_pool, const struct kdbus_pool_slice *slice_src);
wur int kdbus_pool_slice_copy_kvec(struct kdbus_pool *pool, const struct kdbus_pool_slice *slice,
- unsigned off, struct kvec *kvec,
- unsigned kvec_count, unsigned total_len);
+ ulong off, struct kvec *kvec,
+ ulong kvec_count, ulong total_len);
wur int kdbus_pool_slice_copy_iovec(struct kdbus_pool *pool, const struct kdbus_pool_slice *slice,
- struct iovec *iov, unsigned iov_count, unsigned total_len);
+ struct iovec *iov, ulong iov_count, ulong total_len);
#endif
struct kdbus_conn *__restrict__ dst)
{
struct kdbus_pool_slice *slice = NULL;
- unsigned size, fds;
+ ulong size, fds;
int ret;
kdbus_assert(e);
kdbus_assert(src);
{
struct kdbus_reply *r;
int ret;
- unsigned at = atomic_dec_return(&reply_dst->request_quota);
+ ulong at = atomic_dec_return(&reply_dst->request_quota);
if (!at) {
ret = -EMLINK;
*
* Return: 0 on success, negative error code on failure.
*/
-int kdbus_copy_from_user(void *dest, void __user *user_ptr, unsigned size)
+int kdbus_copy_from_user(void *dest, void __user *user_ptr, ulong size)
{
if (!KDBUS_IS_ALIGNED8((uintptr_t)user_ptr))
return -EFAULT;
*
* Set @src and @len in @kvec, and increase @total_len by @len.
*/
-void kdbus_kvec_set(struct kvec *kvec, void *src, unsigned len)
+void kdbus_kvec_set(struct kvec *kvec, void *src, ulong len)
{
kvec->iov_base = src;
kvec->iov_len = len;
*
- * Return: the number of added padding bytes.
+ * Return: true if a padding kvec was added, false if none was needed.
*/
-bool kdbus_kvec_pad(struct kvec *kvec, unsigned len)
+bool kdbus_kvec_pad(struct kvec *kvec, ulong len)
{
- unsigned pad = KDBUS_PADDING_TO_8(len);
+ ulong pad = KDBUS_PADDING_TO_8(len);
if (!pad)
return false;
*
* Return: hash value
*/
-static inline wur unsigned int kdbus_strhash(const char *str)
+static inline wur ulong kdbus_strhash(const char *str)
{
- unsigned long hash = init_name_hash();
+ ulong hash = init_name_hash();
while (*str)
hash = partial_name_hash(*str++, hash);
wur int kdbus_verify_uid_prefix(const char *name, struct user_namespace *user_ns,
kuid_t kuid);
-wur int kdbus_copy_from_user(void *dest, void __user *user_ptr, unsigned size);
+wur int kdbus_copy_from_user(void *dest, void __user *user_ptr, ulong size);
struct kvec;
-void kdbus_kvec_set(struct kvec *kvec, void *src, unsigned len);
-wur bool kdbus_kvec_pad(struct kvec *kvec, unsigned len);
+void kdbus_kvec_set(struct kvec *kvec, void *src, ulong len);
+wur bool kdbus_kvec_pad(struct kvec *kvec, ulong len);
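+/*
+ * Worked example with 4 KiB pages: sizes 1..4095 map to order 0,
+ * 4096..8191 to order 1, 8192..16383 to order 2. The formula picks the
+ * smallest order with (PAGE_SIZE << order) strictly greater than
+ * alloc_size, so an exact page multiple rounds up one order.
+ */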
-static inline wur unsigned kdbus_page_alloc_order(unsigned alloc_size) {
- unsigned res = sizeof(unsigned)*8 - 1 - PAGE_SHIFT - __builtin_clz(2*alloc_size | PAGE_SIZE);
+static inline wur ulong kdbus_page_alloc_order(ulong alloc_size) {
+ ulong res = sizeof(ulong)*8 - 1 - PAGE_SHIFT - __builtin_clzl(2*alloc_size | PAGE_SIZE);
kdbus_assert(alloc_size <= (PAGE_SIZE << res));
return res;
}