1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2018 Google, Inc. */
4 #include "gasket_ioctl.h"
5 #include "gasket_constants.h"
6 #include "gasket_core.h"
7 #include "gasket_interrupt.h"
8 #include "gasket_page_table.h"
9 #include <linux/compiler.h>
10 #include <linux/device.h>
12 #include <linux/uaccess.h>
/*
 * If kernel trace support is available, pull in the real tracepoint
 * definitions; otherwise stub every tracepoint out as a no-op so call
 * sites compile away cleanly.
 */
#ifdef GASKET_KERNEL_TRACE_SUPPORT
#define CREATE_TRACE_POINTS
#include <trace/events/gasket_ioctl.h>
#else
#define trace_gasket_ioctl_entry(x, ...)
#define trace_gasket_ioctl_exit(x)
#define trace_gasket_ioctl_integer_data(x)
#define trace_gasket_ioctl_eventfd_data(x, ...)
#define trace_gasket_ioctl_page_table_data(x, ...)
#define trace_gasket_ioctl_config_coherent_allocator(x, ...)
#endif /* GASKET_KERNEL_TRACE_SUPPORT */
26 /* Associate an eventfd with an interrupt. */
27 static int gasket_set_event_fd(struct gasket_dev *gasket_dev,
28 struct gasket_interrupt_eventfd __user *argp)
30 struct gasket_interrupt_eventfd die;
32 if (copy_from_user(&die, argp, sizeof(struct gasket_interrupt_eventfd)))
35 trace_gasket_ioctl_eventfd_data(die.interrupt, die.event_fd);
37 return gasket_interrupt_set_eventfd(
38 gasket_dev->interrupt_data, die.interrupt, die.event_fd);
41 /* Read the size of the page table. */
42 static int gasket_read_page_table_size(
43 struct gasket_dev *gasket_dev,
44 struct gasket_page_table_ioctl __user *argp)
47 struct gasket_page_table_ioctl ibuf;
49 if (copy_from_user(&ibuf, argp, sizeof(struct gasket_page_table_ioctl)))
52 if (ibuf.page_table_index >= gasket_dev->num_page_tables)
55 ibuf.size = gasket_page_table_num_entries(
56 gasket_dev->page_table[ibuf.page_table_index]);
58 trace_gasket_ioctl_page_table_data(
59 ibuf.page_table_index, ibuf.size, ibuf.host_address,
62 if (copy_to_user(argp, &ibuf, sizeof(ibuf)))
68 /* Read the size of the simple page table. */
69 static int gasket_read_simple_page_table_size(
70 struct gasket_dev *gasket_dev,
71 struct gasket_page_table_ioctl __user *argp)
74 struct gasket_page_table_ioctl ibuf;
76 if (copy_from_user(&ibuf, argp, sizeof(struct gasket_page_table_ioctl)))
79 if (ibuf.page_table_index >= gasket_dev->num_page_tables)
83 gasket_page_table_num_simple_entries(gasket_dev->page_table[ibuf.page_table_index]);
85 trace_gasket_ioctl_page_table_data(ibuf.page_table_index, ibuf.size,
89 if (copy_to_user(argp, &ibuf, sizeof(ibuf)))
95 /* Set the boundary between the simple and extended page tables. */
96 static int gasket_partition_page_table(
97 struct gasket_dev *gasket_dev,
98 struct gasket_page_table_ioctl __user *argp)
101 struct gasket_page_table_ioctl ibuf;
102 uint max_page_table_size;
104 if (copy_from_user(&ibuf, argp, sizeof(struct gasket_page_table_ioctl)))
107 trace_gasket_ioctl_page_table_data(
108 ibuf.page_table_index, ibuf.size, ibuf.host_address,
109 ibuf.device_address);
111 if (ibuf.page_table_index >= gasket_dev->num_page_tables)
113 max_page_table_size = gasket_page_table_max_size(
114 gasket_dev->page_table[ibuf.page_table_index]);
116 if (ibuf.size > max_page_table_size) {
117 dev_dbg(gasket_dev->dev,
118 "Partition request 0x%llx too large, max is 0x%x\n",
119 ibuf.size, max_page_table_size);
123 mutex_lock(&gasket_dev->mutex);
125 ret = gasket_page_table_partition(
126 gasket_dev->page_table[ibuf.page_table_index], ibuf.size);
127 mutex_unlock(&gasket_dev->mutex);
132 /* Map a userspace buffer to a device virtual address. */
133 static int gasket_map_buffers(struct gasket_dev *gasket_dev,
134 struct gasket_page_table_ioctl __user *argp)
136 struct gasket_page_table_ioctl ibuf;
138 if (copy_from_user(&ibuf, argp, sizeof(struct gasket_page_table_ioctl)))
141 trace_gasket_ioctl_page_table_data(ibuf.page_table_index, ibuf.size,
143 ibuf.device_address);
145 if (ibuf.page_table_index >= gasket_dev->num_page_tables)
148 if (gasket_page_table_are_addrs_bad(gasket_dev->page_table[ibuf.page_table_index],
150 ibuf.device_address, ibuf.size))
153 return gasket_page_table_map(gasket_dev->page_table[ibuf.page_table_index],
154 ibuf.host_address, ibuf.device_address,
155 ibuf.size / PAGE_SIZE);
158 /* Unmap a userspace buffer from a device virtual address. */
159 static int gasket_unmap_buffers(struct gasket_dev *gasket_dev,
160 struct gasket_page_table_ioctl __user *argp)
162 struct gasket_page_table_ioctl ibuf;
164 if (copy_from_user(&ibuf, argp, sizeof(struct gasket_page_table_ioctl)))
167 trace_gasket_ioctl_page_table_data(ibuf.page_table_index, ibuf.size,
169 ibuf.device_address);
171 if (ibuf.page_table_index >= gasket_dev->num_page_tables)
174 if (gasket_page_table_is_dev_addr_bad(gasket_dev->page_table[ibuf.page_table_index],
175 ibuf.device_address, ibuf.size))
178 gasket_page_table_unmap(gasket_dev->page_table[ibuf.page_table_index],
179 ibuf.device_address, ibuf.size / PAGE_SIZE);
185 * Reserve structures for coherent allocation, and allocate or free the
186 * corresponding memory.
188 static int gasket_config_coherent_allocator(
189 struct gasket_dev *gasket_dev,
190 struct gasket_coherent_alloc_config_ioctl __user *argp)
193 struct gasket_coherent_alloc_config_ioctl ibuf;
195 if (copy_from_user(&ibuf, argp,
196 sizeof(struct gasket_coherent_alloc_config_ioctl)))
199 trace_gasket_ioctl_config_coherent_allocator(ibuf.enable, ibuf.size,
202 if (ibuf.page_table_index >= gasket_dev->num_page_tables)
205 if (ibuf.size > PAGE_SIZE * MAX_NUM_COHERENT_PAGES)
208 if (ibuf.enable == 0) {
209 ret = gasket_free_coherent_memory(gasket_dev, ibuf.size,
211 ibuf.page_table_index);
213 ret = gasket_alloc_coherent_memory(gasket_dev, ibuf.size,
215 ibuf.page_table_index);
219 if (copy_to_user(argp, &ibuf, sizeof(ibuf)))
225 /* Check permissions for Gasket ioctls. */
226 static bool gasket_ioctl_check_permissions(struct file *filp, uint cmd)
230 struct gasket_dev *gasket_dev = (struct gasket_dev *)filp->private_data;
232 alive = (gasket_dev->status == GASKET_STATUS_ALIVE);
234 dev_dbg(gasket_dev->dev, "%s alive %d status %d\n",
235 __func__, alive, gasket_dev->status);
237 read = !!(filp->f_mode & FMODE_READ);
238 write = !!(filp->f_mode & FMODE_WRITE);
241 case GASKET_IOCTL_RESET:
242 case GASKET_IOCTL_CLEAR_INTERRUPT_COUNTS:
245 case GASKET_IOCTL_PAGE_TABLE_SIZE:
246 case GASKET_IOCTL_SIMPLE_PAGE_TABLE_SIZE:
247 case GASKET_IOCTL_NUMBER_PAGE_TABLES:
250 case GASKET_IOCTL_PARTITION_PAGE_TABLE:
251 case GASKET_IOCTL_CONFIG_COHERENT_ALLOCATOR:
252 return alive && write;
254 case GASKET_IOCTL_MAP_BUFFER:
255 case GASKET_IOCTL_UNMAP_BUFFER:
256 return alive && write;
258 case GASKET_IOCTL_CLEAR_EVENTFD:
259 case GASKET_IOCTL_SET_EVENTFD:
260 return alive && write;
263 return false; /* unknown permissions */
267 * standard ioctl dispatch function.
268 * @filp: File structure pointer describing this node usage session.
269 * @cmd: ioctl number to handle.
270 * @argp: ioctl-specific data pointer.
272 * Standard ioctl dispatcher; forwards operations to individual handlers.
274 long gasket_handle_ioctl(struct file *filp, uint cmd, void __user *argp)
276 struct gasket_dev *gasket_dev;
277 unsigned long arg = (unsigned long)argp;
278 gasket_ioctl_permissions_cb_t ioctl_permissions_cb;
281 gasket_dev = (struct gasket_dev *)filp->private_data;
282 trace_gasket_ioctl_entry(gasket_dev->dev_info.name, cmd);
284 ioctl_permissions_cb = gasket_get_ioctl_permissions_cb(gasket_dev);
285 if (ioctl_permissions_cb) {
286 retval = ioctl_permissions_cb(filp, cmd, argp);
288 trace_gasket_ioctl_exit(retval);
290 } else if (retval == 0) {
291 trace_gasket_ioctl_exit(-EPERM);
294 } else if (!gasket_ioctl_check_permissions(filp, cmd)) {
295 trace_gasket_ioctl_exit(-EPERM);
296 dev_dbg(gasket_dev->dev, "ioctl cmd=%x noperm\n", cmd);
300 /* Tracing happens in this switch statement for all ioctls with
301 * an integer argrument, but ioctls with a struct argument
302 * that needs copying and decoding, that tracing is done within
306 case GASKET_IOCTL_RESET:
307 retval = gasket_reset(gasket_dev);
309 case GASKET_IOCTL_SET_EVENTFD:
310 retval = gasket_set_event_fd(gasket_dev, argp);
312 case GASKET_IOCTL_CLEAR_EVENTFD:
313 trace_gasket_ioctl_integer_data(arg);
315 gasket_interrupt_clear_eventfd(gasket_dev->interrupt_data,
318 case GASKET_IOCTL_PARTITION_PAGE_TABLE:
319 trace_gasket_ioctl_integer_data(arg);
320 retval = gasket_partition_page_table(gasket_dev, argp);
322 case GASKET_IOCTL_NUMBER_PAGE_TABLES:
323 trace_gasket_ioctl_integer_data(gasket_dev->num_page_tables);
324 if (copy_to_user(argp, &gasket_dev->num_page_tables,
330 case GASKET_IOCTL_PAGE_TABLE_SIZE:
331 retval = gasket_read_page_table_size(gasket_dev, argp);
333 case GASKET_IOCTL_SIMPLE_PAGE_TABLE_SIZE:
334 retval = gasket_read_simple_page_table_size(gasket_dev, argp);
336 case GASKET_IOCTL_MAP_BUFFER:
337 retval = gasket_map_buffers(gasket_dev, argp);
339 case GASKET_IOCTL_CONFIG_COHERENT_ALLOCATOR:
340 retval = gasket_config_coherent_allocator(gasket_dev, argp);
342 case GASKET_IOCTL_UNMAP_BUFFER:
343 retval = gasket_unmap_buffers(gasket_dev, argp);
345 case GASKET_IOCTL_CLEAR_INTERRUPT_COUNTS:
346 /* Clear interrupt counts doesn't take an arg, so use 0. */
347 trace_gasket_ioctl_integer_data(0);
348 retval = gasket_interrupt_reset_counts(gasket_dev);
351 /* If we don't understand the ioctl, the best we can do is trace
354 trace_gasket_ioctl_integer_data(arg);
355 dev_dbg(gasket_dev->dev,
356 "Unknown ioctl cmd=0x%x not caught by "
357 "gasket_is_supported_ioctl\n",
363 trace_gasket_ioctl_exit(retval);
368 * Determines if an ioctl is part of the standard Gasket framework.
369 * @cmd: The ioctl number to handle.
371 * Returns 1 if the ioctl is supported and 0 otherwise.
373 long gasket_is_supported_ioctl(uint cmd)
376 case GASKET_IOCTL_RESET:
377 case GASKET_IOCTL_SET_EVENTFD:
378 case GASKET_IOCTL_CLEAR_EVENTFD:
379 case GASKET_IOCTL_PARTITION_PAGE_TABLE:
380 case GASKET_IOCTL_NUMBER_PAGE_TABLES:
381 case GASKET_IOCTL_PAGE_TABLE_SIZE:
382 case GASKET_IOCTL_SIMPLE_PAGE_TABLE_SIZE:
383 case GASKET_IOCTL_MAP_BUFFER:
384 case GASKET_IOCTL_UNMAP_BUFFER:
385 case GASKET_IOCTL_CLEAR_INTERRUPT_COUNTS:
386 case GASKET_IOCTL_CONFIG_COHERENT_ALLOCATOR: