Import old logger_aio_write() 31/268731/4
author	Łukasz Stelmach <l.stelmach@samsung.com>
Wed, 15 Dec 2021 23:16:00 +0000 (00:16 +0100)
committer	Łukasz Stelmach <l.stelmach@samsung.com>
Tue, 4 Jan 2022 21:14:36 +0000 (22:14 +0100)
Copy logger_aio_write() and its two helpers, do_write_log() and
do_write_log_from_user(), from the Android logger driver in kernel
v3.10.65. Use them instead of logger_write_iter() when compiling
against kernels older than 3.18.
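
The selection happens at compile time. A minimal sketch of the
resulting dispatch (condensed from the diff below; all handler names
are the ones already used in kernel/logger.c, and the <linux/version.h>
include is shown only for completeness):

    #include <linux/version.h>  /* LINUX_VERSION_CODE, KERNEL_VERSION */

    #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0))
    # define KERNEL_VERSION_BELOW_3_18
    # include <linux/aio.h>
    #endif

    static const struct file_operations logger_fops = {
            .owner = THIS_MODULE,
            .read = logger_read,
    #ifdef KERNEL_VERSION_BELOW_3_18
            /* pre-3.18: vectored writes arrive via ->aio_write */
            .aio_write = logger_aio_write,
    #else
            /* 3.18+: iov_iter based ->write_iter */
            .write_iter = logger_write_iter,
    #endif
            .poll = logger_poll,
            .unlocked_ioctl = logger_ioctl,
            .compat_ioctl = logger_ioctl,
            /* remaining fields unchanged */
    };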

Change-Id: I1e2cb304106e48ffad80f03dc7c9ae899cd228f4
Signed-off-by: Łukasz Stelmach <l.stelmach@samsung.com>
kernel/logger.c

index eb287e4..853a862 100644
 # include <linux/sched.h>
 #endif
 
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0))
+# define KERNEL_VERSION_BELOW_3_18
+# include <linux/aio.h>
+#endif
+
 #include <linux/module.h>
 #include <linux/fs.h>
 #include <linux/miscdevice.h>
@@ -476,7 +481,11 @@ static struct file *make_new_file(struct file *file)
                return ERR_PTR(-ENOMEM);
        }
 
+#ifdef KERNEL_VERSION_BELOW_3_18
+       p = d_path(&file->f_path, pbuf, PATH_MAX);
+#else
        p = file_path(file, pbuf, PATH_MAX);
+#endif
        if (!p) {
                kfree(pbuf);
                return ERR_PTR(-EFAULT);
@@ -547,6 +556,129 @@ static void flush_thread_data(struct file* file)
        writer->buffer[0] = '\0';
 }
 
+#ifdef KERNEL_VERSION_BELOW_3_18
+/*
+ * do_write_log - writes 'count' bytes from 'buf' to 'log'
+ *
+ * The caller needs to hold log->mutex.
+ */
+static void do_write_log(struct logger_log *log, const void *buf, size_t count)
+{
+       size_t len;
+
+       len = min(count, log->size - log->w_off);
+       memcpy(log->buffer + log->w_off, buf, len);
+
+       if (count != len)
+               memcpy(log->buffer, buf + len, count - len);
+
+       log->w_off = logger_offset(log, log->w_off + count);
+
+}
+
+/*
+ * do_write_log_from_user - writes 'count' bytes from the user-space buffer
+ * 'buf' to the log 'log'
+ *
+ * The caller needs to hold log->mutex.
+ *
+ * Returns 'count' on success, negative error code on failure.
+ */
+static ssize_t do_write_log_from_user(struct logger_log *log,
+                                     const void __user *buf, size_t count)
+{
+       size_t len;
+
+       len = min(count, log->size - log->w_off);
+       if (len && copy_from_user(log->buffer + log->w_off, buf, len))
+               return -EFAULT;
+
+       if (count != len)
+               if (copy_from_user(log->buffer, buf + len, count - len))
+                       /*
+                        * Note that by not updating w_off, this abandons the
+                        * portion of the new entry that *was* successfully
+                        * copied, just above.  This is intentional to avoid
+                        * message corruption from missing fragments.
+                        */
+                       return -EFAULT;
+
+       log->w_off = logger_offset(log, log->w_off + count);
+
+       return count;
+}
+
+/*
+ * logger_aio_write - our write method, implementing support for write(),
+ * writev(), and aio_write(). Writes are our fast path, and we try to optimize
+ * them above all else.
+ */
+static ssize_t logger_aio_write(struct kiocb *iocb, const struct iovec *iov,
+                        unsigned long nr_segs, loff_t ppos)
+{
+       struct logger_log *log = file_get_log(iocb->ki_filp);
+       size_t orig;
+       struct logger_entry header;
+       struct timespec now;
+       ssize_t ret = 0;
+
+       now = current_kernel_time();
+
+       header.pid = current->tgid;
+       header.tid = current->pid;
+       header.sec = now.tv_sec;
+       header.nsec = now.tv_nsec;
+       header.euid = current_euid();
+       header.len = min_t(size_t, iocb->ki_nbytes, LOGGER_ENTRY_MAX_PAYLOAD);
+       header.hdr_size = sizeof(struct logger_entry);
+
+       /* null writes succeed, return zero */
+       if (unlikely(!header.len))
+               return 0;
+
+       mutex_lock(&log->mutex);
+
+       orig = log->w_off;
+
+       /*
+        * Fix up any readers, pulling them forward to the first readable
+        * entry after (what will be) the new write offset. We do this now
+        * because if we partially fail, we can end up with clobbered log
+        * entries that encroach on readable buffer.
+        */
+       fix_up_readers(log, sizeof(struct logger_entry) + header.len);
+
+       do_write_log(log, &header, sizeof(struct logger_entry));
+
+       while (nr_segs-- > 0) {
+               size_t len;
+               ssize_t nr;
+
+               /* figure out how much of this vector we can keep */
+               len = min_t(size_t, iov->iov_len, header.len - ret);
+
+               /* write out this segment's payload */
+               nr = do_write_log_from_user(log, iov->iov_base, len);
+               if (unlikely(nr < 0)) {
+                       log->w_off = orig;
+                       mutex_unlock(&log->mutex);
+                       return nr;
+               }
+
+               iov++;
+               ret += nr;
+       }
+
+       mutex_unlock(&log->mutex);
+
+       /* wake up any blocked readers */
+       wake_up_interruptible(&log->wq);
+
+       return ret;
+}
+
+#else  /* !KERNEL_VERSION_BELOW_3_18 */
+
 /*
  * logger_write_iter - our write method, implementing support for write(),
  * writev(), and aio_write(). Writes are our fast path, and we try to optimize
@@ -724,6 +856,7 @@ static ssize_t logger_write_iter(struct kiocb *iocb, struct iov_iter *from)
 
        return count;
 }
+#endif /* KERNEL_VERSION_BELOW_3_18 */
 
 static struct logger_log *get_log_from_minor(int minor)
 {
@@ -1012,7 +1145,11 @@ static long logger_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 static const struct file_operations logger_fops = {
        .owner = THIS_MODULE,
        .read = logger_read,
+#ifdef KERNEL_VERSION_BELOW_3_18
+       .aio_write = logger_aio_write,
+#else
        .write_iter = logger_write_iter,
+#endif
        .poll = logger_poll,
        .unlocked_ioctl = logger_ioctl,
        .compat_ioctl = logger_ioctl,