* [RFC 3/9] staging/android/sync: Move sync framework out of staging
2016-01-13 17:57 [RFC 0/9] Add native sync support to i915 driver John.C.Harrison
2016-01-13 17:57 ` [RFC 1/9] staging/android/sync: Support sync points created from dma-fences John.C.Harrison
2016-01-13 17:57 ` [RFC 2/9] staging/android/sync: add sync_fence_create_dma John.C.Harrison
@ 2016-01-13 17:57 ` John.C.Harrison
2016-01-13 19:00 ` Gustavo Padovan
` (2 more replies)
2016-01-13 17:57 ` [RFC 4/9] android/sync: Improved debug dump to dmesg John.C.Harrison
` (6 subsequent siblings)
9 siblings, 3 replies; 23+ messages in thread
From: John.C.Harrison @ 2016-01-13 17:57 UTC (permalink / raw)
To: Intel-GFX
Cc: Greg Kroah-Hartman, Riley Andrews, Arve Hjønnevåg,
Gustavo Padovan
From: John Harrison <John.C.Harrison@Intel.com>
The sync framework is now used by the i915 driver. Therefore it can be
moved out of staging and into the regular tree. Also, the public
interfaces can actually be made public and exported.
v0.3: New patch for series.
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Signed-off-by: Geoff Miller <geoff.miller@intel.com>
Cc: Gustavo Padovan <gustavo.padovan@collabora.co.uk>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Arve Hjønnevåg <arve@android.com>
Cc: Riley Andrews <riandrews@android.com>
---
drivers/android/Kconfig | 28 ++
drivers/android/Makefile | 2 +
drivers/android/sw_sync.c | 260 ++++++++++++
drivers/android/sw_sync.h | 59 +++
drivers/android/sync.c | 734 +++++++++++++++++++++++++++++++++
drivers/android/sync.h | 366 ++++++++++++++++
drivers/android/sync_debug.c | 255 ++++++++++++
drivers/android/trace/sync.h | 82 ++++
drivers/staging/android/Kconfig | 28 --
drivers/staging/android/Makefile | 2 -
drivers/staging/android/sw_sync.c | 260 ------------
drivers/staging/android/sw_sync.h | 59 ---
drivers/staging/android/sync.c | 734 ---------------------------------
drivers/staging/android/sync.h | 366 ----------------
drivers/staging/android/sync_debug.c | 255 ------------
drivers/staging/android/trace/sync.h | 82 ----
drivers/staging/android/uapi/sw_sync.h | 32 --
drivers/staging/android/uapi/sync.h | 97 -----
include/uapi/Kbuild | 1 +
include/uapi/sync/Kbuild | 3 +
include/uapi/sync/sw_sync.h | 32 ++
include/uapi/sync/sync.h | 97 +++++
22 files changed, 1919 insertions(+), 1915 deletions(-)
create mode 100644 drivers/android/sw_sync.c
create mode 100644 drivers/android/sw_sync.h
create mode 100644 drivers/android/sync.c
create mode 100644 drivers/android/sync.h
create mode 100644 drivers/android/sync_debug.c
create mode 100644 drivers/android/trace/sync.h
delete mode 100644 drivers/staging/android/sw_sync.c
delete mode 100644 drivers/staging/android/sw_sync.h
delete mode 100644 drivers/staging/android/sync.c
delete mode 100644 drivers/staging/android/sync.h
delete mode 100644 drivers/staging/android/sync_debug.c
delete mode 100644 drivers/staging/android/trace/sync.h
delete mode 100644 drivers/staging/android/uapi/sw_sync.h
delete mode 100644 drivers/staging/android/uapi/sync.h
create mode 100644 include/uapi/sync/Kbuild
create mode 100644 include/uapi/sync/sw_sync.h
create mode 100644 include/uapi/sync/sync.h
diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig
index bdfc6c6..9edcd8f 100644
--- a/drivers/android/Kconfig
+++ b/drivers/android/Kconfig
@@ -32,6 +32,34 @@ config ANDROID_BINDER_IPC_32BIT
Note that enabling this will break newer Android user-space.
+config SYNC
+ bool "Synchronization framework"
+ default n
+ select ANON_INODES
+ select DMA_SHARED_BUFFER
+ ---help---
+ This option enables the framework for synchronization between multiple
+ drivers. Sync implementations can take advantage of hardware
+ synchronization built into devices like GPUs.
+
+config SW_SYNC
+ bool "Software synchronization objects"
+ default n
+ depends on SYNC
+ ---help---
+ A sync object driver that uses a 32bit counter to coordinate
+ synchronization. Useful when there is no hardware primitive backing
+ the synchronization.
+
+config SW_SYNC_USER
+ bool "Userspace API for SW_SYNC"
+ default n
+ depends on SW_SYNC
+ ---help---
+ Provides a user space API to the sw sync object.
+ *WARNING* improper use of this can result in deadlocking kernel
+ drivers from userspace.
+
endif # if ANDROID
endmenu
diff --git a/drivers/android/Makefile b/drivers/android/Makefile
index 3b7e4b0..a1465dd 100644
--- a/drivers/android/Makefile
+++ b/drivers/android/Makefile
@@ -1,3 +1,5 @@
ccflags-y += -I$(src) # needed for trace events
obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o
+obj-$(CONFIG_SYNC) += sync.o sync_debug.o
+obj-$(CONFIG_SW_SYNC) += sw_sync.o
diff --git a/drivers/android/sw_sync.c b/drivers/android/sw_sync.c
new file mode 100644
index 0000000..c4ff167
--- /dev/null
+++ b/drivers/android/sw_sync.c
@@ -0,0 +1,260 @@
+/*
+ * drivers/base/sw_sync.c
+ *
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/export.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/syscalls.h>
+#include <linux/uaccess.h>
+
+#include "sw_sync.h"
+
+/*
+ * Wraparound-aware three-way compare of two timeline values.
+ * Returns -1 if @a is "before" @b, 0 if equal, 1 if "after".
+ */
+static int sw_sync_cmp(u32 a, u32 b)
+{
+	if (a == b)
+		return 0;
+
+	/*
+	 * Subtract in unsigned arithmetic (well-defined wraparound), then
+	 * sign-test. The old ((s32)a - (s32)b) could overflow, which is
+	 * undefined behavior for signed ints.
+	 */
+	return (s32)(a - b) < 0 ? -1 : 1;
+}
+
+/*
+ * sw_sync_pt_create() - create a sync_pt that signals once @obj's counter
+ * reaches @value. Returns NULL on allocation failure.
+ */
+struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj, u32 value)
+{
+	struct sw_sync_pt *pt;
+
+	pt = (struct sw_sync_pt *)
+		sync_pt_create(&obj->obj, sizeof(struct sw_sync_pt));
+	/* sync_pt_create() returns NULL on alloc failure; don't deref it */
+	if (!pt)
+		return NULL;
+
+	pt->value = value;
+
+	return (struct sync_pt *)pt;
+}
+EXPORT_SYMBOL(sw_sync_pt_create);
+
+/* timeline_ops.dup: clone a pt onto the same timeline with the same value */
+static struct sync_pt *sw_sync_pt_dup(struct sync_pt *sync_pt)
+{
+	struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt;
+	struct sw_sync_timeline *obj =
+		(struct sw_sync_timeline *)sync_pt_parent(sync_pt);
+
+	return (struct sync_pt *)sw_sync_pt_create(obj, pt->value);
+}
+
+/* timeline_ops.has_signaled: true once the counter has reached pt->value */
+static int sw_sync_pt_has_signaled(struct sync_pt *sync_pt)
+{
+	struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt;
+	struct sw_sync_timeline *obj =
+		(struct sw_sync_timeline *)sync_pt_parent(sync_pt);
+
+	return sw_sync_cmp(obj->value, pt->value) >= 0;
+}
+
+/* timeline_ops.compare: order two pts by their target counter values */
+static int sw_sync_pt_compare(struct sync_pt *a, struct sync_pt *b)
+{
+	struct sw_sync_pt *pt_a = (struct sw_sync_pt *)a;
+	struct sw_sync_pt *pt_b = (struct sw_sync_pt *)b;
+
+	return sw_sync_cmp(pt_a->value, pt_b->value);
+}
+
+/*
+ * timeline_ops.fill_driver_data: copy the pt's target value into @data for
+ * SYNC_IOC_FENCE_INFO. Returns bytes written, or -ENOMEM if @size is too
+ * small.
+ */
+static int sw_sync_fill_driver_data(struct sync_pt *sync_pt,
+				    void *data, int size)
+{
+	struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt;
+
+	/*
+	 * Compare as int: the old "size < sizeof(...)" promoted a negative
+	 * size to a huge size_t, letting it through to the memcpy below.
+	 */
+	if (size < (int)sizeof(pt->value))
+		return -ENOMEM;
+
+	memcpy(data, &pt->value, sizeof(pt->value));
+
+	return sizeof(pt->value);
+}
+
+/* timeline_ops.timeline_value_str: print the current counter into @str */
+static void sw_sync_timeline_value_str(struct sync_timeline *sync_timeline,
+				       char *str, int size)
+{
+	struct sw_sync_timeline *timeline =
+		(struct sw_sync_timeline *)sync_timeline;
+	/* NOTE(review): value is u32 but printed with %d — %u would be exact */
+	snprintf(str, size, "%d", timeline->value);
+}
+
+/* timeline_ops.pt_value_str: print the pt's target value into @str */
+static void sw_sync_pt_value_str(struct sync_pt *sync_pt,
+				 char *str, int size)
+{
+	struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt;
+
+	snprintf(str, size, "%d", pt->value);
+}
+
+/*
+ * This table is never modified and sync_timeline_create() takes a const
+ * pointer, so it can be const (placed in rodata).
+ */
+static const struct sync_timeline_ops sw_sync_timeline_ops = {
+	.driver_name = "sw_sync",
+	.dup = sw_sync_pt_dup,
+	.has_signaled = sw_sync_pt_has_signaled,
+	.compare = sw_sync_pt_compare,
+	.fill_driver_data = sw_sync_fill_driver_data,
+	.timeline_value_str = sw_sync_timeline_value_str,
+	.pt_value_str = sw_sync_pt_value_str,
+};
+
+/*
+ * sw_sync_timeline_create() - create a timeline driven by a bare u32 counter.
+ * Returns NULL if the underlying sync_timeline allocation fails.
+ */
+struct sw_sync_timeline *sw_sync_timeline_create(const char *name)
+{
+	struct sw_sync_timeline *obj = (struct sw_sync_timeline *)
+		sync_timeline_create(&sw_sync_timeline_ops,
+				     sizeof(struct sw_sync_timeline),
+				     name);
+
+	return obj;
+}
+EXPORT_SYMBOL(sw_sync_timeline_create);
+
+/*
+ * sw_sync_timeline_inc() - advance the counter by @inc and retire any pts
+ * whose target value has now been reached (u32 wraparound is intentional,
+ * see sw_sync_cmp()).
+ */
+void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc)
+{
+	obj->value += inc;
+
+	sync_timeline_signal(&obj->obj);
+}
+EXPORT_SYMBOL(sw_sync_timeline_inc);
+
+#ifdef CONFIG_SW_SYNC_USER
+/* *WARNING*
+ *
+ * improper use of this can result in deadlocking kernel drivers from userspace.
+ */
+
+/* opening sw_sync creates a new sync obj */
+static int sw_sync_open(struct inode *inode, struct file *file)
+{
+	struct sw_sync_timeline *obj;
+	char task_comm[TASK_COMM_LEN];
+
+	/* name the timeline after the opening process, for debug output */
+	get_task_comm(task_comm, current);
+
+	obj = sw_sync_timeline_create(task_comm);
+	if (!obj)
+		return -ENOMEM;
+
+	file->private_data = obj;
+
+	return 0;
+}
+
+/* closing the fd destroys the timeline (signals any remaining pts) */
+static int sw_sync_release(struct inode *inode, struct file *file)
+{
+	struct sw_sync_timeline *obj = file->private_data;
+
+	sync_timeline_destroy(&obj->obj);
+	return 0;
+}
+
+/*
+ * SW_SYNC_IOC_CREATE_FENCE: create a fence that signals when the timeline
+ * reaches data.value, install it on a fresh fd, and return that fd to
+ * userspace via data.fence.
+ */
+static long sw_sync_ioctl_create_fence(struct sw_sync_timeline *obj,
+				       unsigned long arg)
+{
+	int fd = get_unused_fd_flags(O_CLOEXEC);
+	int err;
+	struct sync_pt *pt;
+	struct sync_fence *fence;
+	struct sw_sync_create_fence_data data;
+
+	if (fd < 0)
+		return fd;
+
+	if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
+		err = -EFAULT;
+		goto err;
+	}
+
+	pt = sw_sync_pt_create(obj, data.value);
+	if (!pt) {
+		err = -ENOMEM;
+		goto err;
+	}
+
+	/* userspace-supplied name may not be NUL-terminated */
+	data.name[sizeof(data.name) - 1] = '\0';
+	fence = sync_fence_create(data.name, pt);
+	if (!fence) {
+		/* pt ownership passes to the fence only on success */
+		sync_pt_free(pt);
+		err = -ENOMEM;
+		goto err;
+	}
+
+	data.fence = fd;
+	if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
+		sync_fence_put(fence);
+		err = -EFAULT;
+		goto err;
+	}
+
+	/* publish only after the fd number reached userspace successfully */
+	sync_fence_install(fence, fd);
+
+	return 0;
+
+err:
+	put_unused_fd(fd);
+	return err;
+}
+
+/* SW_SYNC_IOC_INC: advance the timeline counter by a user-supplied u32 */
+static long sw_sync_ioctl_inc(struct sw_sync_timeline *obj, unsigned long arg)
+{
+	u32 value;
+
+	if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
+		return -EFAULT;
+
+	sw_sync_timeline_inc(obj, value);
+
+	return 0;
+}
+
+/* ioctl dispatch for /dev/sw_sync fds */
+static long sw_sync_ioctl(struct file *file, unsigned int cmd,
+			  unsigned long arg)
+{
+	struct sw_sync_timeline *obj = file->private_data;
+
+	switch (cmd) {
+	case SW_SYNC_IOC_CREATE_FENCE:
+		return sw_sync_ioctl_create_fence(obj, arg);
+
+	case SW_SYNC_IOC_INC:
+		return sw_sync_ioctl_inc(obj, arg);
+
+	default:
+		return -ENOTTY;
+	}
+}
+
+static const struct file_operations sw_sync_fops = {
+	.owner = THIS_MODULE,
+	.open = sw_sync_open,
+	.release = sw_sync_release,
+	.unlocked_ioctl = sw_sync_ioctl,
+	/*
+	 * NOTE(review): compat_ioctl reuses the native handler — assumes the
+	 * uapi ioctl structs have identical 32/64-bit layout; verify uapi.
+	 */
+	.compat_ioctl = sw_sync_ioctl,
+};
+
+/* userspace entry point: /dev/sw_sync with a dynamically assigned minor */
+static struct miscdevice sw_sync_dev = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "sw_sync",
+	.fops = &sw_sync_fops,
+};
+
+static int __init sw_sync_device_init(void)
+{
+	return misc_register(&sw_sync_dev);
+}
+device_initcall(sw_sync_device_init);
+
+#endif /* CONFIG_SW_SYNC_USER */
diff --git a/drivers/android/sw_sync.h b/drivers/android/sw_sync.h
new file mode 100644
index 0000000..4bf8b86
--- /dev/null
+++ b/drivers/android/sw_sync.h
@@ -0,0 +1,59 @@
+/*
+ * include/linux/sw_sync.h
+ *
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_SW_SYNC_H
+#define _LINUX_SW_SYNC_H
+
+#include <linux/types.h>
+#include <linux/kconfig.h>
+#include <uapi/sync/sw_sync.h>
+#include "sync.h"
+
+/* a sync_timeline driven by a bare 32-bit counter (sw_sync_timeline_inc) */
+struct sw_sync_timeline {
+	struct sync_timeline obj;
+
+	u32 value;
+};
+
+/* a sync_pt that signals once its timeline's counter reaches ->value */
+struct sw_sync_pt {
+	struct sync_pt pt;
+
+	u32 value;
+};
+
+#if IS_ENABLED(CONFIG_SW_SYNC)
+struct sw_sync_timeline *sw_sync_timeline_create(const char *name);
+void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc);
+
+struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj, u32 value);
+#else
+/* no-op stubs so callers still compile with SW_SYNC disabled */
+static inline struct sw_sync_timeline *sw_sync_timeline_create(const char *name)
+{
+	return NULL;
+}
+
+static inline void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc)
+{
+}
+
+static inline struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj,
+						u32 value)
+{
+	return NULL;
+}
+#endif /* IS_ENABLED(CONFIG_SW_SYNC) */
+
+#endif /* _LINUX_SW_SYNC_H */
diff --git a/drivers/android/sync.c b/drivers/android/sync.c
new file mode 100644
index 0000000..7f0e919
--- /dev/null
+++ b/drivers/android/sync.c
@@ -0,0 +1,734 @@
+/*
+ * drivers/base/sync.c
+ *
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/anon_inodes.h>
+
+#include "sync.h"
+
+#define CREATE_TRACE_POINTS
+#include "trace/sync.h"
+
+static const struct fence_ops android_fence_ops;
+static const struct file_operations sync_fence_fops;
+
+/*
+ * sync_timeline_create() - allocate and register a new sync_timeline
+ * @ops:  implementation ops (must outlive the timeline)
+ * @size: bytes to allocate; must be >= sizeof(struct sync_timeline) so
+ *        drivers can embed the timeline in a larger private struct
+ * @name: human-readable name, copied into obj->name
+ *
+ * Returns the refcounted timeline, or NULL on bad size / alloc failure.
+ */
+struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
+					   int size, const char *name)
+{
+	struct sync_timeline *obj;
+
+	if (size < sizeof(struct sync_timeline))
+		return NULL;
+
+	obj = kzalloc(size, GFP_KERNEL);
+	if (obj == NULL)
+		return NULL;
+
+	kref_init(&obj->kref);
+	obj->ops = ops;
+	/* private fence context so this timeline's pts get unique seqnos */
+	obj->context = fence_context_alloc(1);
+	strlcpy(obj->name, name, sizeof(obj->name));
+
+	INIT_LIST_HEAD(&obj->child_list_head);
+	INIT_LIST_HEAD(&obj->active_list_head);
+	spin_lock_init(&obj->child_list_lock);
+
+	sync_timeline_debug_add(obj);
+
+	return obj;
+}
+EXPORT_SYMBOL(sync_timeline_create);
+
+/* kref release callback: unhook from debugfs and free the timeline */
+static void sync_timeline_free(struct kref *kref)
+{
+	struct sync_timeline *obj =
+		container_of(kref, struct sync_timeline, kref);
+
+	sync_timeline_debug_remove(obj);
+
+	/* let the driver release state embedded after the timeline */
+	if (obj->ops->release_obj)
+		obj->ops->release_obj(obj);
+
+	kfree(obj);
+}
+
+static void sync_timeline_get(struct sync_timeline *obj)
+{
+	kref_get(&obj->kref);
+}
+
+static void sync_timeline_put(struct sync_timeline *obj)
+{
+	kref_put(&obj->kref, sync_timeline_free);
+}
+
+/*
+ * sync_timeline_destroy() - mark a timeline dead and drop the creator's ref.
+ * Outstanding pts hold their own timeline references, so the final free
+ * happens only after the last child goes away.
+ */
+void sync_timeline_destroy(struct sync_timeline *obj)
+{
+	obj->destroyed = true;
+	/*
+	 * Ensure timeline is marked as destroyed before
+	 * changing timeline's fences status.
+	 */
+	smp_wmb();
+
+	/*
+	 * signal any children that their parent is going away.
+	 */
+	sync_timeline_signal(obj);
+	sync_timeline_put(obj);
+}
+EXPORT_SYMBOL(sync_timeline_destroy);
+
+/*
+ * sync_timeline_signal() - re-check active pts after the timeline advanced.
+ * Retires (unlinks) every pt that fence_is_signaled_locked() now reports
+ * signaled, under child_list_lock.
+ */
+void sync_timeline_signal(struct sync_timeline *obj)
+{
+	unsigned long flags;
+	/* NOTE(review): signaled_pts appears unused — candidate for removal */
+	LIST_HEAD(signaled_pts);
+	struct sync_pt *pt, *next;
+
+	trace_sync_timeline(obj);
+
+	spin_lock_irqsave(&obj->child_list_lock, flags);
+
+	list_for_each_entry_safe(pt, next, &obj->active_list_head,
+				 active_list) {
+		if (fence_is_signaled_locked(&pt->base))
+			list_del_init(&pt->active_list);
+	}
+
+	spin_unlock_irqrestore(&obj->child_list_lock, flags);
+}
+EXPORT_SYMBOL(sync_timeline_signal);
+
+/*
+ * sync_pt_create() - allocate a sync_pt on @obj
+ * @size: must be >= sizeof(struct sync_pt); extra space is driver data
+ *
+ * Takes a timeline reference and links the pt onto the child list.
+ * Returns NULL on bad size or allocation failure.
+ */
+struct sync_pt *sync_pt_create(struct sync_timeline *obj, int size)
+{
+	unsigned long flags;
+	struct sync_pt *pt;
+
+	if (size < sizeof(struct sync_pt))
+		return NULL;
+
+	pt = kzalloc(size, GFP_KERNEL);
+	if (pt == NULL)
+		return NULL;
+
+	spin_lock_irqsave(&obj->child_list_lock, flags);
+	sync_timeline_get(obj);
+	/* seqno comes from the timeline's monotonically increasing counter */
+	fence_init(&pt->base, &android_fence_ops, &obj->child_list_lock,
+		   obj->context, ++obj->value);
+	list_add_tail(&pt->child_list, &obj->child_list_head);
+	INIT_LIST_HEAD(&pt->active_list);
+	spin_unlock_irqrestore(&obj->child_list_lock, flags);
+	return pt;
+}
+EXPORT_SYMBOL(sync_pt_create);
+
+/* drop the pt's base fence ref (frees via android_fence_release) */
+void sync_pt_free(struct sync_pt *pt)
+{
+	fence_put(&pt->base);
+}
+EXPORT_SYMBOL(sync_pt_free);
+
+/*
+ * sync_fence_alloc() - allocate a sync_fence backed by an anonymous inode.
+ * The fence is owned by its file; sync_fence_put() (fput) releases it.
+ * Returns NULL on allocation or anon-inode failure.
+ */
+static struct sync_fence *sync_fence_alloc(int size, const char *name)
+{
+	struct sync_fence *fence;
+
+	fence = kzalloc(size, GFP_KERNEL);
+	if (fence == NULL)
+		return NULL;
+
+	fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops,
+					 fence, 0);
+	if (IS_ERR(fence->file))
+		goto err;
+
+	kref_init(&fence->kref);
+	strlcpy(fence->name, name, sizeof(fence->name));
+
+	init_waitqueue_head(&fence->wq);
+
+	return fence;
+
+err:
+	kfree(fence);
+	return NULL;
+}
+
+/*
+ * Callback run when one underlying dma-fence signals. fence->status counts
+ * still-unsignaled fences, so waiters are woken when it reaches zero.
+ */
+static void fence_check_cb_func(struct fence *f, struct fence_cb *cb)
+{
+	struct sync_fence_cb *check;
+	struct sync_fence *fence;
+
+	check = container_of(cb, struct sync_fence_cb, cb);
+	fence = check->fence;
+
+	if (atomic_dec_and_test(&fence->status))
+		wake_up_all(&fence->wq);
+}
+
+/* TODO: implement a create which takes more than one sync_pt */
+/*
+ * sync_fence_create_dma() - wrap a single dma-fence in a sync_fence.
+ * status starts at 1 and is decremented immediately if @pt was already
+ * signaled (fence_add_callback() returns nonzero in that case).
+ */
+struct sync_fence *sync_fence_create_dma(const char *name, struct fence *pt)
+{
+	struct sync_fence *fence;
+
+	fence = sync_fence_alloc(offsetof(struct sync_fence, cbs[1]), name);
+	if (fence == NULL)
+		return NULL;
+
+	fence->num_fences = 1;
+	atomic_set(&fence->status, 1);
+
+	fence->cbs[0].sync_pt = pt;
+	fence->cbs[0].fence = fence;
+	if (fence_add_callback(pt, &fence->cbs[0].cb, fence_check_cb_func))
+		atomic_dec(&fence->status);
+
+	sync_fence_debug_add(fence);
+
+	return fence;
+}
+EXPORT_SYMBOL(sync_fence_create_dma);
+
+/* create a sync_fence from a sync_pt's embedded dma-fence */
+struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
+{
+	return sync_fence_create_dma(name, &pt->base);
+}
+EXPORT_SYMBOL(sync_fence_create);
+
+/*
+ * sync_fence_fdget() - look up a sync_fence by fd, taking a file reference.
+ * Returns NULL if the fd is invalid or does not refer to a sync_fence.
+ */
+struct sync_fence *sync_fence_fdget(int fd)
+{
+	struct file *file = fget(fd);
+
+	if (file == NULL)
+		return NULL;
+
+	/* reject fds that are not sync_fence files */
+	if (file->f_op != &sync_fence_fops)
+		goto err;
+
+	return file->private_data;
+
+err:
+	fput(file);
+	return NULL;
+}
+EXPORT_SYMBOL(sync_fence_fdget);
+
+/* drop the file reference backing @fence */
+void sync_fence_put(struct sync_fence *fence)
+{
+	fput(fence->file);
+}
+EXPORT_SYMBOL(sync_fence_put);
+
+/* publish @fence to userspace on the reserved fd @fd */
+void sync_fence_install(struct sync_fence *fence, int fd)
+{
+	fd_install(fd, fence->file);
+}
+EXPORT_SYMBOL(sync_fence_install);
+
+/*
+ * Add @pt as cbs[*i] of @fence. *i advances only if the callback was
+ * installed (i.e. @pt was not already signaled); an extra reference on
+ * @pt is taken in that case.
+ */
+static void sync_fence_add_pt(struct sync_fence *fence,
+			      int *i, struct fence *pt)
+{
+	fence->cbs[*i].sync_pt = pt;
+	fence->cbs[*i].fence = fence;
+
+	if (!fence_add_callback(pt, &fence->cbs[*i].cb, fence_check_cb_func)) {
+		fence_get(pt);
+		(*i)++;
+	}
+}
+
+/*
+ * sync_fence_merge() - create a fence that signals once both @a and @b have.
+ * Merges the two context-ordered cb arrays; when two pts share a context,
+ * only the later one (by seqno, wraparound-aware) is kept.
+ * Returns NULL on allocation failure.
+ */
+struct sync_fence *sync_fence_merge(const char *name,
+				    struct sync_fence *a, struct sync_fence *b)
+{
+	int num_fences = a->num_fences + b->num_fences;
+	struct sync_fence *fence;
+	int i, i_a, i_b;
+	unsigned long size = offsetof(struct sync_fence, cbs[num_fences]);
+
+	fence = sync_fence_alloc(size, name);
+	if (fence == NULL)
+		return NULL;
+
+	atomic_set(&fence->status, num_fences);
+
+	/*
+	 * Assume sync_fence a and b are both ordered and have no
+	 * duplicates with the same context.
+	 *
+	 * If a sync_fence can only be created with sync_fence_merge
+	 * and sync_fence_create, this is a reasonable assumption.
+	 */
+	for (i = i_a = i_b = 0; i_a < a->num_fences && i_b < b->num_fences; ) {
+		struct fence *pt_a = a->cbs[i_a].sync_pt;
+		struct fence *pt_b = b->cbs[i_b].sync_pt;
+
+		if (pt_a->context < pt_b->context) {
+			sync_fence_add_pt(fence, &i, pt_a);
+
+			i_a++;
+		} else if (pt_a->context > pt_b->context) {
+			sync_fence_add_pt(fence, &i, pt_b);
+
+			i_b++;
+		} else {
+			/* same context: keep the later pt (wraparound-safe) */
+			if (pt_a->seqno - pt_b->seqno <= INT_MAX)
+				sync_fence_add_pt(fence, &i, pt_a);
+			else
+				sync_fence_add_pt(fence, &i, pt_b);
+
+			i_a++;
+			i_b++;
+		}
+	}
+
+	for (; i_a < a->num_fences; i_a++)
+		sync_fence_add_pt(fence, &i, a->cbs[i_a].sync_pt);
+
+	for (; i_b < b->num_fences; i_b++)
+		sync_fence_add_pt(fence, &i, b->cbs[i_b].sync_pt);
+
+	/* pts already signaled were not counted: fix up the pending count */
+	if (num_fences > i)
+		atomic_sub(num_fences - i, &fence->status);
+	fence->num_fences = i;
+
+	sync_fence_debug_add(fence);
+	return fence;
+}
+EXPORT_SYMBOL(sync_fence_merge);
+
+/* waitqueue callback: dequeue the waiter and invoke its user callback */
+int sync_fence_wake_up_wq(wait_queue_t *curr, unsigned mode,
+				 int wake_flags, void *key)
+{
+	struct sync_fence_waiter *wait;
+
+	wait = container_of(curr, struct sync_fence_waiter, work);
+	list_del_init(&wait->work.task_list);
+
+	wait->callback(wait->work.private, wait);
+	return 1;
+}
+
+/*
+ * sync_fence_wait_async() - register @waiter to run when @fence signals.
+ * Returns 1 if the fence was already signaled (waiter not queued), 0 if
+ * the waiter was queued, or the fence's negative error status.
+ */
+int sync_fence_wait_async(struct sync_fence *fence,
+			  struct sync_fence_waiter *waiter)
+{
+	int err = atomic_read(&fence->status);
+	unsigned long flags;
+
+	if (err < 0)
+		return err;
+
+	if (!err)
+		return 1;
+
+	init_waitqueue_func_entry(&waiter->work, sync_fence_wake_up_wq);
+	waiter->work.private = fence;
+
+	/* re-check status under wq.lock so a concurrent signal isn't missed */
+	spin_lock_irqsave(&fence->wq.lock, flags);
+	err = atomic_read(&fence->status);
+	if (err > 0)
+		__add_wait_queue_tail(&fence->wq, &waiter->work);
+	spin_unlock_irqrestore(&fence->wq.lock, flags);
+
+	if (err < 0)
+		return err;
+
+	return !err;
+}
+EXPORT_SYMBOL(sync_fence_wait_async);
+
+/*
+ * sync_fence_cancel_async() - remove a waiter queued by
+ * sync_fence_wait_async(). Returns 0 on success or -ENOENT if the waiter
+ * already ran (or was never queued).
+ */
+int sync_fence_cancel_async(struct sync_fence *fence,
+			    struct sync_fence_waiter *waiter)
+{
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&fence->wq.lock, flags);
+	if (!list_empty(&waiter->work.task_list))
+		list_del_init(&waiter->work.task_list);
+	else
+		ret = -ENOENT;
+	spin_unlock_irqrestore(&fence->wq.lock, flags);
+	return ret;
+}
+EXPORT_SYMBOL(sync_fence_cancel_async);
+
+/*
+ * sync_fence_wait() - interruptibly wait for @fence to signal
+ * @timeout: timeout in ms; negative means wait forever
+ *
+ * Returns 0 on success, -ETIME on timeout, a -ERESTARTSYS-style error if
+ * interrupted, or the fence's negative error status. Dumps debug state on
+ * timeout or error.
+ */
+int sync_fence_wait(struct sync_fence *fence, long timeout)
+{
+	long ret;
+	int i;
+
+	if (timeout < 0)
+		timeout = MAX_SCHEDULE_TIMEOUT;
+	else
+		timeout = msecs_to_jiffies(timeout);
+
+	trace_sync_wait(fence, 1);
+	for (i = 0; i < fence->num_fences; ++i)
+		trace_sync_pt(fence->cbs[i].sync_pt);
+	/* status counts pending fences; <= 0 means signaled or error */
+	ret = wait_event_interruptible_timeout(fence->wq,
+					       atomic_read(&fence->status) <= 0,
+					       timeout);
+	trace_sync_wait(fence, 0);
+
+	if (ret < 0) {
+		return ret;
+	} else if (ret == 0) {
+		if (timeout) {
+			pr_info("fence timeout on [%p] after %dms\n", fence,
+				jiffies_to_msecs(timeout));
+			sync_dump();
+		}
+		return -ETIME;
+	}
+
+	ret = atomic_read(&fence->status);
+	if (ret) {
+		pr_info("fence error %ld on [%p]\n", ret, fence);
+		sync_dump();
+	}
+	return ret;
+}
+EXPORT_SYMBOL(sync_fence_wait);
+
+/* fence_ops.get_driver_name: delegate to the owning timeline's ops */
+static const char *android_fence_get_driver_name(struct fence *fence)
+{
+	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
+	struct sync_timeline *parent = sync_pt_parent(pt);
+
+	return parent->ops->driver_name;
+}
+
+/* fence_ops.get_timeline_name: the timeline's user-visible name */
+static const char *android_fence_get_timeline_name(struct fence *fence)
+{
+	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
+	struct sync_timeline *parent = sync_pt_parent(pt);
+
+	return parent->name;
+}
+
+/*
+ * fence_ops.release: unlink the pt from its timeline's lists, let the
+ * driver free its private state, then drop the timeline ref taken in
+ * sync_pt_create() and free the fence itself.
+ */
+static void android_fence_release(struct fence *fence)
+{
+	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
+	struct sync_timeline *parent = sync_pt_parent(pt);
+	unsigned long flags;
+
+	/* fence->lock is the timeline's child_list_lock (see sync_pt_create) */
+	spin_lock_irqsave(fence->lock, flags);
+	list_del(&pt->child_list);
+	if (WARN_ON_ONCE(!list_empty(&pt->active_list)))
+		list_del(&pt->active_list);
+	spin_unlock_irqrestore(fence->lock, flags);
+
+	if (parent->ops->free_pt)
+		parent->ops->free_pt(pt);
+
+	sync_timeline_put(parent);
+	fence_free(&pt->base);
+}
+
+/* fence_ops.signaled: ask the driver; negative answers become the status */
+static bool android_fence_signaled(struct fence *fence)
+{
+	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
+	struct sync_timeline *parent = sync_pt_parent(pt);
+	int ret;
+
+	ret = parent->ops->has_signaled(pt);
+	if (ret < 0)
+		fence->status = ret;
+	return ret;
+}
+
+/*
+ * fence_ops.enable_signaling: put the pt on the timeline's active list so
+ * sync_timeline_signal() will retire it.
+ * NOTE(review): no local locking — relies on the fence core holding
+ * fence->lock (== child_list_lock) around this callback; confirm contract.
+ */
+static bool android_fence_enable_signaling(struct fence *fence)
+{
+	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
+	struct sync_timeline *parent = sync_pt_parent(pt);
+
+	if (android_fence_signaled(fence))
+		return false;
+
+	list_add_tail(&pt->active_list, &parent->active_list_head);
+	return true;
+}
+
+/* fence_ops.fill_driver_data: optional driver hook; 0 bytes if absent */
+static int android_fence_fill_driver_data(struct fence *fence,
+					  void *data, int size)
+{
+	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
+	struct sync_timeline *parent = sync_pt_parent(pt);
+
+	if (!parent->ops->fill_driver_data)
+		return 0;
+	return parent->ops->fill_driver_data(pt, data, size);
+}
+
+/* fence_ops.fence_value_str: empty string when the driver has no hook */
+static void android_fence_value_str(struct fence *fence,
+				    char *str, int size)
+{
+	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
+	struct sync_timeline *parent = sync_pt_parent(pt);
+
+	if (!parent->ops->pt_value_str) {
+		if (size)
+			*str = 0;
+		return;
+	}
+	parent->ops->pt_value_str(pt, str, size);
+}
+
+/* fence_ops.timeline_value_str: empty string when the driver has no hook */
+static void android_fence_timeline_value_str(struct fence *fence,
+					     char *str, int size)
+{
+	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
+	struct sync_timeline *parent = sync_pt_parent(pt);
+
+	if (!parent->ops->timeline_value_str) {
+		if (size)
+			*str = 0;
+		return;
+	}
+	parent->ops->timeline_value_str(parent, str, size);
+}
+
+/* dma-fence ops shared by every sync_pt (installed in sync_pt_create) */
+static const struct fence_ops android_fence_ops = {
+	.get_driver_name = android_fence_get_driver_name,
+	.get_timeline_name = android_fence_get_timeline_name,
+	.enable_signaling = android_fence_enable_signaling,
+	.signaled = android_fence_signaled,
+	.wait = fence_default_wait,
+	.release = android_fence_release,
+	.fill_driver_data = android_fence_fill_driver_data,
+	.fence_value_str = android_fence_value_str,
+	.timeline_value_str = android_fence_timeline_value_str,
+};
+
+/* kref release: detach callbacks (if still pending) and drop pt refs */
+static void sync_fence_free(struct kref *kref)
+{
+	struct sync_fence *fence = container_of(kref, struct sync_fence, kref);
+	int i, status = atomic_read(&fence->status);
+
+	for (i = 0; i < fence->num_fences; ++i) {
+		/* nonzero status means callbacks may still be registered */
+		if (status)
+			fence_remove_callback(fence->cbs[i].sync_pt,
+					      &fence->cbs[i].cb);
+		fence_put(fence->cbs[i].sync_pt);
+	}
+
+	kfree(fence);
+}
+
+/* file release: last fd closed — unhook from debugfs, drop the kref */
+static int sync_fence_release(struct inode *inode, struct file *file)
+{
+	struct sync_fence *fence = file->private_data;
+
+	sync_fence_debug_remove(fence);
+
+	kref_put(&fence->kref, sync_fence_free);
+	return 0;
+}
+
+/* poll: POLLIN once signaled, POLLERR if the fence carries an error */
+static unsigned int sync_fence_poll(struct file *file, poll_table *wait)
+{
+	struct sync_fence *fence = file->private_data;
+	int status;
+
+	poll_wait(file, &fence->wq, wait);
+
+	status = atomic_read(&fence->status);
+
+	if (!status)
+		return POLLIN;
+	else if (status < 0)
+		return POLLERR;
+	return 0;
+}
+
+/* SYNC_IOC_WAIT: wait with a user-supplied ms timeout (negative = forever) */
+static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg)
+{
+	__s32 value;
+
+	if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
+		return -EFAULT;
+
+	return sync_fence_wait(fence, value);
+}
+
+/*
+ * SYNC_IOC_MERGE: merge this fence with the fence in data.fd2 and return
+ * the merged fence on a fresh fd via data.fence.
+ */
+static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg)
+{
+	int fd = get_unused_fd_flags(O_CLOEXEC);
+	int err;
+	struct sync_fence *fence2, *fence3;
+	struct sync_merge_data data;
+
+	if (fd < 0)
+		return fd;
+
+	if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
+		err = -EFAULT;
+		goto err_put_fd;
+	}
+
+	fence2 = sync_fence_fdget(data.fd2);
+	if (fence2 == NULL) {
+		err = -ENOENT;
+		goto err_put_fd;
+	}
+
+	/* userspace-supplied name may not be NUL-terminated */
+	data.name[sizeof(data.name) - 1] = '\0';
+	fence3 = sync_fence_merge(data.name, fence, fence2);
+	if (fence3 == NULL) {
+		err = -ENOMEM;
+		goto err_put_fence2;
+	}
+
+	data.fence = fd;
+	if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
+		err = -EFAULT;
+		goto err_put_fence3;
+	}
+
+	/* publish only after the fd number reached userspace successfully */
+	sync_fence_install(fence3, fd);
+	sync_fence_put(fence2);
+	return 0;
+
+err_put_fence3:
+	sync_fence_put(fence3);
+
+err_put_fence2:
+	sync_fence_put(fence2);
+
+err_put_fd:
+	put_unused_fd(fd);
+	return err;
+}
+
+/*
+ * Serialize one fence's sync_pt_info record into @data. Returns bytes
+ * written (info->len) or -ENOMEM if @size cannot hold the fixed header.
+ */
+static int sync_fill_pt_info(struct fence *fence, void *data, int size)
+{
+	struct sync_pt_info *info = data;
+	int ret;
+
+	if (size < sizeof(struct sync_pt_info))
+		return -ENOMEM;
+
+	info->len = sizeof(struct sync_pt_info);
+
+	if (fence->ops->fill_driver_data) {
+		ret = fence->ops->fill_driver_data(fence, info->driver_data,
+						   size - sizeof(*info));
+		if (ret < 0)
+			return ret;
+
+		info->len += ret;
+	}
+
+	strlcpy(info->obj_name, fence->ops->get_timeline_name(fence),
+		sizeof(info->obj_name));
+	strlcpy(info->driver_name, fence->ops->get_driver_name(fence),
+		sizeof(info->driver_name));
+	/* uapi status: 1 = signaled, 0 = pending, <0 = error */
+	if (fence_is_signaled(fence))
+		info->status = fence->status >= 0 ? 1 : fence->status;
+	else
+		info->status = 0;
+	info->timestamp_ns = ktime_to_ns(fence->timestamp);
+
+	return info->len;
+}
+
+/*
+ * SYNC_IOC_FENCE_INFO: copy fence name/status plus one sync_pt_info per
+ * underlying fence back to userspace. Userspace supplies the buffer size
+ * in the leading __u32; it is capped at 4096 bytes.
+ */
+static long sync_fence_ioctl_fence_info(struct sync_fence *fence,
+					unsigned long arg)
+{
+	struct sync_fence_info_data *data;
+	__u32 size;
+	__u32 len = 0;
+	int ret, i;
+
+	if (copy_from_user(&size, (void __user *)arg, sizeof(size)))
+		return -EFAULT;
+
+	if (size < sizeof(struct sync_fence_info_data))
+		return -EINVAL;
+
+	if (size > 4096)
+		size = 4096;
+
+	data = kzalloc(size, GFP_KERNEL);
+	if (data == NULL)
+		return -ENOMEM;
+
+	strlcpy(data->name, fence->name, sizeof(data->name));
+	data->status = atomic_read(&fence->status);
+	/* convert the pending-count to uapi convention: 1 done, 0 pending */
+	if (data->status >= 0)
+		data->status = !data->status;
+
+	len = sizeof(struct sync_fence_info_data);
+
+	for (i = 0; i < fence->num_fences; ++i) {
+		struct fence *pt = fence->cbs[i].sync_pt;
+
+		ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len);
+
+		if (ret < 0)
+			goto out;
+
+		len += ret;
+	}
+
+	data->len = len;
+
+	if (copy_to_user((void __user *)arg, data, len))
+		ret = -EFAULT;
+	else
+		ret = 0;
+
+out:
+	kfree(data);
+
+	return ret;
+}
+
+/* ioctl dispatch for sync_fence fds */
+static long sync_fence_ioctl(struct file *file, unsigned int cmd,
+			     unsigned long arg)
+{
+	struct sync_fence *fence = file->private_data;
+
+	switch (cmd) {
+	case SYNC_IOC_WAIT:
+		return sync_fence_ioctl_wait(fence, arg);
+
+	case SYNC_IOC_MERGE:
+		return sync_fence_ioctl_merge(fence, arg);
+
+	case SYNC_IOC_FENCE_INFO:
+		return sync_fence_ioctl_fence_info(fence, arg);
+
+	default:
+		return -ENOTTY;
+	}
+}
+
+/* fops for the anonymous inode created in sync_fence_alloc() */
+static const struct file_operations sync_fence_fops = {
+	.release = sync_fence_release,
+	.poll = sync_fence_poll,
+	.unlocked_ioctl = sync_fence_ioctl,
+	.compat_ioctl = sync_fence_ioctl,
+};
+
+
diff --git a/drivers/android/sync.h b/drivers/android/sync.h
new file mode 100644
index 0000000..4ccff01
--- /dev/null
+++ b/drivers/android/sync.h
@@ -0,0 +1,366 @@
+/*
+ * drivers/android/sync.h
+ *
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_SYNC_H
+#define _LINUX_SYNC_H
+
+#include <linux/types.h>
+#include <linux/kref.h>
+#include <linux/ktime.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/fence.h>
+
+#include <uapi/sync/sync.h>
+
+struct sync_timeline;
+struct sync_pt;
+struct sync_fence;
+
+/**
+ * struct sync_timeline_ops - sync object implementation ops
+ * @driver_name: name of the implementation
+ * @dup: duplicate a sync_pt
+ * @has_signaled: returns:
+ * 1 if pt has signaled
+ * 0 if pt has not signaled
+ * <0 on error
+ * @compare: returns:
+ * 1 if b will signal before a
+ * 0 if a and b will signal at the same time
+ * -1 if a will signal before b
+ * @free_pt: called before sync_pt is freed
+ * @release_obj: called before sync_timeline is freed
+ * @fill_driver_data: write implementation specific driver data to data.
+ * should return an error if there is not enough room
+ * as specified by size. This information is returned
+ * to userspace by SYNC_IOC_FENCE_INFO.
+ * @timeline_value_str: fill str with the value of the sync_timeline's counter
+ * @pt_value_str: fill str with the value of the sync_pt
+ */
+struct sync_timeline_ops {
+ const char *driver_name;
+
+ /* required */
+ struct sync_pt * (*dup)(struct sync_pt *pt);
+
+ /* required */
+ int (*has_signaled)(struct sync_pt *pt);
+
+ /* required */
+ int (*compare)(struct sync_pt *a, struct sync_pt *b);
+
+ /* optional */
+ void (*free_pt)(struct sync_pt *sync_pt);
+
+ /* optional */
+ void (*release_obj)(struct sync_timeline *sync_timeline);
+
+ /* optional */
+ int (*fill_driver_data)(struct sync_pt *syncpt, void *data, int size);
+
+ /* optional */
+ void (*timeline_value_str)(struct sync_timeline *timeline, char *str,
+ int size);
+
+ /* optional */
+ void (*pt_value_str)(struct sync_pt *pt, char *str, int size);
+};
+
+/**
+ * struct sync_timeline - sync object
+ * @kref: reference count on fence.
+ * @ops: ops that define the implementation of the sync_timeline
+ * @name: name of the sync_timeline. Useful for debugging
+ * @destroyed: set when sync_timeline is destroyed
+ * @child_list_head: list of children sync_pts for this sync_timeline
+ * @child_list_lock: lock protecting @child_list_head, destroyed, and
+ * sync_pt.status
+ * @active_list_head: list of active (unsignaled/errored) sync_pts
+ * @sync_timeline_list: membership in global sync_timeline_list
+ */
+struct sync_timeline {
+ struct kref kref;
+ const struct sync_timeline_ops *ops;
+ char name[32];
+
+ /* protected by child_list_lock */
+ bool destroyed;
+ int context, value;
+
+ struct list_head child_list_head;
+ spinlock_t child_list_lock;
+
+ struct list_head active_list_head;
+
+#ifdef CONFIG_DEBUG_FS
+ struct list_head sync_timeline_list;
+#endif
+};
+
+/**
+ * struct sync_pt - sync point
+ * @base: base fence object
+ * @child_list: membership in sync_timeline.child_list_head
+ * @active_list: membership in sync_timeline.active_list_head
+ *
+ * NOTE(review): the original kerneldoc also listed @signaled_list,
+ * @fence, @pt_list, @status and @timestamp, but struct sync_pt no
+ * longer has those members -- status and timestamp moved into the
+ * embedded struct fence. The stale entries are removed here so
+ * kernel-doc does not describe nonexistent fields.
+ */
+struct sync_pt {
+ struct fence base;
+
+ struct list_head child_list;
+ struct list_head active_list;
+};
+
+static inline struct sync_timeline *sync_pt_parent(struct sync_pt *pt)
+{
+ return container_of(pt->base.lock, struct sync_timeline,
+ child_list_lock);
+}
+
+struct sync_fence_cb {
+ struct fence_cb cb;
+ struct fence *sync_pt;
+ struct sync_fence *fence;
+};
+
+/**
+ * struct sync_fence - sync fence
+ * @file: file representing this fence
+ * @kref: reference count on fence.
+ * @name: name of sync_fence. Useful for debugging
+ * @pt_list_head: list of sync_pts in the fence. immutable once fence
+ * is created
+ * @status: 0: signaled, >0:active, <0: error
+ *
+ * @wq: wait queue for fence signaling
+ * @sync_fence_list: membership in global fence list
+ */
+struct sync_fence {
+ struct file *file;
+ struct kref kref;
+ char name[32];
+#ifdef CONFIG_DEBUG_FS
+ struct list_head sync_fence_list;
+#endif
+ int num_fences;
+
+ wait_queue_head_t wq;
+ atomic_t status;
+
+ struct sync_fence_cb cbs[];
+};
+
+struct sync_fence_waiter;
+typedef void (*sync_callback_t)(struct sync_fence *fence,
+ struct sync_fence_waiter *waiter);
+
+/**
+ * struct sync_fence_waiter - metadata for asynchronous waiter on a fence
+ * @waiter_list: membership in sync_fence.waiter_list_head
+ * @callback: function pointer to call when fence signals
+ * @callback_data: pointer to pass to @callback
+ */
+struct sync_fence_waiter {
+ wait_queue_t work;
+ sync_callback_t callback;
+};
+
+static inline void sync_fence_waiter_init(struct sync_fence_waiter *waiter,
+ sync_callback_t callback)
+{
+ INIT_LIST_HEAD(&waiter->work.task_list);
+ waiter->callback = callback;
+}
+
+/*
+ * API for sync_timeline implementers
+ */
+
+/**
+ * sync_timeline_create() - creates a sync object
+ * @ops: specifies the implementation ops for the object
+ * @size: size to allocate for this obj
+ * @name: sync_timeline name
+ *
+ * Creates a new sync_timeline which will use the implementation specified by
+ * @ops. @size bytes will be allocated allowing for implementation specific
+ * data to be kept after the generic sync_timeline struct.
+ */
+struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
+ int size, const char *name);
+
+/**
+ * sync_timeline_destroy() - destroys a sync object
+ * @obj: sync_timeline to destroy
+ *
+ * A sync implementation should call this when the @obj is going away
+ * (i.e. module unload.) @obj won't actually be freed until all its children
+ * sync_pts are freed.
+ */
+void sync_timeline_destroy(struct sync_timeline *obj);
+
+/**
+ * sync_timeline_signal() - signal a status change on a sync_timeline
+ * @obj: sync_timeline to signal
+ *
+ * A sync implementation should call this any time one of its sync_pts
+ * has signaled or has an error condition.
+ */
+void sync_timeline_signal(struct sync_timeline *obj);
+
+/**
+ * sync_pt_create() - creates a sync pt
+ * @parent: sync_pt's parent sync_timeline
+ * @size: size to allocate for this pt
+ *
+ * Creates a new sync_pt as a child of @parent. @size bytes will be
+ * allocated allowing for implementation specific data to be kept after
+ * the generic sync_timeline struct.
+ */
+struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size);
+
+/**
+ * sync_pt_free() - frees a sync pt
+ * @pt: sync_pt to free
+ *
+ * This should only be called on sync_pts which have been created but
+ * not added to a fence.
+ */
+void sync_pt_free(struct sync_pt *pt);
+
+/**
+ * sync_fence_create() - creates a sync fence
+ * @name: name of fence to create
+ * @pt: sync_pt to add to the fence
+ *
+ * Creates a fence containing @pt. Once this is called, the fence takes
+ * ownership of @pt.
+ */
+struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt);
+
+/**
+ * sync_fence_create_dma() - creates a sync fence from dma-fence
+ * @name: name of fence to create
+ * @pt: dma-fence to add to the fence
+ *
+ * Creates a fence containing @pt. Once this is called, the fence takes
+ * ownership of @pt.
+ */
+struct sync_fence *sync_fence_create_dma(const char *name, struct fence *pt);
+
+/*
+ * API for sync_fence consumers
+ */
+
+/**
+ * sync_fence_merge() - merge two fences
+ * @name: name of new fence
+ * @a: fence a
+ * @b: fence b
+ *
+ * Creates a new fence which contains copies of all the sync_pts in both
+ * @a and @b. @a and @b remain valid, independent fences.
+ */
+struct sync_fence *sync_fence_merge(const char *name,
+ struct sync_fence *a, struct sync_fence *b);
+
+/**
+ * sync_fence_fdget() - get a fence from an fd
+ * @fd: fd referencing a fence
+ *
+ * Ensures @fd references a valid fence, increments the refcount of the backing
+ * file, and returns the fence.
+ */
+struct sync_fence *sync_fence_fdget(int fd);
+
+/**
+ * sync_fence_put() - puts a reference of a sync fence
+ * @fence: fence to put
+ *
+ * Puts a reference on @fence. If this is the last reference, the fence and
+ * all its sync_pts will be freed.
+ */
+void sync_fence_put(struct sync_fence *fence);
+
+/**
+ * sync_fence_install() - installs a fence into a file descriptor
+ * @fence: fence to install
+ * @fd: file descriptor in which to install the fence
+ *
+ * Installs @fence into @fd. @fd's should be acquired through
+ * get_unused_fd_flags(O_CLOEXEC).
+ */
+void sync_fence_install(struct sync_fence *fence, int fd);
+
+/**
+ * sync_fence_wait_async() - registers an async wait on the fence
+ * @fence: fence to wait on
+ * @waiter: waiter callback struct
+ *
+ * Returns 1 if @fence has already signaled.
+ *
+ * Registers a callback to be called when @fence signals or has an error.
+ * @waiter should be initialized with sync_fence_waiter_init().
+ */
+int sync_fence_wait_async(struct sync_fence *fence,
+ struct sync_fence_waiter *waiter);
+
+/**
+ * sync_fence_cancel_async() - cancels an async wait
+ * @fence: fence to wait on
+ * @waiter: waiter callback struct
+ *
+ * returns 0 if waiter was removed from fence's async waiter list.
+ * returns -ENOENT if waiter was not found on fence's async waiter list.
+ *
+ * Cancels a previously registered async wait. Will fail gracefully if
+ * @waiter was never registered or if @fence has already signaled @waiter.
+ */
+int sync_fence_cancel_async(struct sync_fence *fence,
+ struct sync_fence_waiter *waiter);
+
+/**
+ * sync_fence_wait() - wait on fence
+ * @fence: fence to wait on
+ * @timeout: timeout in ms
+ *
+ * Wait for @fence to be signaled or have an error. Waits indefinitely
+ * if @timeout < 0
+ */
+int sync_fence_wait(struct sync_fence *fence, long timeout);
+
+#ifdef CONFIG_DEBUG_FS
+
+void sync_timeline_debug_add(struct sync_timeline *obj);
+void sync_timeline_debug_remove(struct sync_timeline *obj);
+void sync_fence_debug_add(struct sync_fence *fence);
+void sync_fence_debug_remove(struct sync_fence *fence);
+void sync_dump(void);
+
+#else
+# define sync_timeline_debug_add(obj)
+# define sync_timeline_debug_remove(obj)
+# define sync_fence_debug_add(fence)
+# define sync_fence_debug_remove(fence)
+# define sync_dump()
+#endif
+int sync_fence_wake_up_wq(wait_queue_t *curr, unsigned mode,
+ int wake_flags, void *key);
+
+#endif /* _LINUX_SYNC_H */
diff --git a/drivers/android/sync_debug.c b/drivers/android/sync_debug.c
new file mode 100644
index 0000000..02a1649
--- /dev/null
+++ b/drivers/android/sync_debug.c
@@ -0,0 +1,255 @@
+/*
+ * drivers/android/sync_debug.c
+ *
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/anon_inodes.h>
+#include <linux/time64.h>
+#include "sync.h"
+
+#ifdef CONFIG_DEBUG_FS
+
+static LIST_HEAD(sync_timeline_list_head);
+static DEFINE_SPINLOCK(sync_timeline_list_lock);
+static LIST_HEAD(sync_fence_list_head);
+static DEFINE_SPINLOCK(sync_fence_list_lock);
+
+void sync_timeline_debug_add(struct sync_timeline *obj)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&sync_timeline_list_lock, flags);
+ list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head);
+ spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
+}
+
+void sync_timeline_debug_remove(struct sync_timeline *obj)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&sync_timeline_list_lock, flags);
+ list_del(&obj->sync_timeline_list);
+ spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
+}
+
+void sync_fence_debug_add(struct sync_fence *fence)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&sync_fence_list_lock, flags);
+ list_add_tail(&fence->sync_fence_list, &sync_fence_list_head);
+ spin_unlock_irqrestore(&sync_fence_list_lock, flags);
+}
+
+void sync_fence_debug_remove(struct sync_fence *fence)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&sync_fence_list_lock, flags);
+ list_del(&fence->sync_fence_list);
+ spin_unlock_irqrestore(&sync_fence_list_lock, flags);
+}
+
+static const char *sync_status_str(int status)
+{
+ if (status == 0)
+ return "signaled";
+
+ if (status > 0)
+ return "active";
+
+ return "error";
+}
+
+static void sync_print_pt(struct seq_file *s, struct fence *pt, bool fence)
+{
+ int status = 1;
+
+ if (fence_is_signaled_locked(pt))
+ status = pt->status;
+
+ seq_printf(s, " %s%spt %s",
+ fence && pt->ops->get_timeline_name ?
+ pt->ops->get_timeline_name(pt) : "",
+ fence ? "_" : "",
+ sync_status_str(status));
+
+ if (status <= 0) {
+ struct timespec64 ts64 =
+ ktime_to_timespec64(pt->timestamp);
+
+ seq_printf(s, "@%lld.%09ld", (s64)ts64.tv_sec, ts64.tv_nsec);
+ }
+
+ if ((!fence || pt->ops->timeline_value_str) &&
+ pt->ops->fence_value_str) {
+ char value[64];
+ bool success;
+
+ pt->ops->fence_value_str(pt, value, sizeof(value));
+ success = strlen(value);
+
+ if (success)
+ seq_printf(s, ": %s", value);
+
+ if (success && fence) {
+ pt->ops->timeline_value_str(pt, value, sizeof(value));
+
+ if (strlen(value))
+ seq_printf(s, " / %s", value);
+ }
+ }
+
+ seq_puts(s, "\n");
+}
+
+static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
+{
+ struct list_head *pos;
+ unsigned long flags;
+
+ seq_printf(s, "%s %s", obj->name, obj->ops->driver_name);
+
+ if (obj->ops->timeline_value_str) {
+ char value[64];
+
+ obj->ops->timeline_value_str(obj, value, sizeof(value));
+ seq_printf(s, ": %s", value);
+ }
+
+ seq_puts(s, "\n");
+
+ spin_lock_irqsave(&obj->child_list_lock, flags);
+ list_for_each(pos, &obj->child_list_head) {
+ struct sync_pt *pt =
+ container_of(pos, struct sync_pt, child_list);
+ sync_print_pt(s, &pt->base, false);
+ }
+ spin_unlock_irqrestore(&obj->child_list_lock, flags);
+}
+
+static void sync_print_fence(struct seq_file *s, struct sync_fence *fence)
+{
+ wait_queue_t *pos;
+ unsigned long flags;
+ int i;
+
+ seq_printf(s, "[%p] %s: %s\n", fence, fence->name,
+ sync_status_str(atomic_read(&fence->status)));
+
+ for (i = 0; i < fence->num_fences; ++i)
+ sync_print_pt(s, fence->cbs[i].sync_pt, true);
+
+ spin_lock_irqsave(&fence->wq.lock, flags);
+ list_for_each_entry(pos, &fence->wq.task_list, task_list) {
+ struct sync_fence_waiter *waiter;
+
+ if (pos->func != &sync_fence_wake_up_wq)
+ continue;
+
+ waiter = container_of(pos, struct sync_fence_waiter, work);
+
+ seq_printf(s, "waiter %pF\n", waiter->callback);
+ }
+ spin_unlock_irqrestore(&fence->wq.lock, flags);
+}
+
+static int sync_debugfs_show(struct seq_file *s, void *unused)
+{
+ unsigned long flags;
+ struct list_head *pos;
+
+ seq_puts(s, "objs:\n--------------\n");
+
+ spin_lock_irqsave(&sync_timeline_list_lock, flags);
+ list_for_each(pos, &sync_timeline_list_head) {
+ struct sync_timeline *obj =
+ container_of(pos, struct sync_timeline,
+ sync_timeline_list);
+
+ sync_print_obj(s, obj);
+ seq_puts(s, "\n");
+ }
+ spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
+
+ seq_puts(s, "fences:\n--------------\n");
+
+ spin_lock_irqsave(&sync_fence_list_lock, flags);
+ list_for_each(pos, &sync_fence_list_head) {
+ struct sync_fence *fence =
+ container_of(pos, struct sync_fence, sync_fence_list);
+
+ sync_print_fence(s, fence);
+ seq_puts(s, "\n");
+ }
+ spin_unlock_irqrestore(&sync_fence_list_lock, flags);
+ return 0;
+}
+
+static int sync_debugfs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, sync_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations sync_debugfs_fops = {
+ .open = sync_debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static __init int sync_debugfs_init(void)
+{
+ debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops);
+ return 0;
+}
+late_initcall(sync_debugfs_init);
+
+#define DUMP_CHUNK 256
+static char sync_dump_buf[64 * 1024];
+void sync_dump(void)
+{
+ struct seq_file s = {
+ .buf = sync_dump_buf,
+ .size = sizeof(sync_dump_buf) - 1,
+ };
+ int i;
+
+ sync_debugfs_show(&s, NULL);
+
+ for (i = 0; i < s.count; i += DUMP_CHUNK) {
+ if ((s.count - i) > DUMP_CHUNK) {
+ char c = s.buf[i + DUMP_CHUNK];
+
+ s.buf[i + DUMP_CHUNK] = 0;
+ pr_cont("%s", s.buf + i);
+ s.buf[i + DUMP_CHUNK] = c;
+ } else {
+ s.buf[s.count] = 0;
+ pr_cont("%s", s.buf + i);
+ }
+ }
+}
+
+#endif
diff --git a/drivers/android/trace/sync.h b/drivers/android/trace/sync.h
new file mode 100644
index 0000000..7dcf2fe
--- /dev/null
+++ b/drivers/android/trace/sync.h
@@ -0,0 +1,82 @@
+#undef TRACE_SYSTEM
+#define TRACE_INCLUDE_PATH ../../drivers/android/trace
+#define TRACE_SYSTEM sync
+
+#if !defined(_TRACE_SYNC_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SYNC_H
+
+#include "../sync.h"
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(sync_timeline,
+ TP_PROTO(struct sync_timeline *timeline),
+
+ TP_ARGS(timeline),
+
+ TP_STRUCT__entry(
+ __string(name, timeline->name)
+ __array(char, value, 32)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, timeline->name);
+ if (timeline->ops->timeline_value_str) {
+ timeline->ops->timeline_value_str(timeline,
+ __entry->value,
+ sizeof(__entry->value));
+ } else {
+ __entry->value[0] = '\0';
+ }
+ ),
+
+ TP_printk("name=%s value=%s", __get_str(name), __entry->value)
+);
+
+TRACE_EVENT(sync_wait,
+ TP_PROTO(struct sync_fence *fence, int begin),
+
+ TP_ARGS(fence, begin),
+
+ TP_STRUCT__entry(
+ __string(name, fence->name)
+ __field(s32, status)
+ __field(u32, begin)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, fence->name);
+ __entry->status = atomic_read(&fence->status);
+ __entry->begin = begin;
+ ),
+
+ TP_printk("%s name=%s state=%d", __entry->begin ? "begin" : "end",
+ __get_str(name), __entry->status)
+);
+
+TRACE_EVENT(sync_pt,
+ TP_PROTO(struct fence *pt),
+
+ TP_ARGS(pt),
+
+ TP_STRUCT__entry(
+ __string(timeline, pt->ops->get_timeline_name(pt))
+ __array(char, value, 32)
+ ),
+
+ TP_fast_assign(
+ __assign_str(timeline, pt->ops->get_timeline_name(pt));
+ if (pt->ops->fence_value_str) {
+ pt->ops->fence_value_str(pt, __entry->value,
+ sizeof(__entry->value));
+ } else {
+ __entry->value[0] = '\0';
+ }
+ ),
+
+ TP_printk("name=%s value=%s", __get_str(timeline), __entry->value)
+);
+
+#endif /* if !defined(_TRACE_SYNC_H) || defined(TRACE_HEADER_MULTI_READ) */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
index 42b1512..4b18fee 100644
--- a/drivers/staging/android/Kconfig
+++ b/drivers/staging/android/Kconfig
@@ -38,34 +38,6 @@ config ANDROID_LOW_MEMORY_KILLER
scripts (/init.rc), and it defines priority values with minimum free memory size
for each priority.
-config SYNC
- bool "Synchronization framework"
- default n
- select ANON_INODES
- select DMA_SHARED_BUFFER
- ---help---
- This option enables the framework for synchronization between multiple
- drivers. Sync implementations can take advantage of hardware
- synchronization built into devices like GPUs.
-
-config SW_SYNC
- bool "Software synchronization objects"
- default n
- depends on SYNC
- ---help---
- A sync object driver that uses a 32bit counter to coordinate
- synchronization. Useful when there is no hardware primitive backing
- the synchronization.
-
-config SW_SYNC_USER
- bool "Userspace API for SW_SYNC"
- default n
- depends on SW_SYNC
- ---help---
- Provides a user space API to the sw sync object.
- *WARNING* improper use of this can result in deadlocking kernel
- drivers from userspace.
-
source "drivers/staging/android/ion/Kconfig"
endif # if ANDROID
diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile
index c7b6c99..355ad0e 100644
--- a/drivers/staging/android/Makefile
+++ b/drivers/staging/android/Makefile
@@ -6,5 +6,3 @@ obj-$(CONFIG_ASHMEM) += ashmem.o
obj-$(CONFIG_ANDROID_TIMED_OUTPUT) += timed_output.o
obj-$(CONFIG_ANDROID_TIMED_GPIO) += timed_gpio.o
obj-$(CONFIG_ANDROID_LOW_MEMORY_KILLER) += lowmemorykiller.o
-obj-$(CONFIG_SYNC) += sync.o sync_debug.o
-obj-$(CONFIG_SW_SYNC) += sw_sync.o
diff --git a/drivers/staging/android/sw_sync.c b/drivers/staging/android/sw_sync.c
deleted file mode 100644
index c4ff167..0000000
--- a/drivers/staging/android/sw_sync.c
+++ /dev/null
@@ -1,260 +0,0 @@
-/*
- * drivers/base/sw_sync.c
- *
- * Copyright (C) 2012 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/export.h>
-#include <linux/file.h>
-#include <linux/fs.h>
-#include <linux/miscdevice.h>
-#include <linux/syscalls.h>
-#include <linux/uaccess.h>
-
-#include "sw_sync.h"
-
-static int sw_sync_cmp(u32 a, u32 b)
-{
- if (a == b)
- return 0;
-
- return ((s32)a - (s32)b) < 0 ? -1 : 1;
-}
-
-struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj, u32 value)
-{
- struct sw_sync_pt *pt;
-
- pt = (struct sw_sync_pt *)
- sync_pt_create(&obj->obj, sizeof(struct sw_sync_pt));
-
- pt->value = value;
-
- return (struct sync_pt *)pt;
-}
-EXPORT_SYMBOL(sw_sync_pt_create);
-
-static struct sync_pt *sw_sync_pt_dup(struct sync_pt *sync_pt)
-{
- struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt;
- struct sw_sync_timeline *obj =
- (struct sw_sync_timeline *)sync_pt_parent(sync_pt);
-
- return (struct sync_pt *)sw_sync_pt_create(obj, pt->value);
-}
-
-static int sw_sync_pt_has_signaled(struct sync_pt *sync_pt)
-{
- struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt;
- struct sw_sync_timeline *obj =
- (struct sw_sync_timeline *)sync_pt_parent(sync_pt);
-
- return sw_sync_cmp(obj->value, pt->value) >= 0;
-}
-
-static int sw_sync_pt_compare(struct sync_pt *a, struct sync_pt *b)
-{
- struct sw_sync_pt *pt_a = (struct sw_sync_pt *)a;
- struct sw_sync_pt *pt_b = (struct sw_sync_pt *)b;
-
- return sw_sync_cmp(pt_a->value, pt_b->value);
-}
-
-static int sw_sync_fill_driver_data(struct sync_pt *sync_pt,
- void *data, int size)
-{
- struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt;
-
- if (size < sizeof(pt->value))
- return -ENOMEM;
-
- memcpy(data, &pt->value, sizeof(pt->value));
-
- return sizeof(pt->value);
-}
-
-static void sw_sync_timeline_value_str(struct sync_timeline *sync_timeline,
- char *str, int size)
-{
- struct sw_sync_timeline *timeline =
- (struct sw_sync_timeline *)sync_timeline;
- snprintf(str, size, "%d", timeline->value);
-}
-
-static void sw_sync_pt_value_str(struct sync_pt *sync_pt,
- char *str, int size)
-{
- struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt;
-
- snprintf(str, size, "%d", pt->value);
-}
-
-static struct sync_timeline_ops sw_sync_timeline_ops = {
- .driver_name = "sw_sync",
- .dup = sw_sync_pt_dup,
- .has_signaled = sw_sync_pt_has_signaled,
- .compare = sw_sync_pt_compare,
- .fill_driver_data = sw_sync_fill_driver_data,
- .timeline_value_str = sw_sync_timeline_value_str,
- .pt_value_str = sw_sync_pt_value_str,
-};
-
-struct sw_sync_timeline *sw_sync_timeline_create(const char *name)
-{
- struct sw_sync_timeline *obj = (struct sw_sync_timeline *)
- sync_timeline_create(&sw_sync_timeline_ops,
- sizeof(struct sw_sync_timeline),
- name);
-
- return obj;
-}
-EXPORT_SYMBOL(sw_sync_timeline_create);
-
-void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc)
-{
- obj->value += inc;
-
- sync_timeline_signal(&obj->obj);
-}
-EXPORT_SYMBOL(sw_sync_timeline_inc);
-
-#ifdef CONFIG_SW_SYNC_USER
-/* *WARNING*
- *
- * improper use of this can result in deadlocking kernel drivers from userspace.
- */
-
-/* opening sw_sync create a new sync obj */
-static int sw_sync_open(struct inode *inode, struct file *file)
-{
- struct sw_sync_timeline *obj;
- char task_comm[TASK_COMM_LEN];
-
- get_task_comm(task_comm, current);
-
- obj = sw_sync_timeline_create(task_comm);
- if (!obj)
- return -ENOMEM;
-
- file->private_data = obj;
-
- return 0;
-}
-
-static int sw_sync_release(struct inode *inode, struct file *file)
-{
- struct sw_sync_timeline *obj = file->private_data;
-
- sync_timeline_destroy(&obj->obj);
- return 0;
-}
-
-static long sw_sync_ioctl_create_fence(struct sw_sync_timeline *obj,
- unsigned long arg)
-{
- int fd = get_unused_fd_flags(O_CLOEXEC);
- int err;
- struct sync_pt *pt;
- struct sync_fence *fence;
- struct sw_sync_create_fence_data data;
-
- if (fd < 0)
- return fd;
-
- if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
- err = -EFAULT;
- goto err;
- }
-
- pt = sw_sync_pt_create(obj, data.value);
- if (!pt) {
- err = -ENOMEM;
- goto err;
- }
-
- data.name[sizeof(data.name) - 1] = '\0';
- fence = sync_fence_create(data.name, pt);
- if (!fence) {
- sync_pt_free(pt);
- err = -ENOMEM;
- goto err;
- }
-
- data.fence = fd;
- if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
- sync_fence_put(fence);
- err = -EFAULT;
- goto err;
- }
-
- sync_fence_install(fence, fd);
-
- return 0;
-
-err:
- put_unused_fd(fd);
- return err;
-}
-
-static long sw_sync_ioctl_inc(struct sw_sync_timeline *obj, unsigned long arg)
-{
- u32 value;
-
- if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
- return -EFAULT;
-
- sw_sync_timeline_inc(obj, value);
-
- return 0;
-}
-
-static long sw_sync_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- struct sw_sync_timeline *obj = file->private_data;
-
- switch (cmd) {
- case SW_SYNC_IOC_CREATE_FENCE:
- return sw_sync_ioctl_create_fence(obj, arg);
-
- case SW_SYNC_IOC_INC:
- return sw_sync_ioctl_inc(obj, arg);
-
- default:
- return -ENOTTY;
- }
-}
-
-static const struct file_operations sw_sync_fops = {
- .owner = THIS_MODULE,
- .open = sw_sync_open,
- .release = sw_sync_release,
- .unlocked_ioctl = sw_sync_ioctl,
- .compat_ioctl = sw_sync_ioctl,
-};
-
-static struct miscdevice sw_sync_dev = {
- .minor = MISC_DYNAMIC_MINOR,
- .name = "sw_sync",
- .fops = &sw_sync_fops,
-};
-
-static int __init sw_sync_device_init(void)
-{
- return misc_register(&sw_sync_dev);
-}
-device_initcall(sw_sync_device_init);
-
-#endif /* CONFIG_SW_SYNC_USER */
diff --git a/drivers/staging/android/sw_sync.h b/drivers/staging/android/sw_sync.h
deleted file mode 100644
index c87ae9e..0000000
--- a/drivers/staging/android/sw_sync.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * include/linux/sw_sync.h
- *
- * Copyright (C) 2012 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef _LINUX_SW_SYNC_H
-#define _LINUX_SW_SYNC_H
-
-#include <linux/types.h>
-#include <linux/kconfig.h>
-#include "sync.h"
-#include "uapi/sw_sync.h"
-
-struct sw_sync_timeline {
- struct sync_timeline obj;
-
- u32 value;
-};
-
-struct sw_sync_pt {
- struct sync_pt pt;
-
- u32 value;
-};
-
-#if IS_ENABLED(CONFIG_SW_SYNC)
-struct sw_sync_timeline *sw_sync_timeline_create(const char *name);
-void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc);
-
-struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj, u32 value);
-#else
-static inline struct sw_sync_timeline *sw_sync_timeline_create(const char *name)
-{
- return NULL;
-}
-
-static inline void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc)
-{
-}
-
-static inline struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj,
- u32 value)
-{
- return NULL;
-}
-#endif /* IS_ENABLED(CONFIG_SW_SYNC) */
-
-#endif /* _LINUX_SW_SYNC_H */
diff --git a/drivers/staging/android/sync.c b/drivers/staging/android/sync.c
deleted file mode 100644
index 7f0e919..0000000
--- a/drivers/staging/android/sync.c
+++ /dev/null
@@ -1,734 +0,0 @@
-/*
- * drivers/base/sync.c
- *
- * Copyright (C) 2012 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/debugfs.h>
-#include <linux/export.h>
-#include <linux/file.h>
-#include <linux/fs.h>
-#include <linux/kernel.h>
-#include <linux/poll.h>
-#include <linux/sched.h>
-#include <linux/seq_file.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-#include <linux/anon_inodes.h>
-
-#include "sync.h"
-
-#define CREATE_TRACE_POINTS
-#include "trace/sync.h"
-
-static const struct fence_ops android_fence_ops;
-static const struct file_operations sync_fence_fops;
-
-struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
- int size, const char *name)
-{
- struct sync_timeline *obj;
-
- if (size < sizeof(struct sync_timeline))
- return NULL;
-
- obj = kzalloc(size, GFP_KERNEL);
- if (obj == NULL)
- return NULL;
-
- kref_init(&obj->kref);
- obj->ops = ops;
- obj->context = fence_context_alloc(1);
- strlcpy(obj->name, name, sizeof(obj->name));
-
- INIT_LIST_HEAD(&obj->child_list_head);
- INIT_LIST_HEAD(&obj->active_list_head);
- spin_lock_init(&obj->child_list_lock);
-
- sync_timeline_debug_add(obj);
-
- return obj;
-}
-EXPORT_SYMBOL(sync_timeline_create);
-
-static void sync_timeline_free(struct kref *kref)
-{
- struct sync_timeline *obj =
- container_of(kref, struct sync_timeline, kref);
-
- sync_timeline_debug_remove(obj);
-
- if (obj->ops->release_obj)
- obj->ops->release_obj(obj);
-
- kfree(obj);
-}
-
-static void sync_timeline_get(struct sync_timeline *obj)
-{
- kref_get(&obj->kref);
-}
-
-static void sync_timeline_put(struct sync_timeline *obj)
-{
- kref_put(&obj->kref, sync_timeline_free);
-}
-
-void sync_timeline_destroy(struct sync_timeline *obj)
-{
- obj->destroyed = true;
- /*
- * Ensure timeline is marked as destroyed before
- * changing timeline's fences status.
- */
- smp_wmb();
-
- /*
- * signal any children that their parent is going away.
- */
- sync_timeline_signal(obj);
- sync_timeline_put(obj);
-}
-EXPORT_SYMBOL(sync_timeline_destroy);
-
-void sync_timeline_signal(struct sync_timeline *obj)
-{
- unsigned long flags;
- LIST_HEAD(signaled_pts);
- struct sync_pt *pt, *next;
-
- trace_sync_timeline(obj);
-
- spin_lock_irqsave(&obj->child_list_lock, flags);
-
- list_for_each_entry_safe(pt, next, &obj->active_list_head,
- active_list) {
- if (fence_is_signaled_locked(&pt->base))
- list_del_init(&pt->active_list);
- }
-
- spin_unlock_irqrestore(&obj->child_list_lock, flags);
-}
-EXPORT_SYMBOL(sync_timeline_signal);
-
-struct sync_pt *sync_pt_create(struct sync_timeline *obj, int size)
-{
- unsigned long flags;
- struct sync_pt *pt;
-
- if (size < sizeof(struct sync_pt))
- return NULL;
-
- pt = kzalloc(size, GFP_KERNEL);
- if (pt == NULL)
- return NULL;
-
- spin_lock_irqsave(&obj->child_list_lock, flags);
- sync_timeline_get(obj);
- fence_init(&pt->base, &android_fence_ops, &obj->child_list_lock,
- obj->context, ++obj->value);
- list_add_tail(&pt->child_list, &obj->child_list_head);
- INIT_LIST_HEAD(&pt->active_list);
- spin_unlock_irqrestore(&obj->child_list_lock, flags);
- return pt;
-}
-EXPORT_SYMBOL(sync_pt_create);
-
-void sync_pt_free(struct sync_pt *pt)
-{
- fence_put(&pt->base);
-}
-EXPORT_SYMBOL(sync_pt_free);
-
-static struct sync_fence *sync_fence_alloc(int size, const char *name)
-{
- struct sync_fence *fence;
-
- fence = kzalloc(size, GFP_KERNEL);
- if (fence == NULL)
- return NULL;
-
- fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops,
- fence, 0);
- if (IS_ERR(fence->file))
- goto err;
-
- kref_init(&fence->kref);
- strlcpy(fence->name, name, sizeof(fence->name));
-
- init_waitqueue_head(&fence->wq);
-
- return fence;
-
-err:
- kfree(fence);
- return NULL;
-}
-
-static void fence_check_cb_func(struct fence *f, struct fence_cb *cb)
-{
- struct sync_fence_cb *check;
- struct sync_fence *fence;
-
- check = container_of(cb, struct sync_fence_cb, cb);
- fence = check->fence;
-
- if (atomic_dec_and_test(&fence->status))
- wake_up_all(&fence->wq);
-}
-
-/* TODO: implement a create which takes more that one sync_pt */
-struct sync_fence *sync_fence_create_dma(const char *name, struct fence *pt)
-{
- struct sync_fence *fence;
-
- fence = sync_fence_alloc(offsetof(struct sync_fence, cbs[1]), name);
- if (fence == NULL)
- return NULL;
-
- fence->num_fences = 1;
- atomic_set(&fence->status, 1);
-
- fence->cbs[0].sync_pt = pt;
- fence->cbs[0].fence = fence;
- if (fence_add_callback(pt, &fence->cbs[0].cb, fence_check_cb_func))
- atomic_dec(&fence->status);
-
- sync_fence_debug_add(fence);
-
- return fence;
-}
-EXPORT_SYMBOL(sync_fence_create_dma);
-
-struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
-{
- return sync_fence_create_dma(name, &pt->base);
-}
-EXPORT_SYMBOL(sync_fence_create);
-
-struct sync_fence *sync_fence_fdget(int fd)
-{
- struct file *file = fget(fd);
-
- if (file == NULL)
- return NULL;
-
- if (file->f_op != &sync_fence_fops)
- goto err;
-
- return file->private_data;
-
-err:
- fput(file);
- return NULL;
-}
-EXPORT_SYMBOL(sync_fence_fdget);
-
-void sync_fence_put(struct sync_fence *fence)
-{
- fput(fence->file);
-}
-EXPORT_SYMBOL(sync_fence_put);
-
-void sync_fence_install(struct sync_fence *fence, int fd)
-{
- fd_install(fd, fence->file);
-}
-EXPORT_SYMBOL(sync_fence_install);
-
-static void sync_fence_add_pt(struct sync_fence *fence,
- int *i, struct fence *pt)
-{
- fence->cbs[*i].sync_pt = pt;
- fence->cbs[*i].fence = fence;
-
- if (!fence_add_callback(pt, &fence->cbs[*i].cb, fence_check_cb_func)) {
- fence_get(pt);
- (*i)++;
- }
-}
-
-struct sync_fence *sync_fence_merge(const char *name,
- struct sync_fence *a, struct sync_fence *b)
-{
- int num_fences = a->num_fences + b->num_fences;
- struct sync_fence *fence;
- int i, i_a, i_b;
- unsigned long size = offsetof(struct sync_fence, cbs[num_fences]);
-
- fence = sync_fence_alloc(size, name);
- if (fence == NULL)
- return NULL;
-
- atomic_set(&fence->status, num_fences);
-
- /*
- * Assume sync_fence a and b are both ordered and have no
- * duplicates with the same context.
- *
- * If a sync_fence can only be created with sync_fence_merge
- * and sync_fence_create, this is a reasonable assumption.
- */
- for (i = i_a = i_b = 0; i_a < a->num_fences && i_b < b->num_fences; ) {
- struct fence *pt_a = a->cbs[i_a].sync_pt;
- struct fence *pt_b = b->cbs[i_b].sync_pt;
-
- if (pt_a->context < pt_b->context) {
- sync_fence_add_pt(fence, &i, pt_a);
-
- i_a++;
- } else if (pt_a->context > pt_b->context) {
- sync_fence_add_pt(fence, &i, pt_b);
-
- i_b++;
- } else {
- if (pt_a->seqno - pt_b->seqno <= INT_MAX)
- sync_fence_add_pt(fence, &i, pt_a);
- else
- sync_fence_add_pt(fence, &i, pt_b);
-
- i_a++;
- i_b++;
- }
- }
-
- for (; i_a < a->num_fences; i_a++)
- sync_fence_add_pt(fence, &i, a->cbs[i_a].sync_pt);
-
- for (; i_b < b->num_fences; i_b++)
- sync_fence_add_pt(fence, &i, b->cbs[i_b].sync_pt);
-
- if (num_fences > i)
- atomic_sub(num_fences - i, &fence->status);
- fence->num_fences = i;
-
- sync_fence_debug_add(fence);
- return fence;
-}
-EXPORT_SYMBOL(sync_fence_merge);
-
-int sync_fence_wake_up_wq(wait_queue_t *curr, unsigned mode,
- int wake_flags, void *key)
-{
- struct sync_fence_waiter *wait;
-
- wait = container_of(curr, struct sync_fence_waiter, work);
- list_del_init(&wait->work.task_list);
-
- wait->callback(wait->work.private, wait);
- return 1;
-}
-
-int sync_fence_wait_async(struct sync_fence *fence,
- struct sync_fence_waiter *waiter)
-{
- int err = atomic_read(&fence->status);
- unsigned long flags;
-
- if (err < 0)
- return err;
-
- if (!err)
- return 1;
-
- init_waitqueue_func_entry(&waiter->work, sync_fence_wake_up_wq);
- waiter->work.private = fence;
-
- spin_lock_irqsave(&fence->wq.lock, flags);
- err = atomic_read(&fence->status);
- if (err > 0)
- __add_wait_queue_tail(&fence->wq, &waiter->work);
- spin_unlock_irqrestore(&fence->wq.lock, flags);
-
- if (err < 0)
- return err;
-
- return !err;
-}
-EXPORT_SYMBOL(sync_fence_wait_async);
-
-int sync_fence_cancel_async(struct sync_fence *fence,
- struct sync_fence_waiter *waiter)
-{
- unsigned long flags;
- int ret = 0;
-
- spin_lock_irqsave(&fence->wq.lock, flags);
- if (!list_empty(&waiter->work.task_list))
- list_del_init(&waiter->work.task_list);
- else
- ret = -ENOENT;
- spin_unlock_irqrestore(&fence->wq.lock, flags);
- return ret;
-}
-EXPORT_SYMBOL(sync_fence_cancel_async);
-
-int sync_fence_wait(struct sync_fence *fence, long timeout)
-{
- long ret;
- int i;
-
- if (timeout < 0)
- timeout = MAX_SCHEDULE_TIMEOUT;
- else
- timeout = msecs_to_jiffies(timeout);
-
- trace_sync_wait(fence, 1);
- for (i = 0; i < fence->num_fences; ++i)
- trace_sync_pt(fence->cbs[i].sync_pt);
- ret = wait_event_interruptible_timeout(fence->wq,
- atomic_read(&fence->status) <= 0,
- timeout);
- trace_sync_wait(fence, 0);
-
- if (ret < 0) {
- return ret;
- } else if (ret == 0) {
- if (timeout) {
- pr_info("fence timeout on [%p] after %dms\n", fence,
- jiffies_to_msecs(timeout));
- sync_dump();
- }
- return -ETIME;
- }
-
- ret = atomic_read(&fence->status);
- if (ret) {
- pr_info("fence error %ld on [%p]\n", ret, fence);
- sync_dump();
- }
- return ret;
-}
-EXPORT_SYMBOL(sync_fence_wait);
-
-static const char *android_fence_get_driver_name(struct fence *fence)
-{
- struct sync_pt *pt = container_of(fence, struct sync_pt, base);
- struct sync_timeline *parent = sync_pt_parent(pt);
-
- return parent->ops->driver_name;
-}
-
-static const char *android_fence_get_timeline_name(struct fence *fence)
-{
- struct sync_pt *pt = container_of(fence, struct sync_pt, base);
- struct sync_timeline *parent = sync_pt_parent(pt);
-
- return parent->name;
-}
-
-static void android_fence_release(struct fence *fence)
-{
- struct sync_pt *pt = container_of(fence, struct sync_pt, base);
- struct sync_timeline *parent = sync_pt_parent(pt);
- unsigned long flags;
-
- spin_lock_irqsave(fence->lock, flags);
- list_del(&pt->child_list);
- if (WARN_ON_ONCE(!list_empty(&pt->active_list)))
- list_del(&pt->active_list);
- spin_unlock_irqrestore(fence->lock, flags);
-
- if (parent->ops->free_pt)
- parent->ops->free_pt(pt);
-
- sync_timeline_put(parent);
- fence_free(&pt->base);
-}
-
-static bool android_fence_signaled(struct fence *fence)
-{
- struct sync_pt *pt = container_of(fence, struct sync_pt, base);
- struct sync_timeline *parent = sync_pt_parent(pt);
- int ret;
-
- ret = parent->ops->has_signaled(pt);
- if (ret < 0)
- fence->status = ret;
- return ret;
-}
-
-static bool android_fence_enable_signaling(struct fence *fence)
-{
- struct sync_pt *pt = container_of(fence, struct sync_pt, base);
- struct sync_timeline *parent = sync_pt_parent(pt);
-
- if (android_fence_signaled(fence))
- return false;
-
- list_add_tail(&pt->active_list, &parent->active_list_head);
- return true;
-}
-
-static int android_fence_fill_driver_data(struct fence *fence,
- void *data, int size)
-{
- struct sync_pt *pt = container_of(fence, struct sync_pt, base);
- struct sync_timeline *parent = sync_pt_parent(pt);
-
- if (!parent->ops->fill_driver_data)
- return 0;
- return parent->ops->fill_driver_data(pt, data, size);
-}
-
-static void android_fence_value_str(struct fence *fence,
- char *str, int size)
-{
- struct sync_pt *pt = container_of(fence, struct sync_pt, base);
- struct sync_timeline *parent = sync_pt_parent(pt);
-
- if (!parent->ops->pt_value_str) {
- if (size)
- *str = 0;
- return;
- }
- parent->ops->pt_value_str(pt, str, size);
-}
-
-static void android_fence_timeline_value_str(struct fence *fence,
- char *str, int size)
-{
- struct sync_pt *pt = container_of(fence, struct sync_pt, base);
- struct sync_timeline *parent = sync_pt_parent(pt);
-
- if (!parent->ops->timeline_value_str) {
- if (size)
- *str = 0;
- return;
- }
- parent->ops->timeline_value_str(parent, str, size);
-}
-
-static const struct fence_ops android_fence_ops = {
- .get_driver_name = android_fence_get_driver_name,
- .get_timeline_name = android_fence_get_timeline_name,
- .enable_signaling = android_fence_enable_signaling,
- .signaled = android_fence_signaled,
- .wait = fence_default_wait,
- .release = android_fence_release,
- .fill_driver_data = android_fence_fill_driver_data,
- .fence_value_str = android_fence_value_str,
- .timeline_value_str = android_fence_timeline_value_str,
-};
-
-static void sync_fence_free(struct kref *kref)
-{
- struct sync_fence *fence = container_of(kref, struct sync_fence, kref);
- int i, status = atomic_read(&fence->status);
-
- for (i = 0; i < fence->num_fences; ++i) {
- if (status)
- fence_remove_callback(fence->cbs[i].sync_pt,
- &fence->cbs[i].cb);
- fence_put(fence->cbs[i].sync_pt);
- }
-
- kfree(fence);
-}
-
-static int sync_fence_release(struct inode *inode, struct file *file)
-{
- struct sync_fence *fence = file->private_data;
-
- sync_fence_debug_remove(fence);
-
- kref_put(&fence->kref, sync_fence_free);
- return 0;
-}
-
-static unsigned int sync_fence_poll(struct file *file, poll_table *wait)
-{
- struct sync_fence *fence = file->private_data;
- int status;
-
- poll_wait(file, &fence->wq, wait);
-
- status = atomic_read(&fence->status);
-
- if (!status)
- return POLLIN;
- else if (status < 0)
- return POLLERR;
- return 0;
-}
-
-static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg)
-{
- __s32 value;
-
- if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
- return -EFAULT;
-
- return sync_fence_wait(fence, value);
-}
-
-static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg)
-{
- int fd = get_unused_fd_flags(O_CLOEXEC);
- int err;
- struct sync_fence *fence2, *fence3;
- struct sync_merge_data data;
-
- if (fd < 0)
- return fd;
-
- if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
- err = -EFAULT;
- goto err_put_fd;
- }
-
- fence2 = sync_fence_fdget(data.fd2);
- if (fence2 == NULL) {
- err = -ENOENT;
- goto err_put_fd;
- }
-
- data.name[sizeof(data.name) - 1] = '\0';
- fence3 = sync_fence_merge(data.name, fence, fence2);
- if (fence3 == NULL) {
- err = -ENOMEM;
- goto err_put_fence2;
- }
-
- data.fence = fd;
- if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
- err = -EFAULT;
- goto err_put_fence3;
- }
-
- sync_fence_install(fence3, fd);
- sync_fence_put(fence2);
- return 0;
-
-err_put_fence3:
- sync_fence_put(fence3);
-
-err_put_fence2:
- sync_fence_put(fence2);
-
-err_put_fd:
- put_unused_fd(fd);
- return err;
-}
-
-static int sync_fill_pt_info(struct fence *fence, void *data, int size)
-{
- struct sync_pt_info *info = data;
- int ret;
-
- if (size < sizeof(struct sync_pt_info))
- return -ENOMEM;
-
- info->len = sizeof(struct sync_pt_info);
-
- if (fence->ops->fill_driver_data) {
- ret = fence->ops->fill_driver_data(fence, info->driver_data,
- size - sizeof(*info));
- if (ret < 0)
- return ret;
-
- info->len += ret;
- }
-
- strlcpy(info->obj_name, fence->ops->get_timeline_name(fence),
- sizeof(info->obj_name));
- strlcpy(info->driver_name, fence->ops->get_driver_name(fence),
- sizeof(info->driver_name));
- if (fence_is_signaled(fence))
- info->status = fence->status >= 0 ? 1 : fence->status;
- else
- info->status = 0;
- info->timestamp_ns = ktime_to_ns(fence->timestamp);
-
- return info->len;
-}
-
-static long sync_fence_ioctl_fence_info(struct sync_fence *fence,
- unsigned long arg)
-{
- struct sync_fence_info_data *data;
- __u32 size;
- __u32 len = 0;
- int ret, i;
-
- if (copy_from_user(&size, (void __user *)arg, sizeof(size)))
- return -EFAULT;
-
- if (size < sizeof(struct sync_fence_info_data))
- return -EINVAL;
-
- if (size > 4096)
- size = 4096;
-
- data = kzalloc(size, GFP_KERNEL);
- if (data == NULL)
- return -ENOMEM;
-
- strlcpy(data->name, fence->name, sizeof(data->name));
- data->status = atomic_read(&fence->status);
- if (data->status >= 0)
- data->status = !data->status;
-
- len = sizeof(struct sync_fence_info_data);
-
- for (i = 0; i < fence->num_fences; ++i) {
- struct fence *pt = fence->cbs[i].sync_pt;
-
- ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len);
-
- if (ret < 0)
- goto out;
-
- len += ret;
- }
-
- data->len = len;
-
- if (copy_to_user((void __user *)arg, data, len))
- ret = -EFAULT;
- else
- ret = 0;
-
-out:
- kfree(data);
-
- return ret;
-}
-
-static long sync_fence_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- struct sync_fence *fence = file->private_data;
-
- switch (cmd) {
- case SYNC_IOC_WAIT:
- return sync_fence_ioctl_wait(fence, arg);
-
- case SYNC_IOC_MERGE:
- return sync_fence_ioctl_merge(fence, arg);
-
- case SYNC_IOC_FENCE_INFO:
- return sync_fence_ioctl_fence_info(fence, arg);
-
- default:
- return -ENOTTY;
- }
-}
-
-static const struct file_operations sync_fence_fops = {
- .release = sync_fence_release,
- .poll = sync_fence_poll,
- .unlocked_ioctl = sync_fence_ioctl,
- .compat_ioctl = sync_fence_ioctl,
-};
-
diff --git a/drivers/staging/android/sync.h b/drivers/staging/android/sync.h
deleted file mode 100644
index afa0752..0000000
--- a/drivers/staging/android/sync.h
+++ /dev/null
@@ -1,366 +0,0 @@
-/*
- * include/linux/sync.h
- *
- * Copyright (C) 2012 Google, Inc.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef _LINUX_SYNC_H
-#define _LINUX_SYNC_H
-
-#include <linux/types.h>
-#include <linux/kref.h>
-#include <linux/ktime.h>
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <linux/wait.h>
-#include <linux/fence.h>
-
-#include "uapi/sync.h"
-
-struct sync_timeline;
-struct sync_pt;
-struct sync_fence;
-
-/**
- * struct sync_timeline_ops - sync object implementation ops
- * @driver_name: name of the implementation
- * @dup: duplicate a sync_pt
- * @has_signaled: returns:
- * 1 if pt has signaled
- * 0 if pt has not signaled
- * <0 on error
- * @compare: returns:
- * 1 if b will signal before a
- * 0 if a and b will signal at the same time
- * -1 if a will signal before b
- * @free_pt: called before sync_pt is freed
- * @release_obj: called before sync_timeline is freed
- * @fill_driver_data: write implementation specific driver data to data.
- * should return an error if there is not enough room
- * as specified by size. This information is returned
- * to userspace by SYNC_IOC_FENCE_INFO.
- * @timeline_value_str: fill str with the value of the sync_timeline's counter
- * @pt_value_str: fill str with the value of the sync_pt
- */
-struct sync_timeline_ops {
- const char *driver_name;
-
- /* required */
- struct sync_pt * (*dup)(struct sync_pt *pt);
-
- /* required */
- int (*has_signaled)(struct sync_pt *pt);
-
- /* required */
- int (*compare)(struct sync_pt *a, struct sync_pt *b);
-
- /* optional */
- void (*free_pt)(struct sync_pt *sync_pt);
-
- /* optional */
- void (*release_obj)(struct sync_timeline *sync_timeline);
-
- /* optional */
- int (*fill_driver_data)(struct sync_pt *syncpt, void *data, int size);
-
- /* optional */
- void (*timeline_value_str)(struct sync_timeline *timeline, char *str,
- int size);
-
- /* optional */
- void (*pt_value_str)(struct sync_pt *pt, char *str, int size);
-};
-
-/**
- * struct sync_timeline - sync object
- * @kref: reference count on fence.
- * @ops: ops that define the implementation of the sync_timeline
- * @name: name of the sync_timeline. Useful for debugging
- * @destroyed: set when sync_timeline is destroyed
- * @child_list_head: list of children sync_pts for this sync_timeline
- * @child_list_lock: lock protecting @child_list_head, destroyed, and
- * sync_pt.status
- * @active_list_head: list of active (unsignaled/errored) sync_pts
- * @sync_timeline_list: membership in global sync_timeline_list
- */
-struct sync_timeline {
- struct kref kref;
- const struct sync_timeline_ops *ops;
- char name[32];
-
- /* protected by child_list_lock */
- bool destroyed;
- int context, value;
-
- struct list_head child_list_head;
- spinlock_t child_list_lock;
-
- struct list_head active_list_head;
-
-#ifdef CONFIG_DEBUG_FS
- struct list_head sync_timeline_list;
-#endif
-};
-
-/**
- * struct sync_pt - sync point
- * @fence: base fence class
- * @child_list: membership in sync_timeline.child_list_head
- * @active_list: membership in sync_timeline.active_list_head
- * @signaled_list: membership in temporary signaled_list on stack
- * @fence: sync_fence to which the sync_pt belongs
- * @pt_list: membership in sync_fence.pt_list_head
- * @status: 1: signaled, 0:active, <0: error
- * @timestamp: time which sync_pt status transitioned from active to
- * signaled or error.
- */
-struct sync_pt {
- struct fence base;
-
- struct list_head child_list;
- struct list_head active_list;
-};
-
-static inline struct sync_timeline *sync_pt_parent(struct sync_pt *pt)
-{
- return container_of(pt->base.lock, struct sync_timeline,
- child_list_lock);
-}
-
-struct sync_fence_cb {
- struct fence_cb cb;
- struct fence *sync_pt;
- struct sync_fence *fence;
-};
-
-/**
- * struct sync_fence - sync fence
- * @file: file representing this fence
- * @kref: reference count on fence.
- * @name: name of sync_fence. Useful for debugging
- * @pt_list_head: list of sync_pts in the fence. immutable once fence
- * is created
- * @status: 0: signaled, >0:active, <0: error
- *
- * @wq: wait queue for fence signaling
- * @sync_fence_list: membership in global fence list
- */
-struct sync_fence {
- struct file *file;
- struct kref kref;
- char name[32];
-#ifdef CONFIG_DEBUG_FS
- struct list_head sync_fence_list;
-#endif
- int num_fences;
-
- wait_queue_head_t wq;
- atomic_t status;
-
- struct sync_fence_cb cbs[];
-};
-
-struct sync_fence_waiter;
-typedef void (*sync_callback_t)(struct sync_fence *fence,
- struct sync_fence_waiter *waiter);
-
-/**
- * struct sync_fence_waiter - metadata for asynchronous waiter on a fence
- * @waiter_list: membership in sync_fence.waiter_list_head
- * @callback: function pointer to call when fence signals
- * @callback_data: pointer to pass to @callback
- */
-struct sync_fence_waiter {
- wait_queue_t work;
- sync_callback_t callback;
-};
-
-static inline void sync_fence_waiter_init(struct sync_fence_waiter *waiter,
- sync_callback_t callback)
-{
- INIT_LIST_HEAD(&waiter->work.task_list);
- waiter->callback = callback;
-}
-
-/*
- * API for sync_timeline implementers
- */
-
-/**
- * sync_timeline_create() - creates a sync object
- * @ops: specifies the implementation ops for the object
- * @size: size to allocate for this obj
- * @name: sync_timeline name
- *
- * Creates a new sync_timeline which will use the implementation specified by
- * @ops. @size bytes will be allocated allowing for implementation specific
- * data to be kept after the generic sync_timeline struct.
- */
-struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
- int size, const char *name);
-
-/**
- * sync_timeline_destroy() - destroys a sync object
- * @obj: sync_timeline to destroy
- *
- * A sync implementation should call this when the @obj is going away
- * (i.e. module unload.) @obj won't actually be freed until all its children
- * sync_pts are freed.
- */
-void sync_timeline_destroy(struct sync_timeline *obj);
-
-/**
- * sync_timeline_signal() - signal a status change on a sync_timeline
- * @obj: sync_timeline to signal
- *
- * A sync implementation should call this any time one of it's sync_pts
- * has signaled or has an error condition.
- */
-void sync_timeline_signal(struct sync_timeline *obj);
-
-/**
- * sync_pt_create() - creates a sync pt
- * @parent: sync_pt's parent sync_timeline
- * @size: size to allocate for this pt
- *
- * Creates a new sync_pt as a child of @parent. @size bytes will be
- * allocated allowing for implementation specific data to be kept after
- * the generic sync_timeline struct.
- */
-struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size);
-
-/**
- * sync_pt_free() - frees a sync pt
- * @pt: sync_pt to free
- *
- * This should only be called on sync_pts which have been created but
- * not added to a fence.
- */
-void sync_pt_free(struct sync_pt *pt);
-
-/**
- * sync_fence_create() - creates a sync fence
- * @name: name of fence to create
- * @pt: sync_pt to add to the fence
- *
- * Creates a fence containg @pt. Once this is called, the fence takes
- * ownership of @pt.
- */
-struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt);
-
-/**
- * sync_fence_create_dma() - creates a sync fence from dma-fence
- * @name: name of fence to create
- * @pt: dma-fence to add to the fence
- *
- * Creates a fence containg @pt. Once this is called, the fence takes
- * ownership of @pt.
- */
-struct sync_fence *sync_fence_create_dma(const char *name, struct fence *pt);
-
-/*
- * API for sync_fence consumers
- */
-
-/**
- * sync_fence_merge() - merge two fences
- * @name: name of new fence
- * @a: fence a
- * @b: fence b
- *
- * Creates a new fence which contains copies of all the sync_pts in both
- * @a and @b. @a and @b remain valid, independent fences.
- */
-struct sync_fence *sync_fence_merge(const char *name,
- struct sync_fence *a, struct sync_fence *b);
-
-/**
- * sync_fence_fdget() - get a fence from an fd
- * @fd: fd referencing a fence
- *
- * Ensures @fd references a valid fence, increments the refcount of the backing
- * file, and returns the fence.
- */
-struct sync_fence *sync_fence_fdget(int fd);
-
-/**
- * sync_fence_put() - puts a reference of a sync fence
- * @fence: fence to put
- *
- * Puts a reference on @fence. If this is the last reference, the fence and
- * all it's sync_pts will be freed
- */
-void sync_fence_put(struct sync_fence *fence);
-
-/**
- * sync_fence_install() - installs a fence into a file descriptor
- * @fence: fence to install
- * @fd: file descriptor in which to install the fence
- *
- * Installs @fence into @fd. @fd's should be acquired through
- * get_unused_fd_flags(O_CLOEXEC).
- */
-void sync_fence_install(struct sync_fence *fence, int fd);
-
-/**
- * sync_fence_wait_async() - registers and async wait on the fence
- * @fence: fence to wait on
- * @waiter: waiter callback struck
- *
- * Returns 1 if @fence has already signaled.
- *
- * Registers a callback to be called when @fence signals or has an error.
- * @waiter should be initialized with sync_fence_waiter_init().
- */
-int sync_fence_wait_async(struct sync_fence *fence,
- struct sync_fence_waiter *waiter);
-
-/**
- * sync_fence_cancel_async() - cancels an async wait
- * @fence: fence to wait on
- * @waiter: waiter callback struck
- *
- * returns 0 if waiter was removed from fence's async waiter list.
- * returns -ENOENT if waiter was not found on fence's async waiter list.
- *
- * Cancels a previously registered async wait. Will fail gracefully if
- * @waiter was never registered or if @fence has already signaled @waiter.
- */
-int sync_fence_cancel_async(struct sync_fence *fence,
- struct sync_fence_waiter *waiter);
-
-/**
- * sync_fence_wait() - wait on fence
- * @fence: fence to wait on
- * @tiemout: timeout in ms
- *
- * Wait for @fence to be signaled or have an error. Waits indefinitely
- * if @timeout < 0
- */
-int sync_fence_wait(struct sync_fence *fence, long timeout);
-
-#ifdef CONFIG_DEBUG_FS
-
-void sync_timeline_debug_add(struct sync_timeline *obj);
-void sync_timeline_debug_remove(struct sync_timeline *obj);
-void sync_fence_debug_add(struct sync_fence *fence);
-void sync_fence_debug_remove(struct sync_fence *fence);
-void sync_dump(void);
-
-#else
-# define sync_timeline_debug_add(obj)
-# define sync_timeline_debug_remove(obj)
-# define sync_fence_debug_add(fence)
-# define sync_fence_debug_remove(fence)
-# define sync_dump()
-#endif
-int sync_fence_wake_up_wq(wait_queue_t *curr, unsigned mode,
- int wake_flags, void *key);
-
-#endif /* _LINUX_SYNC_H */
diff --git a/drivers/staging/android/sync_debug.c b/drivers/staging/android/sync_debug.c
deleted file mode 100644
index 02a1649..0000000
--- a/drivers/staging/android/sync_debug.c
+++ /dev/null
@@ -1,255 +0,0 @@
-/*
- * drivers/base/sync.c
- *
- * Copyright (C) 2012 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/debugfs.h>
-#include <linux/export.h>
-#include <linux/file.h>
-#include <linux/fs.h>
-#include <linux/kernel.h>
-#include <linux/poll.h>
-#include <linux/sched.h>
-#include <linux/seq_file.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-#include <linux/anon_inodes.h>
-#include <linux/time64.h>
-#include "sync.h"
-
-#ifdef CONFIG_DEBUG_FS
-
-static LIST_HEAD(sync_timeline_list_head);
-static DEFINE_SPINLOCK(sync_timeline_list_lock);
-static LIST_HEAD(sync_fence_list_head);
-static DEFINE_SPINLOCK(sync_fence_list_lock);
-
-void sync_timeline_debug_add(struct sync_timeline *obj)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&sync_timeline_list_lock, flags);
- list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head);
- spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
-}
-
-void sync_timeline_debug_remove(struct sync_timeline *obj)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&sync_timeline_list_lock, flags);
- list_del(&obj->sync_timeline_list);
- spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
-}
-
-void sync_fence_debug_add(struct sync_fence *fence)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&sync_fence_list_lock, flags);
- list_add_tail(&fence->sync_fence_list, &sync_fence_list_head);
- spin_unlock_irqrestore(&sync_fence_list_lock, flags);
-}
-
-void sync_fence_debug_remove(struct sync_fence *fence)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&sync_fence_list_lock, flags);
- list_del(&fence->sync_fence_list);
- spin_unlock_irqrestore(&sync_fence_list_lock, flags);
-}
-
-static const char *sync_status_str(int status)
-{
- if (status == 0)
- return "signaled";
-
- if (status > 0)
- return "active";
-
- return "error";
-}
-
-static void sync_print_pt(struct seq_file *s, struct fence *pt, bool fence)
-{
- int status = 1;
-
- if (fence_is_signaled_locked(pt))
- status = pt->status;
-
- seq_printf(s, " %s%spt %s",
- fence && pt->ops->get_timeline_name ?
- pt->ops->get_timeline_name(pt) : "",
- fence ? "_" : "",
- sync_status_str(status));
-
- if (status <= 0) {
- struct timespec64 ts64 =
- ktime_to_timespec64(pt->timestamp);
-
- seq_printf(s, "@%lld.%09ld", (s64)ts64.tv_sec, ts64.tv_nsec);
- }
-
- if ((!fence || pt->ops->timeline_value_str) &&
- pt->ops->fence_value_str) {
- char value[64];
- bool success;
-
- pt->ops->fence_value_str(pt, value, sizeof(value));
- success = strlen(value);
-
- if (success)
- seq_printf(s, ": %s", value);
-
- if (success && fence) {
- pt->ops->timeline_value_str(pt, value, sizeof(value));
-
- if (strlen(value))
- seq_printf(s, " / %s", value);
- }
- }
-
- seq_puts(s, "\n");
-}
-
-static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
-{
- struct list_head *pos;
- unsigned long flags;
-
- seq_printf(s, "%s %s", obj->name, obj->ops->driver_name);
-
- if (obj->ops->timeline_value_str) {
- char value[64];
-
- obj->ops->timeline_value_str(obj, value, sizeof(value));
- seq_printf(s, ": %s", value);
- }
-
- seq_puts(s, "\n");
-
- spin_lock_irqsave(&obj->child_list_lock, flags);
- list_for_each(pos, &obj->child_list_head) {
- struct sync_pt *pt =
- container_of(pos, struct sync_pt, child_list);
- sync_print_pt(s, &pt->base, false);
- }
- spin_unlock_irqrestore(&obj->child_list_lock, flags);
-}
-
-static void sync_print_fence(struct seq_file *s, struct sync_fence *fence)
-{
- wait_queue_t *pos;
- unsigned long flags;
- int i;
-
- seq_printf(s, "[%p] %s: %s\n", fence, fence->name,
- sync_status_str(atomic_read(&fence->status)));
-
- for (i = 0; i < fence->num_fences; ++i)
- sync_print_pt(s, fence->cbs[i].sync_pt, true);
-
- spin_lock_irqsave(&fence->wq.lock, flags);
- list_for_each_entry(pos, &fence->wq.task_list, task_list) {
- struct sync_fence_waiter *waiter;
-
- if (pos->func != &sync_fence_wake_up_wq)
- continue;
-
- waiter = container_of(pos, struct sync_fence_waiter, work);
-
- seq_printf(s, "waiter %pF\n", waiter->callback);
- }
- spin_unlock_irqrestore(&fence->wq.lock, flags);
-}
-
-static int sync_debugfs_show(struct seq_file *s, void *unused)
-{
- unsigned long flags;
- struct list_head *pos;
-
- seq_puts(s, "objs:\n--------------\n");
-
- spin_lock_irqsave(&sync_timeline_list_lock, flags);
- list_for_each(pos, &sync_timeline_list_head) {
- struct sync_timeline *obj =
- container_of(pos, struct sync_timeline,
- sync_timeline_list);
-
- sync_print_obj(s, obj);
- seq_puts(s, "\n");
- }
- spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
-
- seq_puts(s, "fences:\n--------------\n");
-
- spin_lock_irqsave(&sync_fence_list_lock, flags);
- list_for_each(pos, &sync_fence_list_head) {
- struct sync_fence *fence =
- container_of(pos, struct sync_fence, sync_fence_list);
-
- sync_print_fence(s, fence);
- seq_puts(s, "\n");
- }
- spin_unlock_irqrestore(&sync_fence_list_lock, flags);
- return 0;
-}
-
-static int sync_debugfs_open(struct inode *inode, struct file *file)
-{
- return single_open(file, sync_debugfs_show, inode->i_private);
-}
-
-static const struct file_operations sync_debugfs_fops = {
- .open = sync_debugfs_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static __init int sync_debugfs_init(void)
-{
- debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops);
- return 0;
-}
-late_initcall(sync_debugfs_init);
-
-#define DUMP_CHUNK 256
-static char sync_dump_buf[64 * 1024];
-void sync_dump(void)
-{
- struct seq_file s = {
- .buf = sync_dump_buf,
- .size = sizeof(sync_dump_buf) - 1,
- };
- int i;
-
- sync_debugfs_show(&s, NULL);
-
- for (i = 0; i < s.count; i += DUMP_CHUNK) {
- if ((s.count - i) > DUMP_CHUNK) {
- char c = s.buf[i + DUMP_CHUNK];
-
- s.buf[i + DUMP_CHUNK] = 0;
- pr_cont("%s", s.buf + i);
- s.buf[i + DUMP_CHUNK] = c;
- } else {
- s.buf[s.count] = 0;
- pr_cont("%s", s.buf + i);
- }
- }
-}
-
-#endif
diff --git a/drivers/staging/android/trace/sync.h b/drivers/staging/android/trace/sync.h
deleted file mode 100644
index 77edb97..0000000
--- a/drivers/staging/android/trace/sync.h
+++ /dev/null
@@ -1,82 +0,0 @@
-#undef TRACE_SYSTEM
-#define TRACE_INCLUDE_PATH ../../drivers/staging/android/trace
-#define TRACE_SYSTEM sync
-
-#if !defined(_TRACE_SYNC_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_SYNC_H
-
-#include "../sync.h"
-#include <linux/tracepoint.h>
-
-TRACE_EVENT(sync_timeline,
- TP_PROTO(struct sync_timeline *timeline),
-
- TP_ARGS(timeline),
-
- TP_STRUCT__entry(
- __string(name, timeline->name)
- __array(char, value, 32)
- ),
-
- TP_fast_assign(
- __assign_str(name, timeline->name);
- if (timeline->ops->timeline_value_str) {
- timeline->ops->timeline_value_str(timeline,
- __entry->value,
- sizeof(__entry->value));
- } else {
- __entry->value[0] = '\0';
- }
- ),
-
- TP_printk("name=%s value=%s", __get_str(name), __entry->value)
-);
-
-TRACE_EVENT(sync_wait,
- TP_PROTO(struct sync_fence *fence, int begin),
-
- TP_ARGS(fence, begin),
-
- TP_STRUCT__entry(
- __string(name, fence->name)
- __field(s32, status)
- __field(u32, begin)
- ),
-
- TP_fast_assign(
- __assign_str(name, fence->name);
- __entry->status = atomic_read(&fence->status);
- __entry->begin = begin;
- ),
-
- TP_printk("%s name=%s state=%d", __entry->begin ? "begin" : "end",
- __get_str(name), __entry->status)
-);
-
-TRACE_EVENT(sync_pt,
- TP_PROTO(struct fence *pt),
-
- TP_ARGS(pt),
-
- TP_STRUCT__entry(
- __string(timeline, pt->ops->get_timeline_name(pt))
- __array(char, value, 32)
- ),
-
- TP_fast_assign(
- __assign_str(timeline, pt->ops->get_timeline_name(pt));
- if (pt->ops->fence_value_str) {
- pt->ops->fence_value_str(pt, __entry->value,
- sizeof(__entry->value));
- } else {
- __entry->value[0] = '\0';
- }
- ),
-
- TP_printk("name=%s value=%s", __get_str(timeline), __entry->value)
-);
-
-#endif /* if !defined(_TRACE_SYNC_H) || defined(TRACE_HEADER_MULTI_READ) */
-
-/* This part must be outside protection */
-#include <trace/define_trace.h>
diff --git a/drivers/staging/android/uapi/sw_sync.h b/drivers/staging/android/uapi/sw_sync.h
deleted file mode 100644
index 9b5d486..0000000
--- a/drivers/staging/android/uapi/sw_sync.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (C) 2012 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef _UAPI_LINUX_SW_SYNC_H
-#define _UAPI_LINUX_SW_SYNC_H
-
-#include <linux/types.h>
-
-struct sw_sync_create_fence_data {
- __u32 value;
- char name[32];
- __s32 fence; /* fd of new fence */
-};
-
-#define SW_SYNC_IOC_MAGIC 'W'
-
-#define SW_SYNC_IOC_CREATE_FENCE _IOWR(SW_SYNC_IOC_MAGIC, 0,\
- struct sw_sync_create_fence_data)
-#define SW_SYNC_IOC_INC _IOW(SW_SYNC_IOC_MAGIC, 1, __u32)
-
-#endif /* _UAPI_LINUX_SW_SYNC_H */
diff --git a/drivers/staging/android/uapi/sync.h b/drivers/staging/android/uapi/sync.h
deleted file mode 100644
index e964c75..0000000
--- a/drivers/staging/android/uapi/sync.h
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Copyright (C) 2012 Google, Inc.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef _UAPI_LINUX_SYNC_H
-#define _UAPI_LINUX_SYNC_H
-
-#include <linux/ioctl.h>
-#include <linux/types.h>
-
-/**
- * struct sync_merge_data - data passed to merge ioctl
- * @fd2: file descriptor of second fence
- * @name: name of new fence
- * @fence: returns the fd of the new fence to userspace
- */
-struct sync_merge_data {
- __s32 fd2; /* fd of second fence */
- char name[32]; /* name of new fence */
- __s32 fence; /* fd on newly created fence */
-};
-
-/**
- * struct sync_pt_info - detailed sync_pt information
- * @len: length of sync_pt_info including any driver_data
- * @obj_name: name of parent sync_timeline
- * @driver_name: name of driver implementing the parent
- * @status: status of the sync_pt 0:active 1:signaled <0:error
- * @timestamp_ns: timestamp of status change in nanoseconds
- * @driver_data: any driver dependent data
- */
-struct sync_pt_info {
- __u32 len;
- char obj_name[32];
- char driver_name[32];
- __s32 status;
- __u64 timestamp_ns;
-
- __u8 driver_data[0];
-};
-
-/**
- * struct sync_fence_info_data - data returned from fence info ioctl
- * @len: ioctl caller writes the size of the buffer its passing in.
- * ioctl returns length of sync_fence_data returned to userspace
- * including pt_info.
- * @name: name of fence
- * @status: status of fence. 1: signaled 0:active <0:error
- * @pt_info: a sync_pt_info struct for every sync_pt in the fence
- */
-struct sync_fence_info_data {
- __u32 len;
- char name[32];
- __s32 status;
-
- __u8 pt_info[0];
-};
-
-#define SYNC_IOC_MAGIC '>'
-
-/**
- * DOC: SYNC_IOC_WAIT - wait for a fence to signal
- *
- * pass timeout in milliseconds. Waits indefinitely timeout < 0.
- */
-#define SYNC_IOC_WAIT _IOW(SYNC_IOC_MAGIC, 0, __s32)
-
-/**
- * DOC: SYNC_IOC_MERGE - merge two fences
- *
- * Takes a struct sync_merge_data. Creates a new fence containing copies of
- * the sync_pts in both the calling fd and sync_merge_data.fd2. Returns the
- * new fence's fd in sync_merge_data.fence
- */
-#define SYNC_IOC_MERGE _IOWR(SYNC_IOC_MAGIC, 1, struct sync_merge_data)
-
-/**
- * DOC: SYNC_IOC_FENCE_INFO - get detailed information on a fence
- *
- * Takes a struct sync_fence_info_data with extra space allocated for pt_info.
- * Caller should write the size of the buffer into len. On return, len is
- * updated to reflect the total size of the sync_fence_info_data including
- * pt_info.
- *
- * pt_info is a buffer containing sync_pt_infos for every sync_pt in the fence.
- * To iterate over the sync_pt_infos, use the sync_pt_info.len field.
- */
-#define SYNC_IOC_FENCE_INFO _IOWR(SYNC_IOC_MAGIC, 2,\
- struct sync_fence_info_data)
-
-#endif /* _UAPI_LINUX_SYNC_H */
diff --git a/include/uapi/Kbuild b/include/uapi/Kbuild
index 245aa6e..7c415d0 100644
--- a/include/uapi/Kbuild
+++ b/include/uapi/Kbuild
@@ -13,3 +13,4 @@ header-y += drm/
header-y += xen/
header-y += scsi/
header-y += misc/
+header-y += sync/
diff --git a/include/uapi/sync/Kbuild b/include/uapi/sync/Kbuild
new file mode 100644
index 0000000..2716ffe
--- /dev/null
+++ b/include/uapi/sync/Kbuild
@@ -0,0 +1,3 @@
+# sync Header export list
+header-y += sw_sync.h
+header-y += sync.h
diff --git a/include/uapi/sync/sw_sync.h b/include/uapi/sync/sw_sync.h
new file mode 100644
index 0000000..9b5d486
--- /dev/null
+++ b/include/uapi/sync/sw_sync.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_SW_SYNC_H
+#define _UAPI_LINUX_SW_SYNC_H
+
+#include <linux/types.h>
+
+struct sw_sync_create_fence_data {
+ __u32 value;
+ char name[32];
+ __s32 fence; /* fd of new fence */
+};
+
+#define SW_SYNC_IOC_MAGIC 'W'
+
+#define SW_SYNC_IOC_CREATE_FENCE _IOWR(SW_SYNC_IOC_MAGIC, 0,\
+ struct sw_sync_create_fence_data)
+#define SW_SYNC_IOC_INC _IOW(SW_SYNC_IOC_MAGIC, 1, __u32)
+
+#endif /* _UAPI_LINUX_SW_SYNC_H */
diff --git a/include/uapi/sync/sync.h b/include/uapi/sync/sync.h
new file mode 100644
index 0000000..e964c75
--- /dev/null
+++ b/include/uapi/sync/sync.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_SYNC_H
+#define _UAPI_LINUX_SYNC_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/**
+ * struct sync_merge_data - data passed to merge ioctl
+ * @fd2: file descriptor of second fence
+ * @name: name of new fence
+ * @fence: returns the fd of the new fence to userspace
+ */
+struct sync_merge_data {
+ __s32 fd2; /* fd of second fence */
+ char name[32]; /* name of new fence */
+ __s32 fence; /* fd on newly created fence */
+};
+
+/**
+ * struct sync_pt_info - detailed sync_pt information
+ * @len: length of sync_pt_info including any driver_data
+ * @obj_name: name of parent sync_timeline
+ * @driver_name: name of driver implementing the parent
+ * @status: status of the sync_pt 0:active 1:signaled <0:error
+ * @timestamp_ns: timestamp of status change in nanoseconds
+ * @driver_data: any driver dependent data
+ */
+struct sync_pt_info {
+ __u32 len;
+ char obj_name[32];
+ char driver_name[32];
+ __s32 status;
+ __u64 timestamp_ns;
+
+ __u8 driver_data[0];
+};
+
+/**
+ * struct sync_fence_info_data - data returned from fence info ioctl
+ * @len: ioctl caller writes the size of the buffer its passing in.
+ * ioctl returns length of sync_fence_data returned to userspace
+ * including pt_info.
+ * @name: name of fence
+ * @status: status of fence. 1: signaled 0:active <0:error
+ * @pt_info: a sync_pt_info struct for every sync_pt in the fence
+ */
+struct sync_fence_info_data {
+ __u32 len;
+ char name[32];
+ __s32 status;
+
+ __u8 pt_info[0];
+};
+
+#define SYNC_IOC_MAGIC '>'
+
+/**
+ * DOC: SYNC_IOC_WAIT - wait for a fence to signal
+ *
+ * pass timeout in milliseconds. Waits indefinitely timeout < 0.
+ */
+#define SYNC_IOC_WAIT _IOW(SYNC_IOC_MAGIC, 0, __s32)
+
+/**
+ * DOC: SYNC_IOC_MERGE - merge two fences
+ *
+ * Takes a struct sync_merge_data. Creates a new fence containing copies of
+ * the sync_pts in both the calling fd and sync_merge_data.fd2. Returns the
+ * new fence's fd in sync_merge_data.fence
+ */
+#define SYNC_IOC_MERGE _IOWR(SYNC_IOC_MAGIC, 1, struct sync_merge_data)
+
+/**
+ * DOC: SYNC_IOC_FENCE_INFO - get detailed information on a fence
+ *
+ * Takes a struct sync_fence_info_data with extra space allocated for pt_info.
+ * Caller should write the size of the buffer into len. On return, len is
+ * updated to reflect the total size of the sync_fence_info_data including
+ * pt_info.
+ *
+ * pt_info is a buffer containing sync_pt_infos for every sync_pt in the fence.
+ * To iterate over the sync_pt_infos, use the sync_pt_info.len field.
+ */
+#define SYNC_IOC_FENCE_INFO _IOWR(SYNC_IOC_MAGIC, 2,\
+ struct sync_fence_info_data)
+
+#endif /* _UAPI_LINUX_SYNC_H */
--
1.9.1
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply related [flat|nested] 23+ messages in thread
* [RFC] igt/gem_exec_fence: New test for sync/fence interface
2016-01-13 17:57 [RFC 0/9] Add native sync support to i915 driver John.C.Harrison
` (8 preceding siblings ...)
2016-01-13 17:57 ` [RFC 9/9] drm/i915: Add sync support to the scheduler statistics and status dump John.C.Harrison
@ 2016-01-19 16:04 ` John.C.Harrison
9 siblings, 0 replies; 23+ messages in thread
From: John.C.Harrison @ 2016-01-19 16:04 UTC (permalink / raw)
To: Intel-GFX; +Cc: Gustavo Padovan
From: John Harrison <John.C.Harrison@Intel.com>
Note, this is a work in progress. It is being posted now as there is
work going on to change the debugging interface used by this test. So
it would be useful to get some comments on whether the proposed
changes will cause a problem for this test or whether the test itself
should be done differently.
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Cc: Gustavo Padovan <gustavo.padovan@collabora.co.uk>
---
lib/intel_batchbuffer.c | 36 ++
lib/intel_batchbuffer.h | 1 +
tests/Makefile.sources | 1 +
tests/gem_exec_fence.c | 1470 +++++++++++++++++++++++++++++++++++++++++++++++
4 files changed, 1508 insertions(+)
create mode 100644 tests/gem_exec_fence.c
diff --git a/lib/intel_batchbuffer.c b/lib/intel_batchbuffer.c
index 692521f..55c7f9f 100644
--- a/lib/intel_batchbuffer.c
+++ b/lib/intel_batchbuffer.c
@@ -186,6 +186,27 @@ intel_batchbuffer_flush_on_ring(struct intel_batchbuffer *batch, int ring)
intel_batchbuffer_reset(batch);
}
+static void
+intel_batchbuffer_flush_on_ring_fence(struct intel_batchbuffer *batch, int ring,
+ int fence_in, int *fence_out)
+{
+ unsigned int used = flush_on_ring_common(batch, ring);
+ drm_intel_context *ctx;
+
+ if (used == 0)
+ return;
+
+ do_or_die(drm_intel_bo_subdata(batch->bo, 0, used, batch->buffer));
+
+ batch->ptr = NULL;
+
+ ctx = batch->ctx;
+ do_or_die(drm_intel_gem_bo_context_fence_exec(batch->bo, ctx, used,
+ ring, fence_in, fence_out));
+
+ intel_batchbuffer_reset(batch);
+}
+
void
intel_batchbuffer_set_context(struct intel_batchbuffer *batch,
drm_intel_context *context)
@@ -239,6 +260,21 @@ intel_batchbuffer_flush(struct intel_batchbuffer *batch)
intel_batchbuffer_flush_on_ring(batch, ring);
}
+/**
+ * intel_batchbuffer_flush_fence:
+ * @batch: batchbuffer object
+ *
+ * Submits the batch for execution on the blitter engine, selecting the right
+ * ring depending upon the hardware platform.
+ */
+void
+intel_batchbuffer_flush_fence(struct intel_batchbuffer *batch, int fence_in, int *fence_out)
+{
+ int ring = 0;
+ if (HAS_BLT_RING(batch->devid))
+ ring = I915_EXEC_BLT;
+ intel_batchbuffer_flush_on_ring_fence(batch, ring, fence_in, fence_out);
+}
/**
* intel_batchbuffer_emit_reloc:
diff --git a/lib/intel_batchbuffer.h b/lib/intel_batchbuffer.h
index 869747d..5dece2a 100644
--- a/lib/intel_batchbuffer.h
+++ b/lib/intel_batchbuffer.h
@@ -35,6 +35,7 @@ void intel_batchbuffer_free(struct intel_batchbuffer *batch);
void intel_batchbuffer_flush(struct intel_batchbuffer *batch);
+void intel_batchbuffer_flush_fence(struct intel_batchbuffer *batch, int fence_in, int *fence_out);
void intel_batchbuffer_flush_on_ring(struct intel_batchbuffer *batch, int ring);
void intel_batchbuffer_flush_with_context(struct intel_batchbuffer *batch,
drm_intel_context *context);
diff --git a/tests/Makefile.sources b/tests/Makefile.sources
index 8fb2de8..1000324 100644
--- a/tests/Makefile.sources
+++ b/tests/Makefile.sources
@@ -26,6 +26,7 @@ TESTS_progs_M = \
gem_exec_alignment \
gem_exec_bad_domains \
gem_exec_faulting_reloc \
+ gem_exec_fence \
gem_exec_nop \
gem_exec_params \
gem_exec_parse \
diff --git a/tests/gem_exec_fence.c b/tests/gem_exec_fence.c
new file mode 100644
index 0000000..ab6cc84
--- /dev/null
+++ b/tests/gem_exec_fence.c
@@ -0,0 +1,1470 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+ * John Harrison <john.c.harrison@intel.com>
+ * Geoff Miller <geoff.miller@intel.com>
+ *
+ */
+
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <fcntl.h>
+#include <inttypes.h>
+#include <errno.h>
+#include <sys/stat.h>
+#include <sys/ioctl.h>
+#include <sys/time.h>
+#include <signal.h>
+#include <time.h>
+#include <pthread.h>
+#include "drm.h"
+#include "ioctl_wrappers.h"
+#include "drmtest.h"
+#include "intel_io.h"
+#include "intel_chipset.h"
+#include "igt_aux.h"
+#include "sync/sw_sync.h"
+#include "sync/sync.h"
+
+#define I915_DRIVER_NAME "i915"
+#define FENCE_ACTIVE (0)
+#define FENCE_SIGNALED (1)
+
+/* Structure to encapsulate a sw_sync device */
+struct sw_sync_obj
+{
+ int fd;
+ int tick; /* Can't read this directly, so we track it here */
+};
+
+static struct sw_sync_obj user_sync_obj;
+
+/*
+ * Open a new sw sync object
+ * @obj: Pointer to a struct sw_sync_obj
+ * @return: Zero if open worked.
+ */
+static int init_sw_sync(struct sw_sync_obj *obj)
+{
+ obj->fd = open("/dev/sw_sync", O_RDWR);
+ obj->tick = 0;
+ return (obj->fd == -1);
+}
+
+/*
+ * Closes sw_sync device
+ * @obj: pointer to a struct sw_sync_obj
+ */
+static void close_sw_sync(struct sw_sync_obj *obj)
+{
+ close(obj->fd);
+}
+
+/*
+ * Creates a user fence at a given place on the timeline
+ * Assumes that we are using a struct sw_sync_obj called user_sync_obj in
+ * global scope
+ * @fence_out: the new fence returned to the user
+ * @value: the position to place the fence on the timeline
+ * @return: return value from ioctl
+ */
+static int user_create_fence(int *fence_out, uint32_t value)
+{
+ int ret;
+ struct sw_sync_create_fence_data data;
+ data.value = value;
+ ret = ioctl(user_sync_obj.fd, SW_SYNC_IOC_CREATE_FENCE, &data);
+ *fence_out = data.fence;
+ return ret;
+}
+
+/*
+ * Increments timeline by a given count
+ * Assumes that we are using a struct sw_sync_obj called user_sync_obj in
+ * global scope. Note that we also increment the local counter here, but
+ * only if the ioctl succeeded, to avoid going out of sync.
+ * @step: Number of steps to increment the timeline
+ * @return: return value from ioctl
+ */
+static int user_inc_timeline(uint32_t step)
+{
+ int ret;
+ uint32_t localstep = step;
+ ret = ioctl(user_sync_obj.fd, SW_SYNC_IOC_INC, &localstep);
+ if (ret == 0)
+ {
+ user_sync_obj.tick += localstep;
+ }
+ return ret;
+}
+
+/*
+ * Wait on a given fence for a timeout
+ * This is a basic wrapper around the SYNC_IOC_WAIT ioctl, see sync/sync.h
+ * for behavioural details.
+ * @fence: fd for fence to wait on
+ * @timeout: pointer to timeout value in milliseconds
+ * @return: return value of ioctl
+ */
+static int wait_fence(int fence, int *timeout)
+{
+ return ioctl(fence, SYNC_IOC_WAIT, timeout);
+}
+
+/*
+ * Merge two fences into a new fence
+ * @fence_out: pointer to fd for new fence
+ * @fence_a: first input fence
+ * @fence_b: second input fence
+ * @return: return value of ioctl
+ */
+static int merge_fence(int *fence_out, int fence_a, int fence_b)
+{
+ int ret;
+ struct sync_merge_data data;
+ data.fd2 = fence_b;
+ ret = ioctl(fence_a, SYNC_IOC_MERGE, &data);
+ if (ret == 0)
+ {
+ *fence_out = data.fence;
+ }
+ return ret;
+}
+
+/*
+ * Writes fence info into sync_fence_info structure. Note that this can be
+ * variable length, so set data->len accordingly - see sync/sync.h
+ * @fence: fence to get information on
+ * @data: pointer to struct sync_fence_info_data
+ * @return: return value from ioctl
+ */
+static int get_fence_info(int fence, struct sync_fence_info_data *data)
+{
+ return ioctl(fence, SYNC_IOC_FENCE_INFO, data);
+}
+
+static int fd;
+static drm_intel_bufmgr *bufmgr;
+static struct intel_batchbuffer *batch;
+static uint32_t devid;
+
+static uint32_t nop_batch[2] = {MI_BATCH_BUFFER_END};
+static uint32_t handle;
+
+/*
+ * Directly submits a nop via the EXECBUFFER2 Ioctl
+ * The user of this function is expected to set the flags and fence arguments
+ * correctly.
+ * @ctx pointer to a drm_intel_context created by the user (use NULL to ignore)
+ * @flags control the engine selection, enable fence output
+ * @fence_in fence used by the submission
+ * @fence_out pointer to a fence optionally returned by the submission
+ */
+static int nop_exec_with_ctx(drm_intel_context *ctx, unsigned int flags, int fence_in, int *fence_out)
+{
+ struct drm_i915_gem_execbuffer2 execbuf;
+ struct drm_i915_gem_exec_object2 gem_exec[1];
+ int ret = 0;
+
+ gem_exec[0].handle = handle;
+ gem_exec[0].relocation_count = 0;
+ gem_exec[0].relocs_ptr = 0;
+ gem_exec[0].alignment = 0;
+ gem_exec[0].offset = 0;
+ gem_exec[0].flags = 0;
+ gem_exec[0].rsvd1 = 0;
+ gem_exec[0].rsvd2 = 0;
+
+ execbuf.buffers_ptr = (uintptr_t)gem_exec;
+ execbuf.buffer_count = 1;
+ execbuf.batch_start_offset = 0;
+ execbuf.batch_len = 8;
+ execbuf.cliprects_ptr = 0;
+ execbuf.num_cliprects = 0;
+ execbuf.DR1 = 0;
+ execbuf.DR4 = 0;
+ execbuf.flags = flags | I915_EXEC_SECURE;
+ if (ctx != NULL)
+ {
+ i915_execbuffer2_set_context_id(execbuf, *(int*)ctx);
+ }
+ else
+ {
+ i915_execbuffer2_set_context_id(execbuf, 0);
+ }
+ execbuf.rsvd2 = fence_in;
+
+ ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
+ if (ret == 0 && fence_out != NULL)
+ *fence_out = (int) (execbuf.rsvd2 >> 32);
+
+ return ret;
+}
+
+/*
+ * Wrapper function for nop_exec_with_ctx for when context is not explicit
+ */
+static int nop_exec(unsigned int flags, int fence_in, int *fence_out)
+{
+ return nop_exec_with_ctx(NULL, flags, fence_in, fence_out);
+}
+
+/*
+ * Test to show that sending a batch buffer without requesting a fence
+ * doesn't return a fence.
+ * Assumptions: batch buffer was executed
+ */
+static void fence_not_requested_not_created(void)
+{
+ int fence;
+
+ igt_assert(nop_exec(I915_EXEC_RENDER, 0, &fence) == 0);
+ igt_assert(fence == 0);
+ gem_sync(fd, handle);
+}
+
+/*
+ * Test to show that we get a fence when one is requested.
+ * Assumptions: > 0 == valid fence, fence was initialised to <= 0
+ */
+static void fence_create(void)
+{
+ int fence;
+
+ igt_assert(nop_exec(I915_EXEC_RENDER | I915_EXEC_CREATE_FENCE, 0,
+ &fence) == 0);
+ igt_assert(fence > 0); /* Zero is a valid FD but we assume it will
+ always be taken during IGT runs and like this
+ we can catch more errors. */
+
+ close(fence);
+ gem_sync(fd, handle);
+}
+
+/*
+ * Test to show that a requested fence can be queried and comes from the
+ * correct driver.
+ * Assumptions: valid fence values are >= 0, fence initialised to < 0
+ * queried fence data is sensible
+ */
+static void fence_driver_data(void)
+{
+ int fence;
+ char buf[4096];
+ struct sync_fence_info_data *data =
+ (struct sync_fence_info_data *)buf;
+ struct sync_pt_info *pt = (struct sync_pt_info *)&data->pt_info;
+
+ igt_assert(nop_exec(I915_EXEC_RENDER | I915_EXEC_CREATE_FENCE, 0,
+ &fence) == 0);
+ igt_assert(fence >= 0);
+
+ gem_sync(fd, handle);
+
+ /* Read the sync fence info and check it matches our driver */
+ data->len = sizeof(buf);
+ igt_assert(get_fence_info(fence, data) == 0);
+ igt_assert(strcmp(pt->driver_name, I915_DRIVER_NAME) == 0);
+
+ close(fence);
+}
+
+/*
+ * Helper function to get the status of a given fence
+ * Calls the _SYNC_IOC_FENCE_INFO ioctl
+ * @fence Fence object to check
+ * @return Status of fence
+ */
+static int get_fence_status(int fence)
+{
+ char buf[4096];
+ struct sync_fence_info_data *data =
+ (struct sync_fence_info_data *)buf;
+
+ data->len = sizeof(buf);
+ igt_assert(get_fence_info(fence, data) == 0);
+
+ return data->status;
+}
+
+/*
+ * Tests that a requested fence becomes signalled.
+ * Assumptions: the fence was active at some point, fence values etc.
+ */
+static void fence_signaled(void)
+{
+ int fence, status;
+ unsigned int loop = 10;
+
+ igt_assert(nop_exec(I915_EXEC_RENDER | I915_EXEC_CREATE_FENCE, 0,
+ &fence) == 0);
+ igt_assert(fence > 0);
+
+ /* Make sure status is completed after a while */
+ status = get_fence_status(fence);
+ while (status == FENCE_ACTIVE && loop--) {
+ usleep(20000);
+ status = get_fence_status(fence);
+ }
+ igt_assert(status == FENCE_SIGNALED);
+
+ close(fence);
+}
+
+/*
+ * Helper function to create a blitting batch buffer
+ * Assumes that 'batch' is in scope
+ * @dst Destination buffer object
+ * @src Source buffer object
+ * @width Blit width
+ * @height Blit height
+ * @dst_pitch Destination pitch
+ * @src_pitch Source pitch
+ * TODO determine whether these instructions are ok for:
+ * a) multiple flavours of Gen
+ * b) public consumption
+ */
+static void blit_copy(drm_intel_bo *dst, drm_intel_bo *src,
+ unsigned int width, unsigned int height,
+ unsigned int dst_pitch, unsigned int src_pitch)
+{
+ BLIT_COPY_BATCH_START(0);
+ OUT_BATCH((3 << 24) | /* 32 bits */
+ (0xcc << 16) | /* copy ROP */
+ dst_pitch);
+ OUT_BATCH(0 << 16 | 0);
+ OUT_BATCH(height << 16 | width);
+ OUT_RELOC_FENCED(dst, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
+ OUT_BATCH(0 << 16 | 0);
+ OUT_BATCH(src_pitch);
+ OUT_RELOC_FENCED(src, I915_GEM_DOMAIN_RENDER, 0, 0);
+ ADVANCE_BATCH();
+
+ if (batch->gen >= 6) {
+ BEGIN_BATCH(3, 0);
+ OUT_BATCH(XY_SETUP_CLIP_BLT_CMD);
+ OUT_BATCH(0);
+ OUT_BATCH(0);
+ ADVANCE_BATCH();
+ }
+}
+
+#define NSEC_PER_SEC 1000000000L
+#define USEC_PER_SEC 1000000L
+
+/*
+ * Helper function to generate and submit a (series of ?) large blit(s)
+ * @limit number of repeated blits in the same batch buffer
+ * @timeout if nonzero, wait on the bo for timeout ns
+ * @fence_in pass in fence to use as sync point
+ * @fence_out pointer to pass back fence if timeout is nonzero
+ * @return zero or value of the bo wait call
+ */
+static int _emit_dummy_load__bcs(int limit, int timeout, int fence_in, int *fence_out)
+{
+ int i, ret = 0;
+ drm_intel_bo *src_bo, *dst_bo;
+
+ src_bo = drm_intel_bo_alloc(bufmgr, "dummy_bo", 2048*2048*4, 4096);
+ igt_assert(src_bo);
+
+ dst_bo = drm_intel_bo_alloc(bufmgr, "dummy_bo", 2048*2048*4, 4096);
+ igt_assert(dst_bo);
+
+ for (i = 0; i < limit; i++) {
+ blit_copy(dst_bo, src_bo,
+ 2048, 2048,
+ 2048*4, 2048*4);
+ }
+ intel_batchbuffer_flush_fence(batch, fence_in, timeout > 0 ? NULL : fence_out);
+
+ if (timeout > 0)
+ ret = drm_intel_gem_bo_wait(dst_bo, timeout * NSEC_PER_SEC);
+
+ drm_intel_bo_unreference(src_bo);
+ drm_intel_bo_unreference(dst_bo);
+
+ return ret;
+}
+
+/*
+ * Helper function to get current time in usecs
+ * @return Current time in usecs
+ */
+static unsigned long gettime_us(void)
+{
+ struct timespec ts;
+
+ clock_gettime(CLOCK_MONOTONIC, &ts);
+
+ return ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
+}
+
+/*
+ * Helper function finds the limit to generate a second's worth
+ * of submission activity on a given ring (engine)
+ * @ring_name String representing ring (engine) name
+ * @emit Pointer to function that generates a workload
+ * @return Number of operations per second
+ */
+static int calibrate_dummy_load(const char *ring_name,
+ int (*emit)(int limit, int timeout, int fence_in, int *fence_out))
+{
+ unsigned long start;
+ int ops = 1;
+
+ start = gettime_us();
+
+ do {
+ unsigned long diff;
+ int ret;
+
+ ret = emit((ops+1)/2, 10, -1, NULL);
+ diff = gettime_us() - start;
+
+ if (ret || diff / USEC_PER_SEC >= 1)
+ break;
+
+ ops += ops;
+ } while (ops < 100000);
+
+ igt_debug("%s dummy load calibrated: %d operations / second\n",
+ ring_name, ops);
+
+ return ops;
+}
+
+static int ops_per_sec;
+
+/*
+ * Helper function to submit N seconds worth of blits and generate
+ * a fence to wait on.
+ * @seconds Number of seconds worth of operations to submit
+ * @fence_out Pointer to requested fence
+ */
+static void emit_dummy_load__bcs(int seconds, int *fence_out)
+{
+ if (ops_per_sec == 0)
+ ops_per_sec = calibrate_dummy_load("bcs",
+ _emit_dummy_load__bcs);
+
+ _emit_dummy_load__bcs(seconds * ops_per_sec, 0, -1, fence_out);
+}
+
+/*
+ * Tests that fences make a transition from active to signalled
+ * Assumptions: valid fence values
+ */
+static void fence_signal(void)
+{
+ int fence;
+ unsigned int loop = 1000;
+ int status;
+ unsigned long start, end;
+
+ start = gettime_us();
+
+ /* Submit a spinning batch */
+ emit_dummy_load__bcs(2, &fence);
+ igt_assert(fence > 0);
+
+ /* Make sure status is active after a while */
+ usleep(20000);
+ status = get_fence_status(fence);
+ igt_assert(status == FENCE_ACTIVE);
+
+ /* Make sure status is completed after a while */
+ status = get_fence_status(fence);
+ while (status == FENCE_ACTIVE && loop--) {
+ usleep(20000);
+ status = get_fence_status(fence);
+ }
+ igt_assert(status == FENCE_SIGNALED);
+ end = gettime_us();
+
+ printf("Fence took %ld.%06lds\n", (end - start) / 1000000, (end - start) % 1000000);
+
+ close(fence);
+}
+
+/*
+ * Dummy action for signal catcher
+ */
+static void signal_nop(int sig)
+{
+}
+
+/*
+ * Test that we can wait on an active fence for less than the time it
+ * takes to clear, and then wait for the fence to clear properly.
+ */
+static void fence_timeout(void)
+{
+ int fence;
+ int timeout = 500; /* in ms */
+ struct sigaction sigact, orig_sigact;
+
+ /* Submit a spinning batch */
+ emit_dummy_load__bcs(2, &fence);
+ igt_assert(fence > 0);
+
+ /* Make sure status is active after a while */
+ usleep(20000);
+ igt_assert(get_fence_status(fence) == FENCE_ACTIVE);
+
+ /* Set up signal to break the wait if broken */
+ memset(&sigact, 0, sizeof(sigact));
+ sigact.sa_handler = signal_nop;
+ igt_assert(sigaction(SIGALRM, &sigact, &orig_sigact) == 0);
+ alarm(10);
+
+ /* Wait on fence */
+ igt_assert(wait_fence(fence, &timeout) < 0);
+ igt_assert(errno == ETIME);
+
+ /* Wait for batch completion */
+ timeout = 100000;
+ igt_assert(wait_fence(fence, &timeout) == 0);
+ igt_assert(get_fence_status(fence) == FENCE_SIGNALED);
+
+ /* Restore and cleanup */
+ alarm(0);
+ igt_assert(sigaction(SIGALRM, &orig_sigact, NULL) == 0);
+ close(fence);
+}
+
+/*
+ * Tests that we can wait for a full fence timeout (repeat?)
+ */
+static void fence_wait(void)
+{
+ int fence;
+ int timeout = 4000; /* in ms */
+ struct sigaction sigact, orig_sigact;
+ int ret;
+
+ /* Submit a spinning batch */
+ emit_dummy_load__bcs(1, &fence);
+ igt_assert(fence > 0);
+
+ /* Make sure status is active after a while */
+ usleep(20000);
+ igt_assert(get_fence_status(fence) == FENCE_ACTIVE);
+
+ /* Set up signal to break the wait if broken */
+ memset(&sigact, 0, sizeof(sigact));
+ sigact.sa_handler = signal_nop;
+ igt_assert(sigaction(SIGALRM, &sigact, &orig_sigact) == 0);
+ alarm(10);
+
+ /* Wait for batch completion */
+ ret = wait_fence(fence, &timeout);
+ igt_assert(ret == 0);
+ igt_assert(get_fence_status(fence) == FENCE_SIGNALED);
+
+ /* Restore and cleanup */
+ alarm(0);
+ igt_assert(sigaction(SIGALRM, &orig_sigact, NULL) == 0);
+ close(fence);
+}
+
+/*
+ * Tests that a previously requested fence can be submitted with a batch
+ * buffer. Does not make any checks on the second fence.
+ */
+static void fence_wait_fence(void)
+{
+ int fence, fence_dup;
+ int timeout = 40000; /* in ms */
+ struct sigaction sigact, orig_sigact;
+ int ret;
+
+ /*
+ * Calibrate before submitting anything: subtests can be run
+ * individually, so ops_per_sec must not rely on a previous subtest
+ * having calibrated it (as fence_user_fence_wait/fence_merge do).
+ */
+ ops_per_sec = calibrate_dummy_load("bcs", _emit_dummy_load__bcs);
+
+ /* Submit a spinning batch */
+ emit_dummy_load__bcs(2, &fence);
+ igt_assert(fence > 0);
+
+ igt_assert(get_fence_status(fence) == FENCE_ACTIVE);
+
+ /* Submit a second workload gated on a duplicate of the fence */
+ fence_dup = dup(fence);
+ _emit_dummy_load__bcs(1 * ops_per_sec, 0, fence_dup, NULL);
+
+ /* Make sure status is active after a while */
+ usleep(20000);
+ igt_assert(get_fence_status(fence) == FENCE_ACTIVE);
+
+ /* Set up signal to break the wait if broken */
+ memset(&sigact, 0, sizeof(sigact));
+ sigact.sa_handler = signal_nop;
+ igt_assert(sigaction(SIGALRM, &sigact, &orig_sigact) == 0);
+ alarm(10);
+
+ /* Wait for batch completion */
+ ret = wait_fence(fence, &timeout);
+ igt_assert(ret == 0);
+ igt_assert(get_fence_status(fence) == FENCE_SIGNALED);
+
+ /* Restore and cleanup */
+ alarm(0);
+ igt_assert(sigaction(SIGALRM, &orig_sigact, NULL) == 0);
+ close(fence);
+ close(fence_dup);
+}
+
+/*
+ * Tests that a previously requested fence can be submitted with a batch
+ * buffer. Checks that a simultaneously requested fence still works as
+ * expected.
+ */
+static void fence_wait_fence2(void)
+{
+ int fence, fence_dup;
+ int lastfence;
+ int timeout = 40000; /* in ms */
+ struct sigaction sigact, orig_sigact;
+ int ret;
+
+ /*
+ * Calibrate before submitting anything: subtests can be run
+ * individually, so ops_per_sec must not rely on a previous subtest
+ * having calibrated it (as fence_user_fence_wait/fence_merge do).
+ */
+ ops_per_sec = calibrate_dummy_load("bcs", _emit_dummy_load__bcs);
+
+ /* Submit a spinning batch */
+ emit_dummy_load__bcs(2, &fence);
+ igt_assert(fence > 0);
+
+ igt_assert(get_fence_status(fence) == FENCE_ACTIVE);
+
+ /* Second workload waits on the first and returns its own fence */
+ fence_dup = dup(fence);
+ _emit_dummy_load__bcs(1 * ops_per_sec, 0, fence_dup, &lastfence);
+
+ /* Make sure status is active after a while */
+ usleep(20000);
+ igt_assert(get_fence_status(fence) == FENCE_ACTIVE);
+ igt_assert(get_fence_status(lastfence) == FENCE_ACTIVE);
+
+ /* Set up signal to break the wait if broken */
+ memset(&sigact, 0, sizeof(sigact));
+ sigact.sa_handler = signal_nop;
+ igt_assert(sigaction(SIGALRM, &sigact, &orig_sigact) == 0);
+ alarm(10);
+
+ usleep(20000);
+
+ /* Wait for batch completion */
+ ret = wait_fence(fence, &timeout);
+ igt_assert(ret == 0);
+ igt_assert(get_fence_status(fence) == FENCE_SIGNALED);
+
+ /* Check the second task is still running */
+ igt_assert(get_fence_status(lastfence) == FENCE_ACTIVE);
+
+ usleep(20000);
+
+ /* Wait for batch completion */
+ ret = wait_fence(lastfence, &timeout);
+ igt_assert(ret == 0);
+ igt_assert(get_fence_status(lastfence) == FENCE_SIGNALED);
+
+ /* Restore and cleanup */
+ alarm(0);
+ igt_assert(sigaction(SIGALRM, &orig_sigact, NULL) == 0);
+ close(fence);
+ close(fence_dup);
+}
+
+/*
+ * Test that i915 can wait on a user-created fence: a batch is gated on a
+ * sw_sync fence and only completes once the user timeline is advanced far
+ * enough to signal that fence.
+ */
+static void fence_user_fence_wait(void)
+{
+ int myfence, checkfence;
+ int myfencedup;
+ int ret;
+ int timeout = 4000; /* in ms */
+
+ /* Create a fence with the user sync device, at timeline offset 10 */
+ igt_assert(user_create_fence(&myfence, 10) == 0);
+ igt_assert(get_fence_status(myfence) == FENCE_ACTIVE);
+
+ /* Create a copy to submit to the driver */
+ myfencedup = dup(myfence);
+
+ /* Submit fence with driver - requesting a fence back */
+ ops_per_sec = calibrate_dummy_load("bcs", _emit_dummy_load__bcs);
+ _emit_dummy_load__bcs(1 * ops_per_sec, 0, myfencedup, &checkfence);
+
+ /* Make sure our workload is stalled (wait must time out) */
+ igt_assert(wait_fence(checkfence, &timeout) < 0);
+ igt_assert(errno == ETIME);
+
+ /* Increment the timeline until the user fence is signalled */
+ ret = get_fence_status(myfence);
+ while (ret != FENCE_SIGNALED)
+ {
+ igt_assert(get_fence_status(checkfence) == FENCE_ACTIVE);
+ igt_assert(user_inc_timeline(1) == 0);
+ ret = get_fence_status(myfence);
+ }
+
+ /* Check the workload is still active */
+ igt_assert(get_fence_status(checkfence) == FENCE_ACTIVE);
+
+ /* Check that our workload will now finish */
+ igt_assert(wait_fence(checkfence, &timeout) == 0);
+ igt_assert(get_fence_status(checkfence) == FENCE_SIGNALED);
+
+ /* Close both fences so the subtest does not leak fds */
+ close(checkfence);
+ close(myfence);
+}
+
+/*
+ * Test what happens when a user fence's timeline is destroyed while a batch
+ * still depends on an unsignalled fence from it: the dependent batch is
+ * expected to complete once the timeline is torn down.
+ *
+ * NOTE(review): the entire body below the warning banner is disabled by the
+ * early return until the kernel sync code stops leaking batch buffers.
+ */
+static void fence_user_fence_leak(void)
+{
+ int myfence, checkfence;
+ int myfencedup;
+ int timeout = 500; /* in ms */
+
+ printf( "******* WARNING *** WARNING *** WARNING *******\n" );
+ printf( "Until kernel sync code is fixed, this test will\n" );
+ printf( "leak batch buffers that can never be completed!\n" );
+ printf( "******* WARNING *** WARNING *** WARNING *******\n" );
+ return;
+
+ /* Unreachable until the early return above is removed */
+ close_sw_sync(&user_sync_obj);
+ igt_assert(init_sw_sync(&user_sync_obj) == 0);
+
+ /* Create a fence with the user sync device, at timeline offset 10 */
+ igt_assert(user_create_fence(&myfence, 10) == 0);
+ igt_assert(get_fence_status(myfence) == FENCE_ACTIVE);
+
+ /* Create a copy to submit to the driver */
+ myfencedup = dup(myfence);
+
+ /* Submit fence with driver - requesting a fence back */
+ _emit_dummy_load__bcs(1, 0, myfencedup, &checkfence);
+
+ /* Make sure our workload is stalled */
+ igt_assert(wait_fence(checkfence, &timeout) < 0);
+ igt_assert(errno == ETIME);
+
+ /* Close the fence without signalling it */
+ close(myfence);
+
+ /* Close the timeline and leak the fence */
+ close_sw_sync(&user_sync_obj);
+
+ /* Check that our workload will now finish */
+ timeout = 1000; /* in ms */
+ igt_assert(wait_fence(checkfence, &timeout) == 0);
+ igt_assert(get_fence_status(checkfence) == FENCE_SIGNALED);
+}
+
+/*
+ * Test out-of-order fence signalling
+ * A series of batch buffers are created so that they are dependent on fences
+ * which are in a different order:
+ * - bb[0] is dependent on f_user[1]
+ * - bb[1] is dependent on f_user[0]
+ * Neither output fence may signal until BOTH user fences have signalled,
+ * because the batches execute in submission order on the ring.
+ */
+static void fence_ooo_fence(void)
+{
+ int f_out[2];
+ int f_user[2];
+ int f_user_dups[2];
+ int timeout = 200; /* in ms */
+
+ /* Restart the sw_sync device so the timeline starts at zero */
+ close_sw_sync(&user_sync_obj);
+ igt_assert(init_sw_sync(&user_sync_obj) == 0);
+
+ /* Create user fences at timeline offsets 1 and 2 */
+ igt_assert(user_create_fence(&f_user[0], 1) == 0);
+ igt_assert(user_create_fence(&f_user[1], 2) == 0);
+
+ /* Check they are still active */
+ igt_assert(get_fence_status(f_user[0]) == FENCE_ACTIVE);
+ igt_assert(get_fence_status(f_user[1]) == FENCE_ACTIVE);
+
+ /* Create duplicates for submission */
+ f_user_dups[0] = dup(f_user[0]);
+ f_user_dups[1] = dup(f_user[1]);
+
+ /* Generate buffer chain: bb[0] waits on f_user[1], bb[1] on f_user[0] */
+ igt_assert(nop_exec(
+ I915_EXEC_RENDER |
+ I915_EXEC_CREATE_FENCE |
+ I915_EXEC_WAIT_FENCE,
+ f_user_dups[1], &f_out[0]) == 0);
+ igt_assert(nop_exec(
+ I915_EXEC_RENDER |
+ I915_EXEC_CREATE_FENCE |
+ I915_EXEC_WAIT_FENCE,
+ f_user_dups[0], &f_out[1]) == 0);
+
+ /* Wait and check both are still active */
+ usleep(timeout * 1000);
+ igt_assert(get_fence_status(f_out[0]) == FENCE_ACTIVE);
+ igt_assert(get_fence_status(f_out[1]) == FENCE_ACTIVE);
+
+ /* Signal f_user[0] */
+ igt_assert(user_inc_timeline(1) == 0);
+ igt_assert(get_fence_status(f_user[0]) == FENCE_SIGNALED);
+
+ /* Check f_out[0..1] remain active: bb[0] still waits on f_user[1] */
+ usleep(timeout * 1000);
+ igt_assert(get_fence_status(f_out[0]) == FENCE_ACTIVE);
+ igt_assert(get_fence_status(f_out[1]) == FENCE_ACTIVE);
+
+ /* Signal f_user[1] */
+ igt_assert(user_inc_timeline(1) == 0);
+ igt_assert(get_fence_status(f_user[1]) == FENCE_SIGNALED);
+
+ /* Check f_out[0..1] signal as expected */
+ igt_assert(wait_fence(f_out[0], &timeout) == 0);
+ igt_assert(wait_fence(f_out[1], &timeout) == 0);
+ igt_assert(get_fence_status(f_out[0]) == FENCE_SIGNALED);
+ igt_assert(get_fence_status(f_out[1]) == FENCE_SIGNALED);
+
+ /* Close all fences, including the driver-returned ones (fd leak fix) */
+ close(f_out[0]);
+ close(f_out[1]);
+ close(f_user[0]);
+ close(f_user[1]);
+}
+
+/*
+ * Test to show that fences from drm can be merged and waited on as one.
+ * Four chained workloads are submitted; their fences are merged pairwise
+ * (even-indexed and odd-indexed) and the merged fences must only signal
+ * once ALL of their component fences have signalled.
+ */
+static void fence_merge(void)
+{
+ int start_fence, start_fence_dup;
+ int fence_merged_even, fence_merged_odd;
+ int fence_out[4];
+ int fence_out_dup[4];
+ int fence_final;
+
+ int wait_timeout, i;
+
+ /* Restart the sw_sync device so the timeline starts at zero */
+ close_sw_sync(&user_sync_obj);
+ igt_assert(init_sw_sync(&user_sync_obj) == 0);
+
+ igt_assert(user_create_fence(&start_fence, 1) == 0);
+ start_fence_dup = dup(start_fence);
+
+ /* Submit and request fences for a chain of workloads */
+ ops_per_sec = calibrate_dummy_load("bcs", _emit_dummy_load__bcs);
+ _emit_dummy_load__bcs(1 * ops_per_sec, 0,
+ start_fence_dup, &fence_out[0]);
+ fence_out_dup[0] = dup(fence_out[0]);
+
+ /* Each workload i waits on a dup of workload i-1's fence */
+ for (i = 1; i < 4; i++)
+ {
+ _emit_dummy_load__bcs(1 * ops_per_sec, 0, fence_out_dup[i - 1],
+ &fence_out[i]);
+ fence_out_dup[i] = dup(fence_out[i]);
+ }
+
+ /* Merge alternate drm fences into even and odd fences */
+ igt_assert(merge_fence(&fence_merged_even,
+ fence_out[0], fence_out[2]) == 0);
+ igt_assert(merge_fence(&fence_merged_odd,
+ fence_out[1], fence_out[3]) == 0);
+
+ /* Create additional batch to wait on the new merged fences */
+ igt_assert(nop_exec(
+ I915_EXEC_RENDER |
+ I915_EXEC_CREATE_FENCE |
+ I915_EXEC_WAIT_FENCE,
+ dup(fence_merged_odd), &fence_final) == 0);
+
+ /* Signal the user fence to begin the chain */
+ igt_assert(user_inc_timeline(1) == 0);
+
+ /* Wait on each drm fence and check merged fence statuses */
+ wait_timeout = 15000;
+
+ /* After batch 0, all should still be active */
+ igt_assert(wait_fence(fence_out[0], &wait_timeout) == 0);
+ igt_assert(get_fence_status(fence_out[0]) == FENCE_SIGNALED);
+ igt_assert(get_fence_status(fence_merged_even) == FENCE_ACTIVE);
+ igt_assert(get_fence_status(fence_merged_odd) == FENCE_ACTIVE);
+ igt_assert(get_fence_status(fence_final) == FENCE_ACTIVE);
+
+ /* After batch 1, all should still be active */
+ igt_assert(wait_fence(fence_out[1], &wait_timeout) == 0);
+ igt_assert(get_fence_status(fence_out[1]) == FENCE_SIGNALED);
+ igt_assert(get_fence_status(fence_merged_even) == FENCE_ACTIVE);
+ igt_assert(get_fence_status(fence_merged_odd) == FENCE_ACTIVE);
+ igt_assert(get_fence_status(fence_final) == FENCE_ACTIVE);
+
+ /* After batch 2, fence_merged_even should be complete */
+ igt_assert(wait_fence(fence_merged_even, &wait_timeout) == 0);
+ igt_assert(get_fence_status(fence_out[2]) == FENCE_SIGNALED);
+ igt_assert(get_fence_status(fence_merged_even) == FENCE_SIGNALED);
+ igt_assert(get_fence_status(fence_merged_odd) == FENCE_ACTIVE);
+ igt_assert(get_fence_status(fence_final) == FENCE_ACTIVE);
+
+ /* After batch 3, all fences should be complete */
+ igt_assert(wait_fence(fence_merged_odd, &wait_timeout) == 0);
+ igt_assert(get_fence_status(fence_out[3]) == FENCE_SIGNALED);
+ igt_assert(get_fence_status(fence_merged_even) == FENCE_SIGNALED);
+ igt_assert(get_fence_status(fence_merged_odd) == FENCE_SIGNALED);
+
+ /*
+ * The nop batch is too short to observe it still active after the
+ * merged fence signalled, so only check its final state.
+ */
+ igt_assert(wait_fence(fence_final, &wait_timeout) == 0);
+ igt_assert(get_fence_status(fence_final) == FENCE_SIGNALED);
+
+ /*
+ * NOTE(review): only start_fence is closed; fence_out[], the dups,
+ * the merged fences and fence_final leak fds — confirm intentional.
+ */
+ /* Close */
+ close(start_fence);
+}
+
+/*
+ * Test for behaviour of multiple batches dependent on single fence
+ * Show that signalling the fence does not override other dependencies
+ * Scenario A: Same context, multiple batches complete in submission order
+ * despite being triggered by same user fence
+ * Scenario B: Batches in different contexts reliant on same fence but waiting
+ * on earlier work in same context
+ */
+static void fence_multidependency(void)
+{
+ int start_fence;
+ int start_fence_dup, start_fence_dup2;
+ int fence_out[2];
+ int wait_timeout;
+
+ drm_intel_context *ctx[2];
+
+ /* Scenario A */
+ close_sw_sync(&user_sync_obj);
+ igt_assert(init_sw_sync(&user_sync_obj) == 0);
+
+ /* Create user fence to trigger */
+ igt_assert(user_create_fence(&start_fence, 1) == 0);
+ start_fence_dup = dup(start_fence);
+ start_fence_dup2 = dup(start_fence);
+
+ /* Create long workloads, dependent on same fence */
+ ops_per_sec = calibrate_dummy_load("bcs", _emit_dummy_load__bcs);
+ _emit_dummy_load__bcs(3 * ops_per_sec, 0,
+ start_fence_dup, &fence_out[0]);
+ _emit_dummy_load__bcs(1 * ops_per_sec, 0,
+ start_fence_dup2, &fence_out[1]);
+
+ /* Note that the first workload is much longer than the second
+ * to help make sure that it completes first */
+ /* Signal fence */
+ igt_assert(user_inc_timeline(1) == 0);
+ /* Check that workload first submitted completes first */
+ wait_timeout = 45000;
+ igt_assert(wait_fence(fence_out[0], &wait_timeout) == 0);
+ igt_assert(get_fence_status(fence_out[0]) == FENCE_SIGNALED);
+ igt_assert(get_fence_status(fence_out[1]) == FENCE_ACTIVE);
+
+ igt_assert(wait_fence(fence_out[1], &wait_timeout) == 0);
+ igt_assert(get_fence_status(fence_out[1]) == FENCE_SIGNALED);
+
+ close(start_fence);
+
+ /* Scenario B needs execlist contexts, only available on gen8+ */
+ if (batch->gen < 8)
+ {
+ printf("Skipping LRC-related tests\n");
+ return;
+ }
+
+ /* Scenario B */
+ close_sw_sync(&user_sync_obj);
+ igt_assert(init_sw_sync(&user_sync_obj) == 0);
+
+ /* Create user fence to trigger */
+ igt_assert(user_create_fence(&start_fence, 1) == 0);
+ start_fence_dup = dup(start_fence);
+ start_fence_dup2 = dup(start_fence);
+
+ /* Create contexts */
+ igt_assert((ctx[0] = drm_intel_gem_context_create(bufmgr)) != NULL);
+ igt_assert((ctx[1] = drm_intel_gem_context_create(bufmgr)) != NULL);
+ /* Create long workloads on different contexts */
+ /* They are dependent on the same fence */
+ ops_per_sec = calibrate_dummy_load("bcs", _emit_dummy_load__bcs);
+ intel_batchbuffer_set_context(batch, ctx[0]);
+ _emit_dummy_load__bcs(3 * ops_per_sec, 0,
+ start_fence_dup, &fence_out[0]);
+ intel_batchbuffer_set_context(batch, ctx[1]);
+ _emit_dummy_load__bcs(1 * ops_per_sec, 0,
+ start_fence_dup2, &fence_out[1]);
+ /* Signal fence */
+ igt_assert(user_inc_timeline(1) == 0);
+
+ /* Reset the wait budget: scenario A's waits may have consumed it
+ * if wait_fence updates the timeout in place */
+ wait_timeout = 45000;
+ igt_assert(wait_fence(fence_out[0], &wait_timeout) == 0);
+ igt_assert(get_fence_status(fence_out[0]) == FENCE_SIGNALED);
+ igt_assert(get_fence_status(fence_out[1]) == FENCE_ACTIVE);
+
+ igt_assert(wait_fence(fence_out[1], &wait_timeout) == 0);
+ igt_assert(get_fence_status(fence_out[1]) == FENCE_SIGNALED);
+
+ drm_intel_gem_context_destroy(ctx[0]);
+ drm_intel_gem_context_destroy(ctx[1]);
+
+ close(start_fence);
+}
+
+/*
+ * Quick and dirty test to break things by setting up a dependency on a user
+ * fence and then failing to signal it.
+ * That's the theory anyway - it doesn't seem to cause too many problems.
+ *
+ * NOTE(review): this deliberately leaves an unsatisfiable dependency queued
+ * on the render ring's default context; see the trailing comments.
+ */
+static void fence_user_forget(void)
+{
+ int myfence;
+ int fence_out;
+ int timeout;
+
+ /* Restart our sync device to reset the timeline to zero */
+ /* This is a shortcoming of the mini-api at the top of this file */
+ close_sw_sync(&user_sync_obj);
+ igt_assert(init_sw_sync(&user_sync_obj) == 0);
+
+ /* Create a user fence at step 1 */
+ igt_assert(user_create_fence(&myfence, 1) == 0);
+ igt_assert(get_fence_status(myfence) == FENCE_ACTIVE);
+
+ /* Create a submission dependent on this fence */
+ igt_assert(nop_exec(
+ I915_EXEC_RENDER |
+ I915_EXEC_CREATE_FENCE |
+ I915_EXEC_WAIT_FENCE,
+ dup(myfence), &fence_out) == 0);
+
+ /* Wait on our output fence - it must time out, never signal */
+ timeout = 10000; /* in ms */
+ igt_assert(wait_fence(fence_out, &timeout) != 0);
+ igt_assert(get_fence_status(fence_out) == FENCE_ACTIVE);
+
+ /* If we reached here, then we know that the driver is still waiting */
+ /* This will block everything on the render's default context */
+ /* To signal our user fence, advance the timeline by one: */
+ /* igt_assert(user_inc_timeline(1) == 0); */
+ /* Clean up the fence we made - not sure whether this is necessary */
+ close(myfence);
+}
+
+/*
+ * Test to show that fences can be used across multiple engines:
+ * a user fence gates a render batch, whose fence gates a blitter batch,
+ * whose fence in turn gates a second render batch.
+ */
+static void fence_multiengine(void)
+{
+ int start_fence;
+ int fence_a[2];
+ int fence_b;
+ int fence_merged;
+ int timeout = 200; /* in ms */
+
+ /* Restart the sw_sync device so the timeline starts at zero */
+ close_sw_sync(&user_sync_obj);
+ igt_assert(init_sw_sync(&user_sync_obj) == 0);
+
+ /* Create user fence */
+ igt_assert(user_create_fence(&start_fence, 1) == 0);
+
+ /* Set up fences and dependent batches */
+
+ /* User triggers A, which triggers B, then back to A */
+ igt_assert(nop_exec(
+ I915_EXEC_RENDER |
+ I915_EXEC_CREATE_FENCE |
+ I915_EXEC_WAIT_FENCE,
+ dup(start_fence), &fence_a[0]) == 0);
+ igt_assert(nop_exec(
+ I915_EXEC_BLT |
+ I915_EXEC_CREATE_FENCE |
+ I915_EXEC_WAIT_FENCE,
+ dup(fence_a[0]), &fence_b) == 0);
+ igt_assert(nop_exec(
+ I915_EXEC_RENDER |
+ I915_EXEC_CREATE_FENCE |
+ I915_EXEC_WAIT_FENCE,
+ dup(fence_b), &fence_a[1]) == 0);
+
+ /* We also create a merged fence to show everything finished */
+ /* NOTE(review): the second merge overwrites fence_merged; whether
+ * merge_fence releases the old fd is not visible here - confirm
+ * this does not leak a file descriptor */
+ igt_assert(merge_fence(&fence_merged, fence_a[0], fence_a[1]) == 0);
+ igt_assert(merge_fence(&fence_merged, fence_merged, fence_b) == 0);
+
+ /* Wait and check everything is still active */
+ usleep(timeout * 1000);
+ igt_assert(get_fence_status(fence_a[0]) == FENCE_ACTIVE);
+ igt_assert(get_fence_status(fence_a[1]) == FENCE_ACTIVE);
+ igt_assert(get_fence_status(fence_b) == FENCE_ACTIVE);
+
+ /* Trigger first user fence */
+ igt_assert(user_inc_timeline(1) == 0);
+
+ /* Check first fence from A has finished */
+ igt_assert(wait_fence(fence_a[0], &timeout) == 0);
+ igt_assert(get_fence_status(fence_a[0]) == FENCE_SIGNALED);
+ /* Check fence from B has finished */
+ igt_assert(wait_fence(fence_b, &timeout) == 0);
+ igt_assert(get_fence_status(fence_b) == FENCE_SIGNALED);
+ /* Check second fence from A has finished */
+ igt_assert(wait_fence(fence_a[1], &timeout) == 0);
+ igt_assert(get_fence_status(fence_a[1]) == FENCE_SIGNALED);
+
+ /* Check merged fence finished */
+ igt_assert(get_fence_status(fence_merged) == FENCE_SIGNALED);
+
+ close(start_fence);
+}
+
+/*
+ * Gets the status of a given thread id
+ * @mutex pointer to a mutex guarding the state array
+ * @state pointer to the int we are using as a status indicator
+ * Returns the current state value, read under the lock.
+ */
+static int thread_get_status(pthread_mutex_t *mutex, int *state)
+{
+ int value;
+
+ /* Block on the mutex instead of spinning on trylock + usleep */
+ pthread_mutex_lock(mutex);
+ value = *state;
+ pthread_mutex_unlock(mutex);
+ return value;
+}
+
+/*
+ * Sets the status of a thread
+ * @mutex pointer to a mutex guarding the state array
+ * @state pointer to the int we are using as a status indicator
+ * @value value we would like the state set to
+ */
+static void thread_update_status(pthread_mutex_t *mutex,
+ int *state, int value)
+{
+ /* Block on the mutex instead of spinning on trylock + usleep */
+ pthread_mutex_lock(mutex);
+ *state = value;
+ pthread_mutex_unlock(mutex);
+}
+
+/* Thread states - values are strictly ordered; callers compare with "<"
+ * to check a thread has reached at least a given checkpoint */
+#define TSTATE_BEGUN (1) /* thread has begun */
+#define TSTATE_BUSY (2) /* thread is busy */
+#define TSTATE_FENCE_READY (3) /* thread has produced a fence */
+#define TSTATE_SUBMITTED (4) /* thread has submitted all buffers */
+#define TSTATE_COMPLETE (5) /* thread has completed */
+
+/*
+ * Structure passed to the thrash_thread function.
+ * The state pointer must point into a contiguous array shared by all
+ * threads: thrash_thread computes peers' slots by pointer arithmetic.
+ */
+struct thrash_data
+{
+ int id; /* id of the thread for reference */
+ int start_fence; /* starting fence, created by user */
+ pthread_mutex_t *state_mutex; /* mutex to control access to state */
+ int *state; /* pointer to this thread's state integer */
+ int *fence_array; /* pointer to the public fence array */
+ int num_submissions; /* number of nop submissions */
+ int num_threads; /* number of threads */
+};
+
+/*
+ * Thread function to thrash the submission mechanism for a given context
+ * Each thread uses the same drm fd and engine
+ * Each thread is given the same user fence as a trigger
+ * Each thread contains a loop to generate many dependent submissions
+ * The returned fences are used as input for other threads
+ * and also merged into a superfence for that thread
+ * When each thread has finished submitting, it signals its readiness
+ * The main thread checks that all threads are ready, then triggers
+ * @data pointer to the thrash_data structure passed in to the thread
+ * Returns NULL always (pthread entry-point signature).
+ */
+static void *thrash_thread(void *data)
+{
+ int i;
+ int fence_out, super_fence;
+ int next_thread;
+ struct thrash_data *params;
+ drm_intel_context *ctx;
+
+ /* Get the thread parameters; ring topology: each thread feeds off
+ * its right-hand neighbour's fences */
+ params = (struct thrash_data *) data;
+ next_thread = (params->id + 1) % params->num_threads;
+
+ thread_update_status(params->state_mutex,
+ params->state, TSTATE_BEGUN);
+
+ /* Create the context */
+ ctx = drm_intel_gem_context_create(bufmgr);
+
+ /* First nop will be dependent on the starting fence */
+ fence_out = params->start_fence;
+
+ /* Submit the nops */
+ for (i = 0; i < params->num_submissions; i++)
+ {
+ /* Show that we're busy */
+ thread_update_status(params->state_mutex,
+ params->state, TSTATE_BUSY);
+
+ /* Each nop waits on a dup of the previous fence and
+ * returns a fresh one into fence_out */
+ igt_assert(nop_exec_with_ctx(ctx,
+ I915_EXEC_RENDER |
+ I915_EXEC_CREATE_FENCE |
+ I915_EXEC_WAIT_FENCE,
+ dup(fence_out), &fence_out) == 0);
+
+ /* Only need to do a merge from the second submission */
+ if (i > 0)
+ igt_assert(merge_fence(&super_fence,
+ super_fence, fence_out) == 0);
+ else
+ super_fence = fence_out;
+
+ /* Update the public fence and make it available */
+ params->fence_array[params->id] = fence_out;
+
+ thread_update_status(params->state_mutex,
+ params->state, TSTATE_FENCE_READY);
+
+ /* Wait for next thread to have an available fence.
+ * The expression below rebases our state pointer to the
+ * neighbour's slot - it relies on params->state pointing
+ * into the contiguous statearr of the launcher */
+ while (thread_get_status(
+ params->state_mutex,
+ (params->state - params->id + next_thread)) <
+ TSTATE_FENCE_READY)
+ usleep(1000);
+
+ /* Get the next thread's fence */
+ fence_out = params->fence_array[next_thread];
+ }
+
+ printf("[%d] Finished submitting\n", params->id);
+ usleep(1000);
+
+ /* If we have a large enough queue limit in the scheduler, we
+ will have submitted everything already, so the whole queue is
+ waiting for the user to trigger the first fence. But if N_THREADS x
+ num_submissions is greater than the limit, we could have executed
+ this already during our usleep */
+ if (get_fence_status(super_fence) != FENCE_ACTIVE)
+ {
+ printf("[%d] super not active: %d\n", params->id,
+ get_fence_status(super_fence));
+ }
+
+ /*igt_assert(get_fence_status(params->start_fence) == FENCE_ACTIVE);*/
+
+ /* Update thread status */
+ thread_update_status(params->state_mutex,
+ params->state, TSTATE_SUBMITTED);
+ printf("[%d] recorded state %d\n",
+ params->id, *(params->state));
+
+ /* Wait for our super_fence to finish (poll at 1ms intervals) */
+ while (get_fence_status(super_fence) != FENCE_SIGNALED)
+ usleep(1000);
+
+ /* Update thread status */
+ thread_update_status(params->state_mutex,
+ params->state, TSTATE_COMPLETE);
+ printf("[%d] recorded state %d\n",
+ params->id, *(params->state));
+
+ /* Destroy the context */
+ drm_intel_gem_context_destroy(ctx);
+
+ return NULL;
+}
+
+/*
+ * Check that all the threads have reached a particular status
+ * @p_mutex pointer to a mutex guarding the state array
+ * @num_threads The number of threads we are checking
+ * @statearr Pointer to the first integer in a num_threads-sized array
+ * @state The checkpoint we are expecting the threads to have reached
+ */
+static void check_thread_state(pthread_mutex_t *p_mutex, int num_threads,
+ int *statearr, int state)
+{
+ int done, i;
+ int counter = 0;
+ done = 0;
+ /* A limit of 25 tries is imposed, in case of deadlock */
+ while (!done && (counter < 25))
+ {
+ if (pthread_mutex_trylock(p_mutex) == 0)
+ {
+ done = 1;
+ for (i = 0; i < num_threads; i++)
+ {
+ if (statearr[i] < state)
+ {
+ done = 0;
+ break;
+ }
+ }
+ pthread_mutex_unlock(p_mutex);
+ }
+ /* Return immediately on success rather than sleeping
+ * another 50ms as the original loop shape did */
+ if (done)
+ break;
+ usleep(50000);
+ counter++;
+ }
+
+ if (!done)
+ {
+ printf("Couldn't finish checking state %d\n", state);
+ }
+}
+
+/*
+ * Thrash fences across multiple threads, using a single fence to kick it off.
+ * Launches N_THREADS workers that cross-feed each other's fences in a ring
+ * (see thrash_thread), then signals the shared start fence once all of them
+ * report TSTATE_SUBMITTED.
+ */
+static void fence_multithread(void)
+{
+ int i;
+ int N_THREADS = 8;
+ int N_SUBMISSIONS = 9;
+ pthread_mutex_t state_mutex;
+ pthread_t thread_handles[N_THREADS];
+ struct thrash_data t_params[N_THREADS];
+ int statearr[N_THREADS];
+ int fence_array[N_THREADS];
+
+ int start_fence;
+
+ /* Restart the sw_sync device so the timeline starts at zero */
+ close_sw_sync(&user_sync_obj);
+ igt_assert(init_sw_sync(&user_sync_obj) == 0);
+
+ /* Create user fence */
+ igt_assert(user_create_fence(&start_fence, 1) == 0);
+
+ /* Populate thread data - statearr must stay contiguous because
+ * thrash_thread locates peers' slots by pointer arithmetic */
+ for (i = 0; i < N_THREADS; i++)
+ {
+ t_params[i].id = i;
+ t_params[i].start_fence = start_fence;
+ t_params[i].state_mutex = &state_mutex;
+ t_params[i].state = &(statearr[i]);
+ t_params[i].fence_array = fence_array;
+ t_params[i].num_submissions = N_SUBMISSIONS;
+ t_params[i].num_threads = N_THREADS;
+ statearr[i] = 0;
+ fence_array[i] = -1;
+ }
+
+ /* Mutex is initialised before any thread can touch it */
+ pthread_mutex_init(&state_mutex, NULL);
+
+ /* Launch threads */
+ for (i = 0; i < N_THREADS; i++)
+ pthread_create(&thread_handles[i], NULL, thrash_thread,
+ (void *) (&t_params[i]));
+
+ /* Wait for submissions to complete */
+ check_thread_state(&state_mutex, N_THREADS, statearr, TSTATE_SUBMITTED);
+
+ printf("Finished checking threads for state %d\n", TSTATE_SUBMITTED);
+
+ /* Signal the start fence: the whole ring of work can now execute */
+ user_inc_timeline(1);
+ printf("Incremented timeline\n");
+
+ check_thread_state(&state_mutex, N_THREADS, statearr, TSTATE_COMPLETE);
+ printf("Finished checking threads for state %d\n", TSTATE_COMPLETE);
+
+ /* Finish threads */
+ for (i = 0; i < N_THREADS; i++)
+ pthread_join(thread_handles[i], NULL);
+
+ pthread_mutex_destroy(&state_mutex);
+
+ close(start_fence);
+}
+
+igt_main
+{
+ /* Global fixture: sw_sync device, drm fd, nop batch object and
+ * libdrm batchbuffer shared by all subtests */
+ igt_fixture {
+ igt_assert(init_sw_sync(&user_sync_obj) == 0);
+ fd = drm_open_driver_master(DRIVER_INTEL);
+
+ handle = gem_create(fd, 4096);
+ gem_write(fd, handle, 0, nop_batch, sizeof(nop_batch));
+
+ bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
+ devid = intel_get_drm_devid(fd);
+ batch = intel_batchbuffer_alloc(bufmgr, devid);
+ }
+
+ igt_subtest("not-requested-not-created")
+ fence_not_requested_not_created();
+
+ igt_subtest("create")
+ fence_create();
+
+ igt_subtest("driver-data")
+ fence_driver_data();
+
+ igt_subtest("signaled")
+ fence_signaled();
+
+ igt_subtest("signal")
+ fence_signal();
+
+ igt_subtest("wait")
+ fence_wait();
+
+ igt_subtest("timeout")
+ fence_timeout();
+
+ igt_subtest("wait-fence")
+ fence_wait_fence();
+
+ igt_subtest("wait-fence2")
+ fence_wait_fence2();
+
+ igt_subtest("user-fence-wait")
+ fence_user_fence_wait();
+
+ igt_subtest("user-fence-ooo")
+ fence_ooo_fence();
+
+ igt_subtest("user-fence-leak")
+ fence_user_fence_leak();
+
+ igt_subtest("merge")
+ fence_merge();
+
+/* NOTE(review): the two subtests below are disabled; fence_user_forget
+ * deliberately wedges the default context (see its comments) */
+/*
+ igt_subtest("multidependency")
+ fence_multidependency();
+
+ igt_subtest("user-fence-forget")
+ fence_user_forget();
+*/
+ igt_subtest("multiengine")
+ fence_multiengine();
+
+ igt_subtest("multithread")
+ fence_multithread();
+
+ /* Teardown mirrors the fixture above */
+ igt_fixture {
+ gem_close(fd, handle);
+ intel_batchbuffer_free(batch);
+ close(fd);
+ close_sw_sync(&user_sync_obj);
+ }
+}
--
1.9.1
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply related [flat|nested] 23+ messages in thread