* [RFC PATCH] Atomic modeset/pageflip update
@ 2012-10-10 15:04 ville.syrjala
  2012-10-10 15:04 ` [RFC PATCH] drm: Atomic modeset ioctl ville.syrjala
                   ` (2 more replies)
  0 siblings, 3 replies; 4+ messages in thread
From: ville.syrjala @ 2012-10-10 15:04 UTC (permalink / raw)
  To: dri-devel

Here's another set of atomic modeset and pageflip patches. I just skipped
spamming the list with all the patches that contain the gritty details for
now. These three provide a bird's-eye view of the whole thing. The full
series can be found here [1].

The accompanying libdrm stuff is here [2].

So what are the significant changes since the last time:
- Pass the display modes as blobs as opposed to mode object IDs
- Fixed blob passing (it was simply broken in my earlier patches)
- Add completion events
- Add the non-blocking flag
- Clean up the history a bit to make the patches easier on the eye

Unfortunately the whole thing is still based on the pre-i915-modeset-rework
code. So porting the modesetting side over to the current i915 code is one
of the two bigger tasks that I have left. The other is implementing
non-blocking synchronization with the GPU.

[1] https://gitorious.org/vsyrjala/linux/commits/drm_atomic_9
[2] https://gitorious.org/vsyrjala/drm/commits/drm_atomic_7


* [RFC PATCH] drm: Atomic modeset ioctl
  2012-10-10 15:04 [RFC PATCH] Atomic modeset/pageflip update ville.syrjala
@ 2012-10-10 15:04 ` ville.syrjala
  2012-10-10 15:04 ` [RFC PATCH] drm/i915: Implement atomic modesetting ville.syrjala
  2012-10-10 15:04 ` [RFC PATCH] drm/i915: Add atomic page flip support ville.syrjala
  2 siblings, 0 replies; 4+ messages in thread
From: ville.syrjala @ 2012-10-10 15:04 UTC (permalink / raw)
  To: dri-devel

From: Ville Syrjälä <ville.syrjala@linux.intel.com>

The atomic modeset ioctl can be used to push any number of new values
for object properties. The driver can then check the full device
configuration as a single unit, and try to apply the changes atomically.

The ioctl simply takes a list of object IDs and property IDs and their
values. For setting values to blob properties, the property value
indicates the length of the data, and the actual data is passed via
another blob pointer.

The caller can demand non-blocking operation from the ioctl, and if the
driver can't satisfy that requirement an error will be returned.

The caller can also request to receive asynchronous completion events
after the operation has reached the hardware. An event is sent for each
object specified by the caller, whether or not the actual state of
that object changed. Each event also carries a framebuffer ID, which
indicates to user space that the specified object is no longer
accessing that framebuffer.
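
For illustration, here is a minimal user-space sketch of how the argument
arrays fit together (not part of the patch; it assumes the updated headers
from this series, and that drm_fd, plane_id, the plane's FB_ID property ID
and the new fb_id have been looked up beforehand):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>
#include <drm/drm_mode.h>

static int atomic_flip(int drm_fd, uint32_t plane_id,
		       uint32_t fb_prop_id, uint32_t fb_id)
{
	uint32_t objs[]        = { plane_id };
	uint32_t count_props[] = { 1 };          /* one property for this object */
	uint32_t props[]       = { fb_prop_id };
	uint64_t prop_values[] = { fb_id };
	struct drm_mode_atomic arg;

	memset(&arg, 0, sizeof(arg));
	arg.flags = DRM_MODE_ATOMIC_EVENT;       /* ask for a completion event */
	arg.count_objs = 1;
	arg.objs_ptr = (uintptr_t)objs;
	arg.count_props_ptr = (uintptr_t)count_props;
	arg.props_ptr = (uintptr_t)props;
	arg.prop_values_ptr = (uintptr_t)prop_values;
	arg.blob_values_ptr = 0;                 /* no blob properties in this request */

	return ioctl(drm_fd, DRM_IOCTL_MODE_ATOMIC, &arg);
}

For a blob property (e.g. a CRTC display mode), the corresponding
prop_values[] entry would instead hold the length of the data, and
blob_values_ptr would point to a __u64 array of user pointers to the
data itself.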

TODO: detailed error reporting?

Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
---
 drivers/gpu/drm/drm_crtc.c |  146 ++++++++++++++++++++++++++++++++++++++++++++
 drivers/gpu/drm/drm_drv.c  |    1 +
 include/drm/drm.h          |   12 ++++
 include/drm/drmP.h         |    8 +++
 include/drm/drm_crtc.h     |   13 ++++
 include/drm/drm_mode.h     |   16 +++++
 6 files changed, 196 insertions(+), 0 deletions(-)

diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index b313958..38c6604 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -4162,3 +4162,149 @@ int drm_calc_vscale(struct drm_region *src, struct drm_region *dst,
 	return vscale;
 }
 EXPORT_SYMBOL(drm_calc_vscale);
+
+int drm_mode_atomic_ioctl(struct drm_device *dev,
+			  void *data, struct drm_file *file_priv)
+{
+	struct drm_mode_atomic *arg = data;
+	uint32_t __user *objs_ptr = (uint32_t __user *)(unsigned long)(arg->objs_ptr);
+	uint32_t __user *count_props_ptr = (uint32_t __user *)(unsigned long)(arg->count_props_ptr);
+	uint32_t __user *props_ptr = (uint32_t __user *)(unsigned long)(arg->props_ptr);
+	uint64_t __user *prop_values_ptr = (uint64_t __user *)(unsigned long)(arg->prop_values_ptr);
+	uint64_t __user *blob_values_ptr = (uint64_t __user *)(unsigned long)(arg->blob_values_ptr);
+	unsigned int copied_objs = 0;
+	unsigned int copied_props = 0;
+	unsigned int copied_blobs = 0;
+	void *state;
+	int ret = 0;
+	unsigned int i, j;
+
+	if (!dev->driver->atomic_funcs ||
+	    !dev->driver->atomic_funcs->begin ||
+	    !dev->driver->atomic_funcs->set ||
+	    !dev->driver->atomic_funcs->check ||
+	    !dev->driver->atomic_funcs->commit ||
+	    !dev->driver->atomic_funcs->end)
+		return -ENOSYS;
+
+	if (arg->flags & ~(DRM_MODE_ATOMIC_TEST_ONLY | DRM_MODE_ATOMIC_EVENT | DRM_MODE_ATOMIC_NONBLOCK))
+		return -EINVAL;
+
+	/* can't test and expect an event at the same time. */
+	if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY && arg->flags & DRM_MODE_ATOMIC_EVENT)
+		return -EINVAL;
+
+	mutex_lock(&dev->mode_config.mutex);
+
+	state = dev->driver->atomic_funcs->begin(dev, file_priv, arg->flags, arg->user_data);
+	if (IS_ERR(state)) {
+		ret = PTR_ERR(state);
+		goto unlock;
+	}
+
+	for (i = 0; i < arg->count_objs; i++) {
+		uint32_t obj_id, count_props;
+		struct drm_mode_object *obj;
+
+		if (get_user(obj_id, objs_ptr + copied_objs)) {
+			ret = -EFAULT;
+			goto out;
+		}
+
+		obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_ANY);
+		if (!obj || !obj->properties) {
+			ret = -ENOENT;
+			goto out;
+		}
+
+		if (get_user(count_props, count_props_ptr + copied_objs)) {
+			ret = -EFAULT;
+			goto out;
+		}
+
+		copied_objs++;
+
+		for (j = 0; j < count_props; j++) {
+			uint32_t prop_id;
+			uint64_t prop_value;
+			struct drm_mode_object *prop_obj;
+			struct drm_property *prop;
+			void *blob_data = NULL;
+
+			if (get_user(prop_id, props_ptr + copied_props)) {
+				ret = -EFAULT;
+				goto out;
+			}
+
+			if (!object_has_prop(obj, prop_id)) {
+				ret = -EINVAL;
+				goto out;
+			}
+
+			prop_obj = drm_mode_object_find(dev, prop_id, DRM_MODE_OBJECT_PROPERTY);
+			if (!prop_obj) {
+				ret = -ENOENT;
+				goto out;
+			}
+			prop = obj_to_property(prop_obj);
+
+			if (get_user(prop_value, prop_values_ptr + copied_props)) {
+				ret = -EFAULT;
+				goto out;
+			}
+
+			if (!drm_property_change_is_valid(prop, prop_value)) {
+				ret = -EINVAL;
+				goto out;
+			}
+
+			if (prop->flags & DRM_MODE_PROP_BLOB && prop_value) {
+				uint64_t blob_ptr;
+
+				if (get_user(blob_ptr, blob_values_ptr + copied_blobs)) {
+					ret = -EFAULT;
+					goto out;
+				}
+
+				blob_data = kmalloc(prop_value, GFP_KERNEL);
+				if (!blob_data) {
+					ret = -ENOMEM;
+					goto out;
+				}
+
+				if (copy_from_user(blob_data, (void __user *)(unsigned long)blob_ptr, prop_value)) {
+					kfree(blob_data);
+					ret = -EFAULT;
+					goto out;
+				}
+			}
+
+			/* User space sends the blob pointer even if we don't use it (length==0). */
+			if (prop->flags & DRM_MODE_PROP_BLOB)
+				copied_blobs++;
+
+			/* The driver will be in charge of blob_data from now on. */
+			ret = dev->driver->atomic_funcs->set(dev, state, obj, prop, prop_value, blob_data);
+			if (ret)
+				goto out;
+
+			copied_props++;
+		}
+	}
+
+	ret = dev->driver->atomic_funcs->check(dev, state);
+	if (ret)
+		goto out;
+
+	if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)
+		goto out;
+
+	ret = dev->driver->atomic_funcs->commit(dev, state);
+
+ out:
+	dev->driver->atomic_funcs->end(dev, state);
+ unlock:
+	mutex_unlock(&dev->mode_config.mutex);
+
+	return ret;
+}
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 0c059b6..466110b 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -166,6 +166,7 @@ static struct drm_ioctl_desc drm_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATOMIC, drm_mode_atomic_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 };
 
 #define DRM_CORE_IOCTL_COUNT	ARRAY_SIZE( drm_ioctls )
diff --git a/include/drm/drm.h b/include/drm/drm.h
index e51035a..9d524b4 100644
--- a/include/drm/drm.h
+++ b/include/drm/drm.h
@@ -732,6 +732,7 @@ struct drm_prime_handle {
 #define DRM_IOCTL_MODE_ADDFB2		DRM_IOWR(0xB8, struct drm_mode_fb_cmd2)
 #define DRM_IOCTL_MODE_OBJ_GETPROPERTIES	DRM_IOWR(0xB9, struct drm_mode_obj_get_properties)
 #define DRM_IOCTL_MODE_OBJ_SETPROPERTY	DRM_IOWR(0xBA, struct drm_mode_obj_set_property)
+#define DRM_IOCTL_MODE_ATOMIC	DRM_IOWR(0xBB, struct drm_mode_atomic)
 
 /**
  * Device specific ioctls should only be in their respective headers
@@ -763,6 +764,7 @@ struct drm_event {
 
 #define DRM_EVENT_VBLANK 0x01
 #define DRM_EVENT_FLIP_COMPLETE 0x02
+#define DRM_EVENT_ATOMIC_COMPLETE 0x03
 
 struct drm_event_vblank {
 	struct drm_event base;
@@ -773,6 +775,16 @@ struct drm_event_vblank {
 	__u32 reserved;
 };
 
+struct drm_event_atomic {
+	struct drm_event base;
+	__u64 user_data;
+	__u32 tv_sec;
+	__u32 tv_usec;
+	__u32 sequence;
+	__u32 obj_id;
+	__u32 old_fb_id;
+};
+
 #define DRM_CAP_DUMB_BUFFER 0x1
 #define DRM_CAP_VBLANK_HIGH_CRTC 0x2
 #define DRM_CAP_DUMB_PREFERRED_DEPTH 0x3
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index d6b67bb..3766cb6 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -957,6 +957,8 @@ struct drm_driver {
 
 	/* List of devices hanging off this driver */
 	struct list_head device_list;
+
+	const struct drm_atomic_funcs *atomic_funcs;
 };
 
 #define DRM_MINOR_UNASSIGNED 0
@@ -1050,6 +1052,12 @@ struct drm_pending_vblank_event {
 	struct drm_event_vblank event;
 };
 
+struct drm_pending_atomic_event {
+	struct drm_pending_event base;
+	int pipe;
+	struct drm_event_atomic event;
+};
+
 /**
  * DRM device structure. This structure represent a complete card that
  * may contain multiple heads.
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index b2a77ca..38ccbce 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -1071,6 +1071,8 @@ extern int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
 					     struct drm_file *file_priv);
 extern int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
 					   struct drm_file *file_priv);
+extern int drm_mode_atomic_ioctl(struct drm_device *dev,
+				 void *data, struct drm_file *file_priv);
 
 extern void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
 				 int *bpp);
@@ -1111,4 +1113,15 @@ extern int drm_calc_hscale(struct drm_region *src, struct drm_region *dst,
 extern int drm_calc_vscale(struct drm_region *src, struct drm_region *dst,
 			   int min_vscale, int max_vscale);
 
+struct drm_atomic_funcs {
+	void *(*begin)(struct drm_device *dev, struct drm_file *file,
+		       uint32_t flags, uint64_t user_data);
+	int (*set)(struct drm_device *dev, void *state,
+		   struct drm_mode_object *obj, struct drm_property *prop,
+		   uint64_t value, void *blob_data);
+	int (*check)(struct drm_device *dev, void *state);
+	int (*commit)(struct drm_device *dev, void *state);
+	void (*end)(struct drm_device *dev, void *state);
+};
+
 #endif /* __DRM_CRTC_H__ */
diff --git a/include/drm/drm_mode.h b/include/drm/drm_mode.h
index 5581980..85406cd 100644
--- a/include/drm/drm_mode.h
+++ b/include/drm/drm_mode.h
@@ -459,4 +459,20 @@ struct drm_mode_destroy_dumb {
 	uint32_t handle;
 };
 
+#define DRM_MODE_ATOMIC_TEST_ONLY (1<<0)
+#define DRM_MODE_ATOMIC_EVENT (1<<1)
+#define DRM_MODE_ATOMIC_NONBLOCK (1<<2)
+
+/* FIXME come up with some sane error reporting mechanism? */
+struct drm_mode_atomic {
+	__u32 flags;
+	__u32 count_objs;
+	__u64 objs_ptr;
+	__u64 count_props_ptr;
+	__u64 props_ptr;
+	__u64 prop_values_ptr;
+	__u64 blob_values_ptr;
+	__u64 user_data;
+};
+
 #endif
-- 
1.7.8.6


* [RFC PATCH] drm/i915: Implement atomic modesetting
  2012-10-10 15:04 [RFC PATCH] Atomic modeset/pageflip update ville.syrjala
  2012-10-10 15:04 ` [RFC PATCH] drm: Atomic modeset ioctl ville.syrjala
@ 2012-10-10 15:04 ` ville.syrjala
  2012-10-10 15:04 ` [RFC PATCH] drm/i915: Add atomic page flip support ville.syrjala
  2 siblings, 0 replies; 4+ messages in thread
From: ville.syrjala @ 2012-10-10 15:04 UTC (permalink / raw)
  To: dri-devel

From: Ville Syrjälä <ville.syrjala@linux.intel.com>

Implement the atomic modeset operations.

TODO: need to rewrite this for the new intel modeset code

Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
---
 drivers/gpu/drm/i915/Makefile        |    1 +
 drivers/gpu/drm/i915/intel_atomic.c  | 1462 ++++++++++++++++++++++++++++++++++
 drivers/gpu/drm/i915/intel_display.c |    7 +
 3 files changed, 1470 insertions(+), 0 deletions(-)
 create mode 100644 drivers/gpu/drm/i915/intel_atomic.c

diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index b0bacdb..377d100 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -16,6 +16,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
 	  i915_gem_tiling.o \
 	  i915_sysfs.o \
 	  i915_trace_points.o \
+	  intel_atomic.o \
 	  intel_display.o \
 	  intel_crt.o \
 	  intel_lvds.o \
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
new file mode 100644
index 0000000..363018f
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -0,0 +1,1462 @@
+/*
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+
+#include "intel_drv.h"
+
+static struct drm_property *prop_src_x;
+static struct drm_property *prop_src_y;
+static struct drm_property *prop_src_w;
+static struct drm_property *prop_src_h;
+static struct drm_property *prop_crtc_x;
+static struct drm_property *prop_crtc_y;
+static struct drm_property *prop_crtc_w;
+static struct drm_property *prop_crtc_h;
+static struct drm_property *prop_fb_id;
+static struct drm_property *prop_crtc_id;
+static struct drm_property *prop_mode;
+static struct drm_property *prop_connector_ids;
+static struct drm_property *prop_cursor_id;
+static struct drm_property *prop_cursor_x;
+static struct drm_property *prop_cursor_y;
+static struct drm_property *prop_cursor_w;
+static struct drm_property *prop_cursor_h;
+
+struct intel_plane_state {
+	struct drm_plane *plane;
+	struct drm_framebuffer *old_fb;
+	struct intel_plane_coords coords;
+	bool dirty;
+	bool pinned;
+};
+
+struct intel_crtc_state {
+	struct drm_crtc *crtc;
+	struct drm_framebuffer *old_fb;
+	struct drm_i915_gem_object *old_cursor_bo;
+	bool mode_dirty;
+	bool fb_dirty;
+	bool cursor_dirty;
+	bool active_dirty;
+	bool pinned;
+	bool cursor_pinned;
+	unsigned long connectors_bitmask;
+	unsigned long encoders_bitmask;
+};
+
+struct intel_atomic_state {
+	struct drm_file *file;
+	struct intel_plane_state *plane;
+	struct intel_crtc_state *crtc;
+	bool dirty;
+	bool restore_hw;
+	unsigned int flags;
+	uint64_t user_data;
+	struct drm_plane *saved_planes;
+	struct intel_crtc *saved_crtcs;
+	struct drm_connector *saved_connectors;
+	struct drm_encoder *saved_encoders;
+};
+
+static void update_connectors_bitmask(struct intel_crtc_state *st)
+{
+	struct drm_device *dev = st->crtc->dev;
+	struct drm_connector *connector;
+	unsigned int i;
+
+	st->connectors_bitmask = 0;
+
+	i = 0;
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		if (connector->encoder && connector->encoder->crtc == st->crtc)
+			__set_bit(i, &st->connectors_bitmask);
+
+		i++;
+	}
+}
+
+static void update_encoders_bitmask(struct intel_crtc_state *st)
+{
+	struct drm_device *dev = st->crtc->dev;
+	struct drm_encoder *encoder;
+	unsigned int i;
+
+	st->encoders_bitmask = 0;
+
+	i = 0;
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		if (encoder->crtc == st->crtc)
+			__set_bit(i, &st->encoders_bitmask);
+
+		i++;
+	}
+}
+
+static int process_connectors(struct intel_crtc_state *s, const uint32_t *ids, int count_ids)
+{
+	struct drm_crtc *crtc = s->crtc;
+	struct drm_device *dev = crtc->dev;
+	struct drm_connector *connectors[count_ids];
+	struct drm_connector *connector;
+	struct drm_encoder *encoder;
+	int i;
+
+	for (i = 0; i < count_ids; i++) {
+		struct drm_encoder *encoder;
+		const struct drm_connector_helper_funcs *connector_funcs;
+		struct drm_mode_object *obj;
+		int j;
+
+		/* don't accept duplicates */
+		for (j = i + 1; j < count_ids; j++)
+			if (ids[i] == ids[j])
+				return -EINVAL;
+
+		obj = drm_mode_object_find(dev, ids[i], DRM_MODE_OBJECT_CONNECTOR);
+		if (!obj)
+			return -ENOENT;
+
+		connector = obj_to_connector(obj);
+		connector_funcs = connector->helper_private;
+
+		encoder = connector_funcs->best_encoder(connector);
+
+		if (!drm_encoder_crtc_ok(encoder, crtc))
+			return -EINVAL;
+
+		connectors[i] = connector;
+	}
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		const struct drm_connector_helper_funcs *connector_funcs =
+			connector->helper_private;
+
+		for (i = 0; i < count_ids; i++) {
+			if (connector == connectors[i])
+				break;
+		}
+
+		/* this connector isn't in the set */
+		if (i == count_ids) {
+			/* remove the link to the encoder if this crtc was driving it previously */
+			if (connector->encoder && connector->encoder->crtc == crtc) {
+				s->mode_dirty = true;
+				connector->encoder = NULL;
+			}
+			continue;
+		}
+
+		encoder = connector_funcs->best_encoder(connector);
+
+		if (encoder != connector->encoder) {
+			s->mode_dirty = true;
+			connector->encoder = encoder;
+		}
+
+		if (crtc != encoder->crtc) {
+			s->mode_dirty = true;
+			encoder->crtc = crtc;
+		}
+	}
+
+	/* prune dangling encoder->crtc links pointing to this crtc  */
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		if (encoder->crtc == crtc && !drm_helper_encoder_in_use(encoder))
+			encoder->crtc = NULL;
+	}
+
+	update_connectors_bitmask(s);
+	update_encoders_bitmask(s);
+
+	return 0;
+}
+
+static size_t intel_atomic_state_size(const struct drm_device *dev)
+{
+	struct intel_atomic_state *state;
+	unsigned int num_connector = dev->mode_config.num_connector;
+	unsigned int num_encoder = dev->mode_config.num_encoder;
+	unsigned int num_crtc = dev->mode_config.num_crtc;
+	unsigned int num_plane = dev->mode_config.num_plane;
+
+	return sizeof *state +
+		num_crtc * sizeof state->crtc[0] +
+		num_plane * sizeof state->plane[0] +
+		num_connector * sizeof state->saved_connectors[0] +
+		num_encoder * sizeof state->saved_encoders[0] +
+		num_crtc * sizeof state->saved_crtcs[0] +
+		num_plane * sizeof state->saved_planes[0];
+}
+
+static void *intel_atomic_begin(struct drm_device *dev, struct drm_file *file,
+				uint32_t flags, uint64_t user_data)
+{
+	struct intel_atomic_state *state;
+	struct drm_plane *plane;
+	struct drm_crtc *crtc;
+	struct drm_connector *connector;
+	struct drm_encoder *encoder;
+	unsigned int num_connector = dev->mode_config.num_connector;
+	unsigned int num_encoder = dev->mode_config.num_encoder;
+	unsigned int num_crtc = dev->mode_config.num_crtc;
+	unsigned int num_plane = dev->mode_config.num_plane;
+	int i;
+
+	state = kzalloc(intel_atomic_state_size(dev), GFP_KERNEL);
+	if (!state)
+		return ERR_PTR(-ENOMEM);
+
+	state->flags = flags;
+	state->file = file;
+	state->user_data = user_data;
+
+	state->crtc = (struct intel_crtc_state *)(state + 1);
+	state->plane = (struct intel_plane_state  *)(state->crtc + num_crtc);
+
+	state->saved_connectors = (struct drm_connector *)(state->plane + num_plane);
+	state->saved_encoders = (struct drm_encoder *)(state->saved_connectors + num_connector);
+	state->saved_crtcs = (struct intel_crtc *)(state->saved_encoders + num_encoder);
+	state->saved_planes = (struct drm_plane *)(state->saved_crtcs + num_crtc);
+
+	i = 0;
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		struct intel_crtc_state *s = &state->crtc[i++];
+		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+		s->crtc = crtc;
+		s->old_fb = crtc->fb;
+		s->old_cursor_bo = intel_crtc->cursor_bo;
+
+		update_connectors_bitmask(s);
+		update_encoders_bitmask(s);
+	}
+
+	i = 0;
+	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
+		struct intel_plane_state *s = &state->plane[i++];
+
+		s->plane = plane;
+		s->old_fb = plane->fb;
+	}
+
+	i = 0;
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+		state->saved_connectors[i++] = *connector;
+	i = 0;
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+		state->saved_encoders[i++] = *encoder;
+	i = 0;
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+		state->saved_crtcs[i++] = *intel_crtc;
+	}
+	i = 0;
+	list_for_each_entry(plane, &dev->mode_config.plane_list, head)
+		state->saved_planes[i++] = *plane;
+
+	state->file = file;
+
+	return state;
+}
+
+static int plane_set(struct intel_atomic_state *s,
+		     struct intel_plane_state *state,
+		     struct drm_property *prop,
+		     uint64_t value)
+{
+	struct drm_plane *plane = state->plane;
+	struct drm_mode_object *obj;
+
+	if (prop == prop_src_x) {
+		if (plane->src_x == value)
+			return 0;
+		plane->src_x = value;
+	} else if (prop == prop_src_y) {
+		if (plane->src_y == value)
+			return 0;
+		plane->src_y = value;
+	} else if (prop == prop_src_w) {
+		if (plane->src_w == value)
+			return 0;
+		plane->src_w = value;
+	} else if (prop == prop_src_h) {
+		if (plane->src_h == value)
+			return 0;
+		plane->src_h = value;
+	} else if (prop == prop_crtc_x) {
+		if (plane->crtc_x == value)
+			return 0;
+		plane->crtc_x = value;
+	} else if (prop == prop_crtc_y) {
+		if (plane->crtc_y == value)
+			return 0;
+		plane->crtc_y = value;
+	} else if (prop == prop_crtc_w) {
+		if (plane->crtc_w == value)
+			return 0;
+		plane->crtc_w = value;
+	} else if (prop == prop_crtc_h) {
+		if (plane->crtc_h == value)
+			return 0;
+		plane->crtc_h = value;
+	} else if (prop == prop_crtc_id) {
+		struct drm_crtc *crtc = NULL;
+
+		if (plane->crtc) {
+			if (value == plane->crtc->base.id)
+				return 0;
+		} else {
+			if (value == 0)
+				return 0;
+		}
+
+		if (value) {
+			obj = drm_mode_object_find(plane->dev, value, DRM_MODE_OBJECT_CRTC);
+			if (!obj) {
+				printk("Unknown CRTC ID %llu\n", value);
+				return -ENOENT;
+			}
+			crtc = obj_to_crtc(obj);
+		}
+
+		plane->crtc = crtc;
+	} else if (prop == prop_fb_id) {
+		struct drm_framebuffer *fb = NULL;
+
+		if (plane->fb) {
+			if (value == plane->fb->base.id)
+				return 0;
+		} else {
+			if (value == 0)
+				return 0;
+		}
+
+		if (value) {
+			obj = drm_mode_object_find(plane->dev, value, DRM_MODE_OBJECT_FB);
+			if (!obj) {
+				printk("Unknown framebuffer ID %llu\n", value);
+				return -ENOENT;
+			}
+			fb = obj_to_fb(obj);
+		}
+
+		plane->fb = fb;
+	} else
+		return -ENOENT;
+
+	state->dirty = true;
+	s->dirty = true;
+
+	return 0;
+}
+
+static int crtc_set(struct intel_atomic_state *s,
+		    struct intel_crtc_state *state,
+		    struct drm_property *prop,
+		    uint64_t value, void *blob_data)
+{
+	struct drm_crtc *crtc = state->crtc;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+	struct drm_mode_object *obj;
+
+	if (prop == prop_src_x) {
+		if (crtc->x == value)
+			return 0;
+		crtc->x = value;
+		if (crtc_funcs->mode_set_base)
+			state->fb_dirty = true;
+		else
+			state->mode_dirty = true;
+	} else if (prop == prop_src_y) {
+		if (crtc->y == value)
+			return 0;
+		crtc->y = value;
+		if (crtc_funcs->mode_set_base)
+			state->fb_dirty = true;
+		else
+			state->mode_dirty = true;
+	} else if (prop == prop_mode) {
+		struct drm_mode_modeinfo *umode = blob_data;
+		struct drm_display_mode *mode = NULL;
+
+		if (value != 0 && value != sizeof(*umode)) {
+			DRM_DEBUG_KMS("Invalid mode length\n");
+			return -EINVAL;
+		}
+
+		if (!crtc->enabled) {
+			if (value == 0)
+				return 0;
+		}
+
+		if (value) {
+			int ret;
+
+			mode = drm_mode_create(crtc->dev);
+			if (!mode)
+				return -ENOMEM;
+
+			ret = drm_crtc_convert_umode(mode, umode);
+			if (ret) {
+				DRM_DEBUG_KMS("Invalid mode\n");
+				drm_mode_debug_printmodeline(mode);
+				drm_mode_destroy(crtc->dev, mode);
+				return ret;
+			}
+
+			drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
+		}
+
+		if (crtc->enabled && mode && drm_mode_equal(&crtc->mode, mode)) {
+			drm_mode_destroy(crtc->dev, mode);
+			return 0;
+		}
+
+		/* turn on/off or active area changed? */
+		if (!crtc->enabled || !mode ||
+		    crtc->mode.hdisplay != mode->hdisplay ||
+		    crtc->mode.vdisplay != mode->vdisplay)
+			state->active_dirty = true;
+
+		if (mode) {
+			crtc->mode = *mode;
+			crtc->enabled = true;
+			drm_mode_destroy(crtc->dev, mode);
+		} else
+			crtc->enabled = false;
+		state->mode_dirty = true;
+	} else if (prop == prop_fb_id) {
+		struct drm_framebuffer *fb = NULL;
+
+		if (crtc->fb) {
+			if (value == crtc->fb->base.id)
+				return 0;
+		} else {
+			if (value == 0)
+				return 0;
+		}
+
+		if (value) {
+			obj = drm_mode_object_find(crtc->dev, value, DRM_MODE_OBJECT_FB);
+			if (!obj) {
+				printk("Unknown framebuffer ID %llu\n", value);
+				return -ENOENT;
+			}
+			fb = obj_to_fb(obj);
+		}
+
+		crtc->fb = fb;
+		if (crtc_funcs->mode_set_base)
+			state->fb_dirty = true;
+		else
+			state->mode_dirty = true;
+	} else if (prop == prop_connector_ids) {
+		const uint32_t *ids = blob_data;
+		uint32_t count_ids = value / sizeof(uint32_t);
+		int ret;
+
+		if (value & 3)
+			return -EINVAL;
+
+		if (count_ids > crtc->dev->mode_config.num_connector)
+			return -ERANGE;
+
+		ret = process_connectors(state, ids, count_ids);
+		if (ret)
+			return ret;
+	} else if (prop == prop_cursor_id) {
+		if (intel_crtc->cursor_handle == value)
+			return 0;
+		intel_crtc->cursor_handle = value;
+		state->cursor_dirty = true;
+	} else if (prop == prop_cursor_x) {
+		if (intel_crtc->cursor_x == value)
+			return 0;
+		intel_crtc->cursor_x = value;
+		state->cursor_dirty = true;
+	} else if (prop == prop_cursor_y) {
+		if (intel_crtc->cursor_y == value)
+			return 0;
+		intel_crtc->cursor_y = value;
+		state->cursor_dirty = true;
+	} else if (prop == prop_cursor_w) {
+		if (value != 0 && value != 64)
+			return -EINVAL;
+		if (intel_crtc->cursor_width == value)
+			return 0;
+		intel_crtc->cursor_width = value;
+		state->cursor_dirty = true;
+	} else if (prop == prop_cursor_h) {
+		if (value != 0 && value != 64)
+			return -EINVAL;
+		if (intel_crtc->cursor_height == value)
+			return 0;
+		intel_crtc->cursor_height = value;
+		state->cursor_dirty = true;
+	} else
+		return -ENOENT;
+
+	s->dirty = true;
+
+	return 0;
+}
+
+static struct intel_plane_state *get_plane_state(const struct drm_device *dev,
+						 struct intel_atomic_state *state,
+						 const struct drm_plane *plane)
+{
+	int i;
+
+	for (i = 0; i < dev->mode_config.num_plane; i++)
+		if (plane == state->plane[i].plane)
+			return &state->plane[i];
+
+	return NULL;
+}
+
+static struct intel_crtc_state *get_crtc_state(const struct drm_device *dev,
+					       struct intel_atomic_state *state,
+					       const struct drm_crtc *crtc)
+{
+	int i;
+
+	for (i = 0; i < dev->mode_config.num_crtc; i++)
+		if (crtc == state->crtc[i].crtc)
+			return &state->crtc[i];
+
+	return NULL;
+}
+
+static void crtc_prepare(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+	struct drm_encoder *encoder;
+
+	if (!crtc->enabled)
+		return;
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		const struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
+
+		if (encoder->crtc != crtc)
+			continue;
+
+		encoder_funcs->prepare(encoder);
+	}
+
+	drm_crtc_prepare_encoders(dev);
+
+	crtc_funcs->prepare(crtc);
+}
+
+static int crtc_set_base(struct drm_crtc *crtc)
+{
+	const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+
+	return crtc_funcs->mode_set_base_nopin(crtc, crtc->x, crtc->y);
+}
+
+static int crtc_mode_set(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+	struct drm_encoder *encoder;
+	int ret;
+
+	if (!crtc->enabled) {
+		crtc_funcs->disable_nopin(crtc);
+		return 0;
+	}
+
+	ret = crtc_funcs->mode_set_nopin(crtc, &crtc->mode, &crtc->hwmode, crtc->x, crtc->y);
+	if (ret)
+		return ret;
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		const struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
+
+		if (encoder->crtc != crtc)
+			continue;
+
+		encoder_funcs->mode_set(encoder, &crtc->mode, &crtc->hwmode);
+	}
+
+	return 0;
+}
+
+static void crtc_commit(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+	struct drm_encoder *encoder;
+
+	if (!crtc->enabled)
+		return;
+
+	crtc_funcs->commit(crtc);
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		const struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
+
+		if (encoder->crtc != crtc)
+			continue;
+
+		encoder_funcs->commit(encoder);
+	}
+}
+
+int intel_commit_plane(struct drm_plane *plane,
+		       struct drm_crtc *crtc,
+		       struct drm_framebuffer *fb,
+		       const struct intel_plane_coords *st,
+		       bool pin);
+
+static void unpin_cursors(struct drm_device *dev,
+			  struct intel_atomic_state *s)
+{
+	int i;
+
+	for (i = 0; i < dev->mode_config.num_crtc; i++) {
+		struct intel_crtc_state *st = &s->crtc[i];
+		struct drm_crtc *crtc = st->crtc;
+		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+		if (!st->cursor_pinned)
+			continue;
+
+		intel_crtc_cursor_bo_unref(crtc, intel_crtc->cursor_bo);
+
+		st->cursor_pinned = false;
+	}
+}
+
+static int pin_cursors(struct drm_device *dev,
+			struct intel_atomic_state *s)
+{
+	int i, ret;
+
+	for (i = 0; i < dev->mode_config.num_crtc; i++) {
+		struct intel_crtc_state *st = &s->crtc[i];
+		struct drm_crtc *crtc = st->crtc;
+		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+		if (!st->cursor_dirty)
+			continue;
+
+		ret = intel_crtc_cursor_prepare(crtc, s->file,
+						intel_crtc->cursor_handle,
+						intel_crtc->cursor_width,
+						intel_crtc->cursor_height,
+						&intel_crtc->cursor_bo,
+						&intel_crtc->cursor_addr);
+		if (ret)
+			goto unpin;
+
+		st->cursor_pinned = true;
+	}
+
+	return 0;
+
+unpin:
+	unpin_cursors(dev, s);
+
+	return ret;
+}
+
+static void unpin_old_cursors(struct drm_device *dev,
+			      struct intel_atomic_state *s)
+{
+	int i;
+
+	for (i = 0; i < dev->mode_config.num_crtc; i++) {
+		struct intel_crtc_state *st = &s->crtc[i];
+		struct drm_crtc *crtc = st->crtc;
+
+		if (!st->cursor_dirty)
+			continue;
+
+		if (!st->old_cursor_bo)
+			continue;
+
+		intel_crtc_cursor_bo_unref(crtc, st->old_cursor_bo);
+	}
+}
+
+static void unpin_fbs(struct drm_device *dev,
+		      struct intel_atomic_state *s)
+{
+	int i;
+
+	for (i = dev->mode_config.num_plane - 1; i >= 0; i--) {
+		struct intel_plane_state *st = &s->plane[i];
+		struct drm_plane *plane = st->plane;
+		struct drm_i915_gem_object *obj;
+
+		if (!st->pinned)
+			continue;
+
+		obj = to_intel_framebuffer(plane->fb)->obj;
+
+		mutex_lock(&dev->struct_mutex);
+		intel_unpin_fb_obj(obj);
+		mutex_unlock(&dev->struct_mutex);
+
+		st->pinned = false;
+	}
+
+	for (i = dev->mode_config.num_crtc - 1; i >= 0; i--) {
+		struct intel_crtc_state *st = &s->crtc[i];
+		struct drm_crtc *crtc = st->crtc;
+		struct drm_i915_gem_object *obj;
+
+		if (!st->pinned)
+			continue;
+
+		obj = to_intel_framebuffer(crtc->fb)->obj;
+
+		mutex_lock(&dev->struct_mutex);
+		intel_unpin_fb_obj(obj);
+		mutex_unlock(&dev->struct_mutex);
+
+		st->pinned = false;
+	}
+}
+
+static int pin_fbs(struct drm_device *dev,
+		   struct intel_atomic_state *s)
+{
+	int i, ret;
+
+	for (i = 0; i < dev->mode_config.num_crtc; i++) {
+		struct intel_crtc_state *st = &s->crtc[i];
+		struct drm_crtc *crtc = st->crtc;
+		struct drm_i915_gem_object *obj;
+
+		if (!st->fb_dirty)
+			continue;
+
+		if (!crtc->fb)
+			continue;
+
+		obj = to_intel_framebuffer(crtc->fb)->obj;
+
+		mutex_lock(&dev->struct_mutex);
+		ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
+		mutex_unlock(&dev->struct_mutex);
+
+		if (ret)
+			goto unpin;
+
+		st->pinned = true;
+	}
+
+	for (i = 0; i < dev->mode_config.num_plane; i++) {
+		struct intel_plane_state *st = &s->plane[i];
+		struct drm_plane *plane = st->plane;
+		struct drm_i915_gem_object *obj;
+
+		if (!st->dirty)
+			continue;
+
+		if (!plane->fb)
+			continue;
+
+		obj = to_intel_framebuffer(plane->fb)->obj;
+
+		mutex_lock(&dev->struct_mutex);
+		ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
+		mutex_unlock(&dev->struct_mutex);
+
+		if (ret)
+			goto unpin;
+
+		st->pinned = true;
+	}
+
+	return 0;
+
+ unpin:
+	unpin_fbs(dev, s);
+
+	return ret;
+}
+
+static void unpin_old_fbs(struct drm_device *dev,
+			  struct intel_atomic_state *s)
+{
+	int i;
+
+	for (i = 0; i < dev->mode_config.num_crtc; i++) {
+		struct intel_crtc_state *st = &s->crtc[i];
+		struct drm_i915_gem_object *obj;
+
+		if (!st->fb_dirty)
+			continue;
+
+		if (!st->old_fb)
+			continue;
+
+		obj = to_intel_framebuffer(st->old_fb)->obj;
+
+		mutex_lock(&dev->struct_mutex);
+		intel_unpin_fb_obj(obj);
+		mutex_unlock(&dev->struct_mutex);
+	}
+
+	for (i = 0; i < dev->mode_config.num_plane; i++) {
+		struct intel_plane_state *st = &s->plane[i];
+		struct drm_i915_gem_object *obj;
+
+		if (!st->dirty)
+			continue;
+
+		if (!st->old_fb)
+			continue;
+
+		obj = to_intel_framebuffer(st->old_fb)->obj;
+
+		mutex_lock(&dev->struct_mutex);
+		intel_unpin_fb_obj(obj);
+		mutex_unlock(&dev->struct_mutex);
+	}
+}
+
+static void update_plane_obj(struct drm_device *dev,
+			     struct intel_atomic_state *s)
+{
+	int i;
+
+	for (i = 0; i < dev->mode_config.num_plane; i++) {
+		struct intel_plane_state *st = &s->plane[i];
+		struct drm_plane *plane = st->plane;
+		struct intel_plane *intel_plane = to_intel_plane(plane);
+
+		if (!st->dirty)
+			continue;
+
+		if (plane->fb)
+			intel_plane->obj = to_intel_framebuffer(plane->fb)->obj;
+		else
+			intel_plane->obj = NULL;
+	}
+}
+
+void _intel_disable_plane(struct drm_plane *plane, bool unpin);
+
+static int apply_config(struct drm_device *dev,
+			struct intel_atomic_state *s)
+{
+	int i, ret;
+	struct drm_plane *plane;
+
+	for (i = 0; i < dev->mode_config.num_crtc; i++) {
+		struct intel_crtc_state *st = &s->crtc[i];
+
+		mutex_lock(&dev->struct_mutex);
+
+		if (st->mode_dirty) {
+			/* wait for pending MI_WAIT_FOR_EVENTs */
+			if (st->old_fb)
+				intel_finish_fb(st->old_fb);
+		}
+
+		if (st->fb_dirty) {
+			/* wait for pending page flips */
+			if (st->crtc->fb)
+				intel_finish_fb(st->crtc->fb);
+		}
+
+		mutex_unlock(&dev->struct_mutex);
+
+		if (!st->mode_dirty)
+			continue;
+
+		crtc_prepare(st->crtc);
+	}
+
+	for (i = 0; i < dev->mode_config.num_crtc; i++) {
+		struct intel_crtc_state *st = &s->crtc[i];
+		struct intel_crtc *intel_crtc = to_intel_crtc(st->crtc);
+		int j;
+
+		if (st->mode_dirty) {
+			ret = crtc_mode_set(st->crtc);
+			if (ret)
+				return ret;
+		} else if (st->fb_dirty) {
+			ret = crtc_set_base(st->crtc);
+			if (ret)
+				return ret;
+		}
+
+		if (st->cursor_dirty)
+			intel_crtc_cursor_commit(st->crtc,
+						 intel_crtc->cursor_handle,
+						 intel_crtc->cursor_width,
+						 intel_crtc->cursor_height,
+						 intel_crtc->cursor_bo,
+						 intel_crtc->cursor_addr);
+
+		for (j = 0; j < dev->mode_config.num_plane; j++) {
+			struct intel_plane_state *pst = &s->plane[j];
+			struct drm_plane *plane = pst->plane;
+
+			if (!pst->dirty)
+				continue;
+
+			if (plane->crtc != st->crtc)
+				continue;
+
+			ret = intel_commit_plane(plane, plane->crtc, plane->fb, &pst->coords, false);
+			if (ret)
+				return ret;
+		}
+	}
+
+	for (i = 0; i < dev->mode_config.num_crtc; i++) {
+		struct intel_crtc_state *st = &s->crtc[i];
+
+		if (!st->mode_dirty)
+			continue;
+
+		crtc_commit(st->crtc);
+	}
+
+	/*
+	 * FIXME perhaps better order would be
+	 * 1. prepare all current objects
+	 * 2. disable unused objects
+	 * 3. set mode for current objects
+	 * 4. commit current objects
+	 */
+	drm_helper_disable_unused_functions(dev);
+	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
+		/* planes attached to crtcs already handled in mode_set() */
+		if (!plane->crtc || !plane->fb)
+			_intel_disable_plane(plane, false);
+	}
+
+	/* don't restore the old state in end() */
+	s->dirty = false;
+
+	return 0;
+}
+
+static void restore_state(struct drm_device *dev,
+			  struct intel_atomic_state *s)
+{
+	int i;
+	struct drm_connector *connector;
+	struct drm_encoder *encoder;
+	struct drm_crtc *crtc;
+	struct drm_plane *plane;
+
+	i = 0;
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+		*connector = s->saved_connectors[i++];
+	i = 0;
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+		*encoder = s->saved_encoders[i++];
+	i = 0;
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+		*intel_crtc = s->saved_crtcs[i++];
+	}
+	i = 0;
+	list_for_each_entry(plane, &dev->mode_config.plane_list, head)
+		*plane = s->saved_planes[i++];
+
+	/* FIXME props etc. */
+
+	/* was the hardware state clobbered? */
+	if (s->restore_hw)
+		apply_config(dev, s);
+}
+
+static int intel_atomic_set(struct drm_device *dev, void *state,
+			    struct drm_mode_object *obj,
+			    struct drm_property *prop,
+			    uint64_t value, void *blob_data)
+{
+	struct intel_atomic_state *s = state;
+	int ret = -EINVAL;
+
+	switch (obj->type) {
+	case DRM_MODE_OBJECT_PLANE:
+		ret = plane_set(s, get_plane_state(dev, s, obj_to_plane(obj)), prop, value);
+		break;
+	case DRM_MODE_OBJECT_CRTC:
+		ret = crtc_set(s, get_crtc_state(dev, s, obj_to_crtc(obj)), prop, value, blob_data);
+		break;
+	default:
+		break;
+	}
+
+	kfree(blob_data);
+
+	return ret;
+}
+
+int intel_check_plane(const struct drm_plane *plane,
+		      const struct drm_crtc *crtc,
+		      const struct drm_framebuffer *fb,
+		      struct intel_plane_coords *st);
+
+static void dirty_planes(const struct drm_device *dev,
+			 struct intel_atomic_state *state,
+			 const struct drm_crtc *crtc)
+{
+	int i;
+
+	for (i = 0; i < dev->mode_config.num_plane; i++) {
+		struct intel_plane_state *s = &state->plane[i];
+
+		if (s->plane->crtc == crtc)
+			s->dirty = true;
+	}
+}
+
+static int check_crtc(struct intel_crtc_state *s)
+{
+	struct drm_crtc *crtc = s->crtc;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct drm_encoder *encoder;
+	struct drm_framebuffer *fb = crtc->fb;
+	struct drm_display_mode mode, adjusted_mode;
+	const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+	int ret;
+
+	/* must have a fb and connectors if we have a mode, and vice versa */
+	if (crtc->enabled) {
+		if (!fb)
+			return -EINVAL;
+		if (!drm_helper_crtc_in_use(crtc))
+			return -EINVAL;
+	} else {
+		if (fb)
+			return -EINVAL;
+		if (drm_helper_crtc_in_use(crtc))
+			return -EINVAL;
+	}
+
+	if (crtc->enabled) {
+		if (crtc->mode.hdisplay > fb->width ||
+		    crtc->mode.vdisplay > fb->height ||
+		    crtc->x > fb->width - crtc->mode.hdisplay ||
+		    crtc->y > fb->height - crtc->mode.vdisplay)
+			return -ENOSPC;
+	}
+
+	if (intel_crtc->cursor_visible &&
+	    (intel_crtc->cursor_width != 64 ||
+	     intel_crtc->cursor_height != 64)) {
+		DRM_DEBUG_KMS("only 64x64 cursor sprites are supported\n");
+		return -EINVAL;
+	}
+
+	if (!crtc->enabled || !s->mode_dirty)
+		return 0;
+
+	mode = adjusted_mode = crtc->mode;
+
+	ret = intel_check_clock(dev, crtc, mode.clock);
+	if (ret)
+		return ret;
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		const struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
+
+		if (!encoder_funcs->mode_fixup(encoder, &mode, &adjusted_mode))
+			return -EINVAL;
+	}
+
+	if (!crtc_funcs->mode_fixup(crtc, &mode, &adjusted_mode))
+		return -EINVAL;
+
+	crtc->hwmode = adjusted_mode;
+
+	return 0;
+}
+
+static int intel_atomic_check(struct drm_device *dev, void *state)
+{
+	struct intel_atomic_state *s = state;
+	int ret;
+	int i;
+
+	if (!s->dirty)
+		return 0;
+
+	for (i = 0; i < dev->mode_config.num_crtc; i++) {
+		struct intel_crtc_state *st = &s->crtc[i];
+
+		if (!st->fb_dirty && !st->mode_dirty && !st->cursor_dirty)
+			continue;
+
+		if (st->mode_dirty && s->flags & DRM_MODE_ATOMIC_NONBLOCK)
+			return -EAGAIN;
+
+		ret = check_crtc(st);
+		if (ret)
+			return ret;
+
+		/*
+		 * Mark all planes on this CRTC as dirty if the active video
+		 * area changed so that the planes will get reclipped correctly.
+		 *
+		 * Also any modesetting will disable+enable the pipe, so the
+		 * plane needs to be re-enabled afterwards too.
+		 * TODO: there's no need to redo the clipping in such cases
+		 * if the computed values were cached, they could be committed
+		 * directly.
+		 */
+		if (st->active_dirty || st->mode_dirty)
+			dirty_planes(dev, s, st->crtc);
+	}
+
+	/* check for conflicts in encoder/connector assignment */
+	for (i = 0; i < dev->mode_config.num_crtc; i++) {
+		struct intel_crtc_state *st = &s->crtc[i];
+		int j;
+
+		for (j = i + 1; j < dev->mode_config.num_crtc; j++) {
+			struct intel_crtc_state *st2 = &s->crtc[j];
+
+			if (st->connectors_bitmask & st2->connectors_bitmask)
+				return -EINVAL;
+
+			if (st->encoders_bitmask & st2->encoders_bitmask)
+				return -EINVAL;
+		}
+	}
+
+	for (i = 0; i < dev->mode_config.num_plane; i++) {
+		struct intel_plane_state *st = &s->plane[i];
+		const struct drm_plane *plane = st->plane;
+
+		if (!st->dirty)
+			continue;
+
+		st->coords.crtc_x = plane->crtc_x;
+		st->coords.crtc_y = plane->crtc_y;
+		st->coords.crtc_w = plane->crtc_w;
+		st->coords.crtc_h = plane->crtc_h;
+
+		st->coords.src_x = plane->src_x;
+		st->coords.src_y = plane->src_y;
+		st->coords.src_w = plane->src_w;
+		st->coords.src_h = plane->src_h;
+
+		ret = intel_check_plane(plane, plane->crtc, plane->fb, &st->coords);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static void update_plane_props(struct drm_plane *plane)
+{
+	struct drm_mode_object *obj = &plane->base;
+
+	drm_object_property_set_value(obj, prop_src_x, plane->src_x);
+	drm_object_property_set_value(obj, prop_src_y, plane->src_y);
+	drm_object_property_set_value(obj, prop_src_w, plane->src_w);
+	drm_object_property_set_value(obj, prop_src_h, plane->src_h);
+
+	drm_object_property_set_value(obj, prop_crtc_x, plane->crtc_x);
+	drm_object_property_set_value(obj, prop_crtc_y, plane->crtc_y);
+	drm_object_property_set_value(obj, prop_crtc_w, plane->crtc_w);
+	drm_object_property_set_value(obj, prop_crtc_h, plane->crtc_h);
+
+	drm_object_property_set_value(obj, prop_fb_id, plane->fb ? plane->fb->base.id : 0);
+	drm_object_property_set_value(obj, prop_crtc_id, plane->crtc ? plane->crtc->base.id : 0);
+}
+
+static int update_connector_ids(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_connector *connector;
+	uint64_t value = 0;
+	int i = 0;
+	uint32_t connector_ids[dev->mode_config.num_connector];
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		if (connector->encoder && connector->encoder->crtc == crtc)
+			connector_ids[i++] = connector->base.id;
+	}
+
+	if (i) {
+		drm_property_blob_replace_data(crtc->connector_ids_blob,
+					       i * sizeof connector_ids[0], connector_ids);
+		value = crtc->connector_ids_blob->base.id;
+	} else
+		drm_property_blob_replace_data(crtc->connector_ids_blob, 0, NULL);
+
+	drm_object_property_set_value(&crtc->base, prop_connector_ids, value);
+
+	return 0;
+}
+
+static int update_mode(struct drm_crtc *crtc)
+{
+	uint64_t value = 0;
+
+	if (crtc->enabled) {
+		struct drm_mode_modeinfo umode;
+
+		drm_crtc_convert_to_umode(&umode, &crtc->mode);
+		drm_property_blob_replace_data(crtc->mode_blob, sizeof umode, &umode);
+		value = crtc->mode_blob->base.id;
+	} else
+		drm_property_blob_replace_data(crtc->mode_blob, 0, NULL);
+
+	drm_object_property_set_value(&crtc->base, prop_mode, value);
+
+	return 0;
+}
+
+static void update_crtc_props(struct drm_crtc *crtc)
+{
+	struct drm_mode_object *obj = &crtc->base;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+	drm_object_property_set_value(obj, prop_src_x, crtc->x);
+	drm_object_property_set_value(obj, prop_src_y, crtc->y);
+
+	drm_object_property_set_value(obj, prop_fb_id, crtc->fb ? crtc->fb->base.id : 0);
+
+	drm_object_property_set_value(obj, prop_cursor_id,
+				      intel_crtc->cursor_handle);
+	drm_object_property_set_value(obj, prop_cursor_x, intel_crtc->cursor_x);
+	drm_object_property_set_value(obj, prop_cursor_y, intel_crtc->cursor_y);
+	drm_object_property_set_value(obj, prop_cursor_w,
+				      intel_crtc->cursor_width);
+	drm_object_property_set_value(obj, prop_cursor_h,
+				      intel_crtc->cursor_height);
+
+	update_mode(crtc);
+	update_connector_ids(crtc);
+}
+
+static void update_props(struct drm_device *dev,
+			 struct intel_atomic_state *s)
+{
+	int i;
+
+	for (i = 0; i < dev->mode_config.num_crtc; i++) {
+		struct intel_crtc_state *st = &s->crtc[i];
+
+		if (!st->fb_dirty && !st->mode_dirty)
+			continue;
+
+		update_crtc_props(st->crtc);
+	}
+
+	for (i = 0; i < dev->mode_config.num_plane; i++) {
+		struct intel_plane_state *st = &s->plane[i];
+
+		if (!st->dirty)
+			continue;
+
+		update_plane_props(st->plane);
+	}
+}
+
+static int intel_atomic_commit(struct drm_device *dev, void *state)
+{
+	struct intel_atomic_state *s = state;
+	int ret;
+
+	if (s->flags & DRM_MODE_ATOMIC_NONBLOCK)
+		return -ENOSYS;
+
+	if (!s->dirty)
+		return 0;
+
+	ret = pin_fbs(dev, s);
+	if (ret)
+		return ret;
+
+	ret = pin_cursors(dev, s);
+	if (ret)
+		return ret;
+
+	/* apply in a blocking manner */
+	ret = apply_config(dev, s);
+	if (ret) {
+		unpin_cursors(dev, s);
+		unpin_fbs(dev, s);
+		s->restore_hw = true;
+		return ret;
+	}
+
+	unpin_old_cursors(dev, s);
+	unpin_old_fbs(dev, s);
+
+	update_plane_obj(dev, s);
+
+	update_props(dev, s);
+
+	return 0;
+}
+
+static void intel_atomic_end(struct drm_device *dev, void *state)
+{
+	struct intel_atomic_state *s = state;
+
+	/* restore the state of all objects */
+	if (s->dirty)
+		restore_state(dev, state);
+
+	kfree(state);
+}
+
+static const struct drm_atomic_funcs intel_atomic_funcs = {
+	.begin = intel_atomic_begin,
+	.set = intel_atomic_set,
+	.check = intel_atomic_check,
+	.commit = intel_atomic_commit,
+	.end = intel_atomic_end,
+};
+
+static struct {
+	struct drm_property **prop;
+	const char *name;
+	uint64_t min;
+	uint64_t max;
+} props[] = {
+	{ &prop_src_x, "SRC_X", 0, UINT_MAX },
+	{ &prop_src_y, "SRC_Y", 0, UINT_MAX },
+	{ &prop_src_w, "SRC_W", 0, UINT_MAX },
+	{ &prop_src_h, "SRC_H", 0, UINT_MAX },
+
+	{ &prop_crtc_x, "CRTC_X", INT_MIN, INT_MAX },
+	{ &prop_crtc_y, "CRTC_Y", INT_MIN, INT_MAX },
+	{ &prop_crtc_w, "CRTC_W", 0, INT_MAX },
+	{ &prop_crtc_h, "CRTC_H", 0, INT_MAX },
+
+	{ &prop_fb_id, "FB_ID", 0, UINT_MAX },
+	{ &prop_crtc_id, "CRTC_ID", 0, UINT_MAX },
+
+	{ &prop_cursor_id, "CURSOR_ID", 0, UINT_MAX },
+	{ &prop_cursor_w, "CURSOR_W", 0, UINT_MAX },
+	{ &prop_cursor_h, "CURSOR_H", 0, UINT_MAX },
+	{ &prop_cursor_x, "CURSOR_X", INT_MIN, INT_MAX },
+	{ &prop_cursor_y, "CURSOR_Y", INT_MIN, INT_MAX },
+};
+
+int intel_atomic_init(struct drm_device *dev)
+{
+	struct drm_crtc *crtc;
+	struct drm_plane *plane;
+	int ret = -ENOMEM;
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(props); i++) {
+		*props[i].prop =
+			drm_property_create_range(dev, 0, props[i].name,
+						  props[i].min, props[i].max);
+		if (!*props[i].prop)
+			goto out;
+	}
+
+	/* FIXME create special object ID list property type? */
+	prop_connector_ids = drm_property_create(dev, DRM_MODE_PROP_BLOB, "CONNECTOR_IDS", 0);
+	if (!prop_connector_ids)
+		goto out;
+
+	prop_mode = drm_property_create(dev, DRM_MODE_PROP_BLOB, "MODE", 0);
+	if (!prop_mode)
+		goto out;
+
+	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
+		struct drm_mode_object *obj = &plane->base;
+
+		drm_object_attach_property(obj, prop_src_x, 0);
+		drm_object_attach_property(obj, prop_src_y, 0);
+		drm_object_attach_property(obj, prop_src_w, 0);
+		drm_object_attach_property(obj, prop_src_h, 0);
+
+		drm_object_attach_property(obj, prop_crtc_x, 0);
+		drm_object_attach_property(obj, prop_crtc_y, 0);
+		drm_object_attach_property(obj, prop_crtc_w, 0);
+		drm_object_attach_property(obj, prop_crtc_h, 0);
+
+		drm_object_attach_property(obj, prop_fb_id, 0);
+		drm_object_attach_property(obj, prop_crtc_id, 0);
+
+		update_plane_props(plane);
+	}
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		struct drm_mode_object *obj = &crtc->base;
+
+		drm_object_attach_property(obj, prop_src_x, 0);
+		drm_object_attach_property(obj, prop_src_y, 0);
+
+		drm_object_attach_property(obj, prop_fb_id, 0);
+		drm_object_attach_property(obj, prop_mode, 0);
+		drm_object_attach_property(obj, prop_connector_ids, 0);
+
+		drm_object_attach_property(obj, prop_cursor_id, 0);
+		drm_object_attach_property(obj, prop_cursor_x, 0);
+		drm_object_attach_property(obj, prop_cursor_y, 0);
+		drm_object_attach_property(obj, prop_cursor_w, 0);
+		drm_object_attach_property(obj, prop_cursor_h, 0);
+
+		crtc->mode_blob = drm_property_create_blob(dev, 0, sizeof(struct drm_mode_modeinfo), NULL);
+		if (!crtc->mode_blob)
+			goto out;
+
+		crtc->connector_ids_blob = drm_property_create_blob(dev, 0,
+								    dev->mode_config.num_connector * sizeof(uint32_t), NULL);
+		if (!crtc->connector_ids_blob)
+			goto out;
+
+		update_crtc_props(crtc);
+	}
+
+	dev->driver->atomic_funcs = &intel_atomic_funcs;
+
+	return 0;
+
+ out:
+	drm_property_destroy(dev, prop_mode);
+	drm_property_destroy(dev, prop_connector_ids);
+
+	while (i--)
+		drm_property_destroy(dev, *props[i].prop);
+
+	return ret;
+}
+
+void intel_atomic_fini(struct drm_device *dev)
+{
+	struct drm_crtc *crtc;
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		drm_property_destroy_blob(dev, crtc->mode_blob);
+		drm_property_destroy_blob(dev, crtc->connector_ids_blob);
+	}
+
+	drm_property_destroy(dev, prop_connector_ids);
+	drm_property_destroy(dev, prop_mode);
+	drm_property_destroy(dev, prop_crtc_id);
+	drm_property_destroy(dev, prop_fb_id);
+
+	drm_property_destroy(dev, prop_crtc_h);
+	drm_property_destroy(dev, prop_crtc_w);
+	drm_property_destroy(dev, prop_crtc_y);
+	drm_property_destroy(dev, prop_crtc_x);
+
+	drm_property_destroy(dev, prop_src_h);
+	drm_property_destroy(dev, prop_src_w);
+	drm_property_destroy(dev, prop_src_y);
+	drm_property_destroy(dev, prop_src_x);
+}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 592065e..d6a3e51 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -43,6 +43,9 @@
 
 #define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
 
+int intel_atomic_init(struct drm_device *dev);
+void intel_atomic_fini(struct drm_device *dev);
+
 bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
 static void intel_increase_pllclock(struct drm_crtc *crtc);
 static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
@@ -7014,6 +7017,8 @@ static void intel_setup_outputs(struct drm_device *dev)
 			intel_encoder_clones(dev, encoder->clone_mask);
 	}
 
+	intel_atomic_init(dev);
+
 	/* disable all the possible outputs/crtcs before entering KMS mode */
 	drm_helper_disable_unused_functions(dev);
 
@@ -7484,6 +7489,8 @@ void intel_modeset_cleanup(struct drm_device *dev)
 	del_timer_sync(&dev_priv->idle_timer);
 	cancel_work_sync(&dev_priv->idle_work);
 
+	intel_atomic_fini(dev);
+
 	drm_mode_config_cleanup(dev);
 }
 
-- 
1.7.8.6


* [RFC PATCH] drm/i915: Add atomic page flip support
  2012-10-10 15:04 [RFC PATCH] Atomic modeset/pageflip update ville.syrjala
  2012-10-10 15:04 ` [RFC PATCH] drm: Atomic modeset ioctl ville.syrjala
  2012-10-10 15:04 ` [RFC PATCH] drm/i915: Implement atomic modesetting ville.syrjala
@ 2012-10-10 15:04 ` ville.syrjala
  2 siblings, 0 replies; 4+ messages in thread
From: ville.syrjala @ 2012-10-10 15:04 UTC (permalink / raw)
  To: dri-devel

From: Ville Syrjälä <ville.syrjala@linux.intel.com>

Utilize drm_flip to implement "atomic page flip". When a flip involves
multiple planes on one pipe, the operations on those planes must be
synchronized in software, since the hardware doesn't provide the means.
drm_flip is used to make that happen, and to track the progress of the
flip operations.
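
As a rough illustration of the kind of software synchronization described
above (a simplified, hypothetical sketch; the names below are made up and
do not match the drm_flip helper this patch actually uses): the driver
queues one tracking structure per plane, programs all queued planes
back-to-back from the pipe's vblank interrupt so they latch in the same
frame, and then completes the flips (events, unpinning the old buffers).

#include <linux/io.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct example_flip {
	struct list_head head;
	u32 surf_reg;		/* plane surface address register offset */
	u32 new_surf_addr;	/* GTT offset of the new framebuffer */
};

struct example_pipe {
	spinlock_t lock;
	void __iomem *mmio;
	struct list_head pending;	/* example_flip entries queued at commit */
};

/* called from the vblank interrupt of this pipe */
static void example_handle_vblank(struct example_pipe *pipe)
{
	struct example_flip *flip, *tmp;

	spin_lock(&pipe->lock);
	list_for_each_entry_safe(flip, tmp, &pipe->pending, head) {
		/* write every queued plane within one vblank so the whole
		 * set appears on screen atomically */
		writel(flip->new_surf_addr, pipe->mmio + flip->surf_reg);
		list_del(&flip->head);
		/* completion (DRM_EVENT_ATOMIC_COMPLETE, unpinning) follows */
	}
	spin_unlock(&pipe->lock);
}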

Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_dma.c      |    5 +
 drivers/gpu/drm/i915/i915_drv.h      |    4 +
 drivers/gpu/drm/i915/i915_irq.c      |   18 +-
 drivers/gpu/drm/i915/intel_atomic.c  |  813 +++++++++++++++++++++++++++++++++-
 drivers/gpu/drm/i915/intel_display.c |    2 +
 drivers/gpu/drm/i915/intel_drv.h     |    6 +
 6 files changed, 832 insertions(+), 16 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index e958e54..79ad32d 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1762,6 +1762,8 @@ int i915_driver_open(struct drm_device *dev, struct drm_file *file)
 
 	idr_init(&file_priv->context_idr);
 
+	INIT_LIST_HEAD(&file_priv->pending_flips);
+
 	return 0;
 }
 
@@ -1792,10 +1794,13 @@ void i915_driver_lastclose(struct drm_device * dev)
 	i915_dma_cleanup(dev);
 }
 
+void intel_atomic_free_events(struct drm_device *dev, struct drm_file *file);
+
 void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
 {
 	i915_gem_context_close(dev, file_priv);
 	i915_gem_release(dev, file_priv);
+	intel_atomic_free_events(dev, file_priv);
 }
 
 void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 57e4894..80645df 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -36,6 +36,7 @@
 #include <linux/io-mapping.h>
 #include <linux/i2c.h>
 #include <linux/i2c-algo-bit.h>
+#include <drm/drm_flip.h>
 #include <drm/intel-gtt.h>
 #include <linux/backlight.h>
 #include <linux/intel-iommu.h>
@@ -845,6 +846,8 @@ typedef struct drm_i915_private {
 	struct work_struct parity_error_work;
 	bool hw_contexts_disabled;
 	uint32_t hw_context_size;
+
+	struct drm_flip_driver flip_driver;
 } drm_i915_private_t;
 
 /* Iterate over initialised rings */
@@ -1055,6 +1058,7 @@ struct drm_i915_file_private {
 		struct list_head request_list;
 	} mm;
 	struct idr context_idr;
+	struct list_head pending_flips;
 };
 
 #define INTEL_INFO(dev)	(((struct drm_i915_private *) (dev)->dev_private)->info)
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 23f2ea0..f816dab 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -37,6 +37,8 @@
 #include "i915_trace.h"
 #include "intel_drv.h"
 
+void intel_atomic_handle_vblank(struct drm_device *dev, int pipe);
+
 /* For display hotplug interrupt */
 static void
 ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
@@ -547,8 +549,10 @@ static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
 		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
 		for_each_pipe(pipe) {
-			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
+			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) {
 				drm_handle_vblank(dev, pipe);
+				intel_atomic_handle_vblank(dev, pipe);
+			}
 
 			if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
 				intel_prepare_page_flip(dev, pipe);
@@ -685,8 +689,10 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
 				intel_prepare_page_flip(dev, i);
 				intel_finish_page_flip_plane(dev, i);
 			}
-			if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
+			if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i))) {
 				drm_handle_vblank(dev, i);
+				intel_atomic_handle_vblank(dev, i);
+			}
 		}
 
 		/* check event from PCH */
@@ -778,11 +784,15 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
 		intel_finish_page_flip_plane(dev, 1);
 	}
 
-	if (de_iir & DE_PIPEA_VBLANK)
+	if (de_iir & DE_PIPEA_VBLANK) {
 		drm_handle_vblank(dev, 0);
+		intel_atomic_handle_vblank(dev, 0);
+	}
 
-	if (de_iir & DE_PIPEB_VBLANK)
+	if (de_iir & DE_PIPEB_VBLANK) {
 		drm_handle_vblank(dev, 1);
+		intel_atomic_handle_vblank(dev, 1);
+	}
 
 	/* check event from PCH */
 	if (de_iir & DE_PCH_EVENT) {
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
index 363018f..9fa95d3 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -3,6 +3,7 @@
 
 #include <drm/drmP.h>
 #include <drm/drm_crtc.h>
+#include <drm/drm_flip.h>
 
 #include "intel_drv.h"
 
@@ -24,12 +25,29 @@ static struct drm_property *prop_cursor_y;
 static struct drm_property *prop_cursor_w;
 static struct drm_property *prop_cursor_h;
 
+struct intel_flip {
+	struct drm_flip base;
+	u32 vbl_count;
+	bool vblank_ref;
+	bool has_cursor;
+	struct drm_crtc *crtc;
+	struct drm_plane *plane;
+	struct drm_i915_gem_object *old_bo;
+	struct drm_i915_gem_object *old_cursor_bo;
+	struct drm_pending_atomic_event *event;
+	uint32_t old_fb_id;
+	struct list_head pending_head;
+};
+
 struct intel_plane_state {
 	struct drm_plane *plane;
 	struct drm_framebuffer *old_fb;
 	struct intel_plane_coords coords;
 	bool dirty;
 	bool pinned;
+	bool need_event;
+	struct drm_pending_atomic_event *event;
+	struct intel_flip *flip;
 };
 
 struct intel_crtc_state {
@@ -44,6 +62,9 @@ struct intel_crtc_state {
 	bool cursor_pinned;
 	unsigned long connectors_bitmask;
 	unsigned long encoders_bitmask;
+	bool need_event;
+	struct drm_pending_atomic_event *event;
+	struct intel_flip *flip;
 };
 
 struct intel_atomic_state {
@@ -269,6 +290,12 @@ static int plane_set(struct intel_atomic_state *s,
 	struct drm_plane *plane = state->plane;
 	struct drm_mode_object *obj;
 
+	/*
+	 * always send an event when user sets the state of an object,
+	 * always send an event when the user sets the state of an object,
+	 */
+	state->need_event = true;
+
 	if (prop == prop_src_x) {
 		if (plane->src_x == value)
 			return 0;
@@ -362,6 +389,12 @@ static int crtc_set(struct intel_atomic_state *s,
 	const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
 	struct drm_mode_object *obj;
 
+	/*
+	 * always send an event when user sets the state of an object,
+	 * always send an event when the user sets the state of an object,
+	 */
+	state->need_event = true;
+
 	if (prop == prop_src_x) {
 		if (crtc->x == value)
 			return 0;
@@ -847,6 +880,119 @@ static void update_plane_obj(struct drm_device *dev,
 
 void _intel_disable_plane(struct drm_plane *plane, bool unpin);
 
+static struct drm_pending_atomic_event *alloc_event(struct drm_device *dev,
+						    struct drm_file *file_priv,
+						    uint64_t user_data)
+{
+	struct drm_pending_atomic_event *e;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+
+	if (file_priv->event_space < sizeof e->event) {
+		spin_unlock_irqrestore(&dev->event_lock, flags);
+		return ERR_PTR(-ENOSPC);
+	}
+
+	file_priv->event_space -= sizeof e->event;
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+
+	e = kzalloc(sizeof *e, GFP_KERNEL);
+	if (!e) {
+		spin_lock_irqsave(&dev->event_lock, flags);
+		file_priv->event_space += sizeof e->event;
+		spin_unlock_irqrestore(&dev->event_lock, flags);
+
+		return ERR_PTR(-ENOMEM);
+	}
+
+	e->event.base.type = DRM_EVENT_ATOMIC_COMPLETE;
+	e->event.base.length = sizeof e->event;
+	e->event.user_data = user_data;
+	e->base.event = &e->event.base;
+	e->base.file_priv = file_priv;
+	e->base.destroy = (void (*) (struct drm_pending_event *)) kfree;
+
+	return e;
+}
+
+static void free_event(struct drm_pending_atomic_event *e)
+{
+	e->base.file_priv->event_space += sizeof e->event;
+	kfree(e);
+}
+
+void intel_atomic_free_events(struct drm_device *dev, struct drm_file *file)
+{
+	struct drm_i915_file_private *file_priv = file->driver_priv;
+	struct intel_flip *intel_flip, *next;
+
+	spin_lock_irq(&dev->event_lock);
+
+	list_for_each_entry_safe(intel_flip, next, &file_priv->pending_flips, pending_head) {
+		free_event(intel_flip->event);
+		intel_flip->event = NULL;
+		list_del_init(&intel_flip->pending_head);
+	}
+
+	spin_unlock_irq(&dev->event_lock);
+}
+
+static void queue_event(struct drm_device *dev, struct drm_crtc *crtc,
+			struct drm_pending_atomic_event *e)
+{
+	int pipe = to_intel_crtc(crtc)->pipe;
+	struct timeval tvbl;
+
+	/* FIXME this is wrong for flips that don't complete at vblank */
+	e->event.sequence = drm_vblank_count_and_time(dev, pipe, &tvbl);
+	e->event.tv_sec = tvbl.tv_sec;
+	e->event.tv_usec = tvbl.tv_usec;
+
+	list_add_tail(&e->base.link, &e->base.file_priv->event_list);
+	wake_up_interruptible(&e->base.file_priv->event_wait);
+}
+
+static void queue_remaining_events(struct drm_device *dev, struct intel_atomic_state *s)
+{
+	int i;
+
+	for (i = 0; i < dev->mode_config.num_crtc; i++) {
+		struct intel_crtc_state *st = &s->crtc[i];
+
+		if (st->event) {
+			if (st->old_fb)
+				st->event->event.old_fb_id = st->old_fb->base.id;
+
+			spin_lock_irq(&dev->event_lock);
+			queue_event(dev, st->crtc, st->event);
+			spin_unlock_irq(&dev->event_lock);
+
+			st->event = NULL;
+		}
+	}
+
+	for (i = 0; i < dev->mode_config.num_plane; i++) {
+		struct intel_plane_state *st = &s->plane[i];
+
+		if (!st->event)
+			continue;
+
+		/* FIXME should send the event to the CRTC the plane was on */
+		if (!st->plane->crtc)
+			continue;
+
+		if (st->old_fb)
+			st->event->event.old_fb_id = st->old_fb->base.id;
+
+		spin_lock_irq(&dev->event_lock);
+		queue_event(dev, st->plane->crtc, st->event);
+		spin_unlock_irq(&dev->event_lock);
+
+		st->event = NULL;
+	}
+}
+
 static int apply_config(struct drm_device *dev,
 			struct intel_atomic_state *s)
 {
@@ -964,7 +1110,14 @@ static void restore_state(struct drm_device *dev,
 	i = 0;
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+		/*
+		 * A bit of a hack since we don't have
+		 * state separated from the crtc internals
+		 */
+		spin_lock_irq(&intel_crtc->flip_helper.driver->lock);
+		s->saved_crtcs[i].flip_helper = intel_crtc->flip_helper;
 		*intel_crtc = s->saved_crtcs[i++];
+		spin_unlock_irq(&intel_crtc->flip_helper.driver->lock);
 	}
 	i = 0;
 	list_for_each_entry(plane, &dev->mode_config.plane_list, head)
@@ -1268,17 +1421,145 @@ static void update_props(struct drm_device *dev,
 	}
 }
 
+static void atomic_pipe_commit(struct drm_device *dev,
+			       struct intel_atomic_state *state,
+			       int pipe);
+
+static int apply_nonblocking(struct drm_device *dev, struct intel_atomic_state *s)
+{
+	struct intel_crtc *intel_crtc;
+	int i;
+
+	for (i = 0; i < dev->mode_config.num_crtc; i++) {
+		struct intel_crtc_state *st = &s->crtc[i];
+
+		/*
+		 * FIXME need to think this stuff through. The intel_crtc_page_flip
+		 * vs. intel_disable_crtc handling doesn't make much sense either.
+		 */
+		if (st->old_fb && atomic_read(&to_intel_framebuffer(st->old_fb)->obj->pending_flip) != 0)
+			return -EBUSY;
+	}
+
+	for (i = 0; i < dev->mode_config.num_plane; i++) {
+		struct intel_plane_state *st = &s->plane[i];
+
+		/*
+		 * FIXME need to think this stuff through. The intel_crtc_page_flip
+		 * vs. intel_disable_crtc handling doesn't make much sense either.
+		 */
+		if (st->old_fb && atomic_read(&to_intel_framebuffer(st->old_fb)->obj->pending_flip) != 0)
+			return -EBUSY;
+	}
+
+	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head)
+		atomic_pipe_commit(dev, s, intel_crtc->pipe);
+
+	/* don't restore the old state in end() */
+	s->dirty = false;
+
+	return 0;
+}
+
+static int alloc_flip_data(struct drm_device *dev, struct intel_atomic_state *s)
+{
+	int i;
+
+	for (i = 0; i < dev->mode_config.num_crtc; i++) {
+		struct intel_crtc_state *st = &s->crtc[i];
+
+		if (st->need_event && s->flags & DRM_MODE_ATOMIC_EVENT) {
+			struct drm_pending_atomic_event *e;
+
+			e = alloc_event(dev, s->file, s->user_data);
+			if (IS_ERR(e))
+				return PTR_ERR(e);
+
+			e->event.obj_id = st->crtc->base.id;
+
+			st->event = e;
+		}
+
+		if (!st->fb_dirty && !st->mode_dirty && !st->cursor_dirty)
+			continue;
+
+		st->flip = kzalloc(sizeof *st->flip, GFP_KERNEL);
+		if (!st->flip)
+			return -ENOMEM;
+	}
+
+
+	for (i = 0; i < dev->mode_config.num_plane; i++) {
+		struct intel_plane_state *st = &s->plane[i];
+
+		if (st->need_event && s->flags & DRM_MODE_ATOMIC_EVENT) {
+			struct drm_pending_atomic_event *e;
+
+			e = alloc_event(dev, s->file, s->user_data);
+			if (IS_ERR(e))
+				return PTR_ERR(e);
+
+			e->event.obj_id = st->plane->base.id;
+
+			st->event = e;
+		}
+
+		if (!st->dirty)
+			continue;
+
+		st->flip = kzalloc(sizeof *st->flip, GFP_KERNEL);
+		if (!st->flip)
+			return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void free_flip_data(struct drm_device *dev, struct intel_atomic_state *s)
+{
+	int i;
+
+	for (i = 0; i < dev->mode_config.num_crtc; i++) {
+		struct intel_crtc_state *st = &s->crtc[i];
+
+		if (st->event) {
+			spin_lock_irq(&dev->event_lock);
+			free_event(st->event);
+			spin_unlock_irq(&dev->event_lock);
+			st->event = NULL;
+		}
+
+		kfree(st->flip);
+		st->flip = NULL;
+	}
+
+	for (i = 0; i < dev->mode_config.num_plane; i++) {
+		struct intel_plane_state *st = &s->plane[i];
+
+		if (st->event) {
+			spin_lock_irq(&dev->event_lock);
+			free_event(st->event);
+			spin_unlock_irq(&dev->event_lock);
+			st->event = NULL;
+		}
+
+		kfree(st->flip);
+		st->flip = NULL;
+	}
+}
+
 static int intel_atomic_commit(struct drm_device *dev, void *state)
 {
 	struct intel_atomic_state *s = state;
 	int ret;
 
-	if (s->flags & DRM_MODE_ATOMIC_NONBLOCK)
-		return -ENOSYS;
-
 	if (!s->dirty)
 		return 0;
 
+	ret = alloc_flip_data(dev, s);
+	if (ret)
+		return ret;
+
 	ret = pin_fbs(dev, s);
 	if (ret)
 		return ret;
@@ -1287,17 +1568,38 @@ static int intel_atomic_commit(struct drm_device *dev, void *state)
 	if (ret)
 		return ret;
 
-	/* apply in a blocking manner */
-	ret = apply_config(dev, s);
-	if (ret) {
-		unpin_cursors(dev, s);
-		unpin_fbs(dev, s);
-		s->restore_hw = true;
-		return ret;
+	/* try to apply in a non blocking manner */
+	if (s->flags & DRM_MODE_ATOMIC_NONBLOCK) {
+		ret = apply_nonblocking(dev, s);
+		if (ret) {
+			unpin_cursors(dev, s);
+			unpin_fbs(dev, s);
+			return ret;
+		}
+	} else {
+		/* apply in a blocking manner */
+		ret = apply_config(dev, s);
+		if (ret) {
+			unpin_cursors(dev, s);
+			unpin_fbs(dev, s);
+			s->restore_hw = true;
+			return ret;
+		}
+
+		unpin_old_cursors(dev, s);
+		unpin_old_fbs(dev, s);
 	}
 
-	unpin_old_cursors(dev, s);
-	unpin_old_fbs(dev, s);
+	/*
+	 * Either we took the blocking code path, or perhaps the state of
+	 * some objects didn't actually change? Nonetheless the user wanted
+	 * events for all objects he touched, so queue up any events that
+	 * are still pending.
+	 *
+	 * FIXME this needs more work. If the previous flip is still pending
+	 * we shouldn't send this event until that flip completes.
+	 */
+	queue_remaining_events(dev, s);
 
 	update_plane_obj(dev, s);
 
@@ -1310,6 +1612,9 @@ static void intel_atomic_end(struct drm_device *dev, void *state)
 {
 	struct intel_atomic_state *s = state;
 
+	/* don't send events when restoring old state */
+	free_flip_data(dev, state);
+
 	/* restore the state of all objects */
 	if (s->dirty)
 		restore_state(dev, state);
@@ -1351,6 +1656,9 @@ static struct {
 	{ &prop_cursor_y, "CURSOR_Y", INT_MIN, INT_MAX },
 };
 
+static void intel_flip_init(struct drm_device *dev);
+static void intel_flip_fini(struct drm_device *dev);
+
 int intel_atomic_init(struct drm_device *dev)
 {
 	struct drm_crtc *crtc;
@@ -1424,6 +1732,8 @@ int intel_atomic_init(struct drm_device *dev)
 
 	dev->driver->atomic_funcs = &intel_atomic_funcs;
 
+	intel_flip_init(dev);
+
 	return 0;
 
  out:
@@ -1440,6 +1750,8 @@ void intel_atomic_fini(struct drm_device *dev)
 {
 	struct drm_crtc *crtc;
 
+	intel_flip_fini(dev);
+
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		drm_property_destroy_blob(dev, crtc->mode_blob);
 		drm_property_destroy_blob(dev, crtc->connector_ids_blob);
@@ -1460,3 +1772,480 @@ void intel_atomic_fini(struct drm_device *dev)
 	drm_property_destroy(dev, prop_src_y);
 	drm_property_destroy(dev, prop_src_x);
 }
+
+void intel_plane_calc(struct drm_crtc *crtc, struct drm_framebuffer *fb, int x, int y);
+void intel_plane_prepare(struct drm_crtc *crtc);
+void intel_plane_commit(struct drm_crtc *crtc);
+void intel_sprite_calc(struct drm_plane *plane, struct drm_framebuffer *fb, const struct intel_plane_coords *coords);
+void intel_sprite_prepare(struct drm_plane *plane);
+void intel_sprite_commit(struct drm_plane *plane);
+
+enum {
+	/* somewhat arbitrary value */
+	INTEL_VBL_CNT_TIMEOUT = 5,
+};
+
+static void intel_flip_complete(struct drm_flip *flip)
+{
+	struct intel_flip *intel_flip =
+		container_of(flip, struct intel_flip, base);
+	struct drm_device *dev = intel_flip->crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_crtc *crtc = intel_flip->crtc;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+
+	if (intel_flip->event) {
+		list_del_init(&intel_flip->pending_head);
+		intel_flip->event->event.old_fb_id = intel_flip->old_fb_id;
+		queue_event(dev, crtc, intel_flip->event);
+	}
+
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+
+	if (intel_flip->vblank_ref)
+		drm_vblank_put(dev, pipe);
+
+	/* Possibly allow rendering to old_bo again */
+	if (intel_flip->old_bo) {
+		if (intel_flip->plane) {
+			struct intel_plane *intel_plane = to_intel_plane(intel_flip->plane);
+			/* FIXME need proper numbering for all planes */
+			atomic_clear_mask(1 << (16+intel_plane->pipe), &intel_flip->old_bo->pending_flip.counter);
+		} else
+			atomic_clear_mask(1 << intel_crtc->plane, &intel_flip->old_bo->pending_flip.counter);
+
+		if (atomic_read(&intel_flip->old_bo->pending_flip) == 0)
+			wake_up(&dev_priv->pending_flip_queue);
+	}
+}
+
+
+static void intel_flip_finish(struct drm_flip *flip)
+{
+	struct intel_flip *intel_flip =
+		container_of(flip, struct intel_flip, base);
+	struct drm_device *dev = intel_flip->crtc->dev;
+
+	if (intel_flip->old_bo) {
+		mutex_lock(&dev->struct_mutex);
+
+		intel_unpin_fb_obj(intel_flip->old_bo);
+
+		drm_gem_object_unreference(&intel_flip->old_bo->base);
+
+		mutex_unlock(&dev->struct_mutex);
+	}
+
+	if (intel_flip->old_cursor_bo)
+		intel_crtc_cursor_bo_unref(intel_flip->crtc, intel_flip->old_cursor_bo);
+}
+
+static void intel_flip_cleanup(struct drm_flip *flip)
+{
+	struct intel_flip *intel_flip =
+		container_of(flip, struct intel_flip, base);
+
+	kfree(intel_flip);
+}
+
+static void intel_flip_driver_flush(struct drm_flip_driver *driver)
+{
+	struct drm_i915_private *dev_priv =
+		container_of(driver, struct drm_i915_private, flip_driver);
+
+	/* Flush posted writes */
+	I915_READ(PIPEDSL(PIPE_A));
+}
+
+static bool intel_have_new_frmcount(struct drm_device *dev)
+{
+	return IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5;
+}
+
+static u32 get_vbl_count(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+
+	if (intel_have_new_frmcount(dev)) {
+		return I915_READ(PIPE_FRMCOUNT_GM45(pipe));
+	} else  {
+		u32 high, low1, low2, dsl;
+		unsigned int timeout = 0;
+
+		/*
+		 * FIXME check where the frame counter increments, and if
+		 * it happens in the middle of some line, take appropriate
+		 * measures to get a sensible reading.
+		 */
+
+		/* All reads must be satisfied during the same frame */
+		do {
+			low1 = I915_READ(PIPEFRAMEPIXEL(pipe)) >> PIPE_FRAME_LOW_SHIFT;
+			high = I915_READ(PIPEFRAME(pipe)) << 8;
+			dsl = I915_READ(PIPEDSL(pipe));
+			low2 = I915_READ(PIPEFRAMEPIXEL(pipe)) >> PIPE_FRAME_LOW_SHIFT;
+		} while (low1 != low2 && timeout++ < INTEL_VBL_CNT_TIMEOUT);
+
+		if (timeout >= INTEL_VBL_CNT_TIMEOUT)
+			dev_warn(dev->dev,
+				 "Timed out while determining VBL count for pipe %d\n", pipe);
+
+		return ((high | low2) +
+			((dsl >= crtc->hwmode.crtc_vdisplay) &&
+			 (dsl < crtc->hwmode.crtc_vtotal - 1))) & 0xffffff;
+	}
+}
+
+static unsigned int usecs_to_scanlines(struct drm_crtc *crtc,
+				       unsigned int usecs)
+{
+	/* paranoia */
+	if (!crtc->hwmode.crtc_htotal)
+		return 1;
+
+	return DIV_ROUND_UP(usecs * crtc->hwmode.clock,
+			    1000 * crtc->hwmode.crtc_htotal);
+}
+
+static void intel_pipe_vblank_evade(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+	/* FIXME needs to be calibrated sensibly */
+	u32 min = crtc->hwmode.crtc_vdisplay - usecs_to_scanlines(crtc, 50);
+	u32 max = crtc->hwmode.crtc_vdisplay - 1;
+	long timeout = msecs_to_jiffies(3);
+	u32 val;
+
+	bool vblank_ref = drm_vblank_get(dev, pipe) == 0;
+
+	intel_crtc->vbl_received = false;
+
+	val = I915_READ(PIPEDSL(pipe));
+
+	while (val >= min && val <= max && timeout > 0) {
+		local_irq_enable();
+
+		timeout = wait_event_timeout(intel_crtc->vbl_wait,
+					     intel_crtc->vbl_received,
+					     timeout);
+
+		local_irq_disable();
+
+		intel_crtc->vbl_received = false;
+
+		val = I915_READ(PIPEDSL(pipe));
+	}
+
+	if (vblank_ref)
+		drm_vblank_put(dev, pipe);
+
+	if (val >= min && val <= max)
+		dev_warn(dev->dev,
+			 "Page flipping close to vblank start (DSL=%u, VBL=%u)\n",
+			 val, crtc->hwmode.crtc_vdisplay);
+}
+
+static bool vbl_count_after_eq_new(u32 a, u32 b)
+{
+	return !((a - b) & 0x80000000);
+}
+
+static bool vbl_count_after_eq(u32 a, u32 b)
+{
+	return !((a - b) & 0x800000);
+}
+
+static bool intel_vbl_check(struct drm_flip *pending_flip, u32 vbl_count)
+{
+	struct intel_flip *old_intel_flip =
+		container_of(pending_flip, struct intel_flip, base);
+	struct drm_device *dev = old_intel_flip->crtc->dev;
+
+	if (intel_have_new_frmcount(dev))
+		return vbl_count_after_eq_new(vbl_count, old_intel_flip->vbl_count);
+	else
+		return vbl_count_after_eq(vbl_count, old_intel_flip->vbl_count);
+}
+
+static void intel_flip_prepare(struct drm_flip *flip)
+{
+	struct intel_flip *intel_flip =
+		container_of(flip, struct intel_flip, base);
+
+	/* FIXME some other pipe/pf stuff could be performed here as well. */
+
+	/* stage double buffer updates which need arming by something else */
+	if (intel_flip->plane)
+		intel_sprite_prepare(intel_flip->plane);
+	else
+		intel_plane_prepare(intel_flip->crtc);
+}
+
+static bool intel_flip_flip(struct drm_flip *flip,
+			    struct drm_flip *pending_flip)
+{
+	struct intel_flip *intel_flip = container_of(flip, struct intel_flip, base);
+	struct drm_crtc *crtc = intel_flip->crtc;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	int pipe = intel_crtc->pipe;
+	u32 vbl_count;
+
+	intel_flip->vblank_ref = drm_vblank_get(dev, pipe) == 0;
+
+	vbl_count = get_vbl_count(crtc);
+
+	/* arm all the double buffer registers */
+	if (intel_flip->plane)
+		intel_sprite_commit(intel_flip->plane);
+	else
+		intel_plane_commit(crtc);
+
+	if (intel_flip->has_cursor)
+		intel_crtc_cursor_commit(crtc,
+					 intel_crtc->cursor_handle,
+					 intel_crtc->cursor_width,
+					 intel_crtc->cursor_height,
+					 intel_crtc->cursor_bo,
+					 intel_crtc->cursor_addr);
+
+	/* This flip will happen on the next vblank */
+	if (intel_have_new_frmcount(dev))
+		intel_flip->vbl_count = vbl_count + 1;
+	else
+		intel_flip->vbl_count = (vbl_count + 1) & 0xffffff;
+
+	if (pending_flip) {
+		struct intel_flip *old_intel_flip =
+			container_of(pending_flip, struct intel_flip, base);
+		bool flipped = intel_vbl_check(pending_flip, vbl_count);
+
+		if (!flipped) {
+			swap(intel_flip->old_fb_id, old_intel_flip->old_fb_id);
+			swap(intel_flip->old_bo, old_intel_flip->old_bo);
+			swap(intel_flip->old_cursor_bo, old_intel_flip->old_cursor_bo);
+		}
+
+		return flipped;
+	}
+
+	return false;
+}
+
+static bool intel_flip_vblank(struct drm_flip *pending_flip)
+{
+	struct intel_flip *old_intel_flip =
+		container_of(pending_flip, struct intel_flip, base);
+	u32 vbl_count = get_vbl_count(old_intel_flip->crtc);
+
+	return intel_vbl_check(pending_flip, vbl_count);
+}
+
+static const struct drm_flip_helper_funcs intel_flip_funcs = {
+	.prepare = intel_flip_prepare,
+	.flip = intel_flip_flip,
+	.vblank = intel_flip_vblank,
+	.complete = intel_flip_complete,
+	.finish = intel_flip_finish,
+	.cleanup = intel_flip_cleanup,
+};
+
+static const struct drm_flip_driver_funcs intel_flip_driver_funcs = {
+	.flush = intel_flip_driver_flush,
+};
+
+static void intel_flip_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc;
+	struct intel_plane *intel_plane;
+
+	drm_flip_driver_init(&dev_priv->flip_driver, &intel_flip_driver_funcs);
+
+	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) {
+		init_waitqueue_head(&intel_crtc->vbl_wait);
+
+		drm_flip_helper_init(&intel_crtc->flip_helper,
+				     &dev_priv->flip_driver, &intel_flip_funcs);
+	}
+
+	list_for_each_entry(intel_plane, &dev->mode_config.plane_list, base.head)
+		drm_flip_helper_init(&intel_plane->flip_helper,
+				     &dev_priv->flip_driver, &intel_flip_funcs);
+}
+
+static void intel_flip_fini(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc;
+	struct intel_plane *intel_plane;
+
+	list_for_each_entry(intel_plane, &dev->mode_config.plane_list, base.head)
+		drm_flip_helper_fini(&intel_plane->flip_helper);
+
+	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head)
+		drm_flip_helper_fini(&intel_crtc->flip_helper);
+
+	drm_flip_driver_fini(&dev_priv->flip_driver);
+}
+
+static void atomic_pipe_commit(struct drm_device *dev,
+			       struct intel_atomic_state *state,
+			       int pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_file_private *file_priv = state->file->driver_priv;
+	LIST_HEAD(flips);
+	int i;
+	bool pipe_enabled = to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe))->active;
+
+	for (i = 0; i < dev->mode_config.num_crtc; i++) {
+		struct intel_crtc_state *st = &state->crtc[i];
+		struct drm_crtc *crtc = st->crtc;
+		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+		struct intel_flip *intel_flip;
+
+		if (!st->fb_dirty && !st->cursor_dirty)
+			continue;
+
+		if (intel_crtc->pipe != pipe)
+			continue;
+
+		if (!st->flip)
+			continue;
+
+		intel_flip = st->flip;
+		st->flip = NULL;
+
+		drm_flip_init(&intel_flip->base, &intel_crtc->flip_helper);
+
+		if (st->event) {
+			intel_flip->event = st->event;
+			st->event = NULL;
+			/* need to keep track of it in case process exits */
+			spin_lock_irq(&dev->event_lock);
+			list_add_tail(&intel_flip->pending_head, &file_priv->pending_flips);
+			spin_unlock_irq(&dev->event_lock);
+		}
+
+		intel_flip->crtc = crtc;
+
+		intel_plane_calc(crtc, crtc->fb, crtc->x, crtc->y);
+
+		if (st->cursor_dirty) {
+			intel_flip->has_cursor = true;
+			intel_flip->old_cursor_bo = st->old_cursor_bo;
+		}
+
+		if (st->old_fb) {
+			intel_flip->old_fb_id = st->old_fb->base.id;
+			intel_flip->old_bo = to_intel_framebuffer(st->old_fb)->obj;
+
+			mutex_lock(&dev->struct_mutex);
+			drm_gem_object_reference(&intel_flip->old_bo->base);
+			mutex_unlock(&dev->struct_mutex);
+
+			/* Block clients from rendering to the new back buffer until
+			 * the flip occurs and the object is no longer visible.
+			 */
+			atomic_set_mask(1 << intel_crtc->plane, &intel_flip->old_bo->pending_flip.counter);
+		}
+
+		list_add_tail(&intel_flip->base.list, &flips);
+	}
+
+	for (i = 0; i < dev->mode_config.num_plane; i++) {
+		struct intel_plane_state *st = &state->plane[i];
+		struct drm_plane *plane = st->plane;
+		struct intel_plane *intel_plane = to_intel_plane(plane);
+		struct intel_flip *intel_flip;
+
+		if (!st->dirty)
+			continue;
+
+		if (intel_plane->pipe != pipe)
+			continue;
+
+		if (!st->flip)
+			continue;
+
+		intel_flip = st->flip;
+		st->flip = NULL;
+
+		drm_flip_init(&intel_flip->base, &intel_plane->flip_helper);
+
+		if (st->event) {
+			intel_flip->event = st->event;
+			st->event = NULL;
+			/* need to keep track of it in case process exits */
+			spin_lock_irq(&dev->event_lock);
+			list_add_tail(&intel_flip->pending_head, &file_priv->pending_flips);
+			spin_unlock_irq(&dev->event_lock);
+		}
+
+		intel_flip->crtc = intel_get_crtc_for_pipe(dev, pipe);
+		intel_flip->plane = plane;
+
+		intel_sprite_calc(plane, plane->fb, &st->coords);
+
+		if (st->old_fb) {
+			intel_flip->old_fb_id = st->old_fb->base.id;
+			intel_flip->old_bo = to_intel_framebuffer(st->old_fb)->obj;
+
+			mutex_lock(&dev->struct_mutex);
+			drm_gem_object_reference(&intel_flip->old_bo->base);
+			mutex_unlock(&dev->struct_mutex);
+
+			/* Block clients from rendering to the new back buffer until
+			 * the flip occurs and the object is no longer visible.
+			 */
+			/* FIXME need proper numbering for all planes */
+			atomic_set_mask(1 << (16+intel_plane->pipe), &intel_flip->old_bo->pending_flip.counter);
+		}
+
+		list_add_tail(&intel_flip->base.list, &flips);
+	}
+
+	if (list_empty(&flips))
+		return;
+
+	if (!pipe_enabled) {
+		drm_flip_driver_complete_flips(&dev_priv->flip_driver, &flips);
+		return;
+	}
+
+	drm_flip_driver_prepare_flips(&dev_priv->flip_driver, &flips);
+
+	local_irq_disable();
+
+	intel_pipe_vblank_evade(intel_get_crtc_for_pipe(dev, pipe));
+
+	drm_flip_driver_schedule_flips(&dev_priv->flip_driver, &flips);
+
+	local_irq_enable();
+}
+
+void intel_atomic_handle_vblank(struct drm_device *dev, int pipe)
+{
+	struct intel_crtc *intel_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
+	struct intel_plane *intel_plane;
+
+	intel_crtc->vbl_received = true;
+
+	drm_flip_helper_vblank(&intel_crtc->flip_helper);
+
+	list_for_each_entry(intel_plane, &dev->mode_config.plane_list, base.head) {
+		if (intel_plane->pipe == pipe)
+			drm_flip_helper_vblank(&intel_plane->flip_helper);
+	}
+}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 0f0b0c9..2f999c6 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -3351,6 +3351,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
 		return;
 
 	intel_crtc_wait_for_pending_flips(crtc);
+	drm_flip_helper_clear(&intel_crtc->flip_helper);
 	drm_vblank_off(dev, pipe);
 	intel_crtc_update_cursor(crtc, false);
 
@@ -3522,6 +3523,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
 
 	/* Give the overlay scaler a chance to disable if it's on this pipe */
 	intel_crtc_wait_for_pending_flips(crtc);
+	drm_flip_helper_clear(&intel_crtc->flip_helper);
 	drm_vblank_off(dev, pipe);
 	intel_crtc_dpms_overlay(intel_crtc, false);
 	intel_crtc_update_cursor(crtc, false);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 382cb48..845d0bd 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -31,6 +31,7 @@
 #include "drm_crtc.h"
 #include "drm_crtc_helper.h"
 #include "drm_fb_helper.h"
+#include "drm_flip.h"
 
 #define _wait_for(COND, MS, W) ({ \
 	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS);	\
@@ -208,6 +209,10 @@ struct intel_crtc {
 	struct intel_pch_pll *pch_pll;
 
 	struct intel_plane_regs primary_regs;
+
+	struct drm_flip_helper flip_helper;
+	wait_queue_head_t vbl_wait;
+	bool vbl_received;
 };
 
 struct intel_plane_coords {
@@ -234,6 +239,7 @@ struct intel_plane {
 			       struct drm_intel_sprite_colorkey *key);
 	void (*get_colorkey)(struct drm_plane *plane,
 			     struct drm_intel_sprite_colorkey *key);
+	struct drm_flip_helper flip_helper;
 };
 
 struct intel_watermark_params {
-- 
1.7.8.6
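
As an aside, here is a rough sketch of how a userspace client might consume the
completion events this patch queues. The DRM_EVENT_ATOMIC_COMPLETE constant and
the real event layout come from the drm core patch in this series; the struct
below is only an assumption reconstructed from the fields filled in by
alloc_event()/queue_event() above, and drain_atomic_events() is a hypothetical
helper name, so treat this as an illustration rather than the uapi definition.

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <drm/drm.h>	/* struct drm_event; DRM_EVENT_ATOMIC_COMPLETE is added by the drm core patch */

/* Assumed layout, mirroring what alloc_event()/queue_event() fill in. */
struct drm_event_atomic_complete {
	struct drm_event base;	/* base.type == DRM_EVENT_ATOMIC_COMPLETE */
	uint64_t user_data;	/* echoed back from the atomic ioctl */
	uint32_t obj_id;	/* crtc/plane the event refers to */
	uint32_t old_fb_id;	/* fb the object no longer scans out (0 if none) */
	uint32_t sequence;	/* vblank count at completion */
	uint32_t tv_sec;
	uint32_t tv_usec;
};

static void drain_atomic_events(int drm_fd)
{
	char buf[1024];
	ssize_t len = read(drm_fd, buf, sizeof buf);
	ssize_t i = 0;

	while (i + (ssize_t)sizeof(struct drm_event) <= len) {
		const struct drm_event *e = (const struct drm_event *)&buf[i];

		if (e->type == DRM_EVENT_ATOMIC_COMPLETE) {
			const struct drm_event_atomic_complete *ev =
				(const struct drm_event_atomic_complete *)e;

			/* a non-zero old_fb_id means that fb may be reused/rendered to again */
			printf("obj %u flipped at vbl %u, old fb %u released\n",
			       ev->obj_id, ev->sequence, ev->old_fb_id);
		}

		i += e->length;
	}
}

A client would typically poll() the drm fd after submitting an atomic request
with DRM_MODE_ATOMIC_EVENT set, call something like the above when the fd
becomes readable, and match user_data/obj_id against the request it submitted.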

_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/dri-devel

^ permalink raw reply related	[flat|nested] 4+ messages in thread
