From: Chris Wilson <chris@chris-wilson.co.uk>
To: intel-gfx@lists.freedesktop.org
Cc: Dave Airlie <airlied@redhat.com>, dri-devel@lists.freedesktop.org
Subject: [PATCH 02/16] drm: Introduce an iterator over holes in the drm_mm range manager
Date: Thu, 15 Nov 2012 11:32:17 +0000
Message-ID: <1352979151-9934-3-git-send-email-chris@chris-wilson.co.uk>
In-Reply-To: <1352979151-9934-1-git-send-email-chris@chris-wilson.co.uk>

This will be used by i915 in forthcoming patches to measure the
largest contiguous chunk of memory available for enabling chipset
features.

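For illustration, a minimal sketch of the intended i915-style usage,
walking every hole and remembering the largest one (dev_priv->mm.gtt_space
is named here only for illustration; the real user arrives in a later
patch):

	struct drm_mm_node *entry;
	unsigned long hole_start, hole_end;
	unsigned long largest = 0;

	drm_mm_for_each_hole(entry, &dev_priv->mm.gtt_space,
			     hole_start, hole_end)
		largest = max(largest, hole_end - hole_start);
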
v2: Try to make the macro marginally safer and more readable by not
depending upon drm_mm_hole_node_end() being non-zero. Note that we
need to open-code list_for_each() in order to update the hole_start
and hole_end variables on each iteration and keep the macro sane.
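
For reference, with caller-provided hole_start and hole_end locals the
iteration effectively expands to the loop below; the comma expression
in the condition is what refreshes both variables before each pass of
the body (sketch only, the real macro lives in drm_mm.h):

	for (entry = list_entry(mm->hole_stack.next,
				struct drm_mm_node, hole_stack);
	     &entry->hole_stack != &mm->hole_stack ?
		(hole_start = drm_mm_hole_node_start(entry),
		 hole_end = drm_mm_hole_node_end(entry), 1) : 0;
	     entry = list_entry(entry->hole_stack.next,
				struct drm_mm_node, hole_stack)) {
		/* hole_start/hole_end describe the current hole */
	}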

v3: Tidy up a few BUG_ONs that fell foul of adding additional tests
to drm_mm_hole_node_start().

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Dave Airlie <airlied@redhat.com>
Cc: dri-devel@lists.freedesktop.org
Reviewed-by: Ben Widawsky <ben@bwidawsk.net>
---
 drivers/gpu/drm/drm_mm.c |   62 ++++++++++++++++------------------------------
 include/drm/drm_mm.h     |   36 +++++++++++++++++++++++++++
 2 files changed, 57 insertions(+), 41 deletions(-)

diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index bd203b6..b751b8e 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -102,20 +102,6 @@ int drm_mm_pre_get(struct drm_mm *mm)
 }
 EXPORT_SYMBOL(drm_mm_pre_get);
 
-static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
-{
-	return hole_node->start + hole_node->size;
-}
-
-static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
-{
-	struct drm_mm_node *next_node =
-		list_entry(hole_node->node_list.next, struct drm_mm_node,
-			   node_list);
-
-	return next_node->start;
-}
-
 static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
 				 struct drm_mm_node *node,
 				 unsigned long size, unsigned alignment,
@@ -127,7 +113,7 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
 	unsigned long adj_start = hole_start;
 	unsigned long adj_end = hole_end;
 
-	BUG_ON(!hole_node->hole_follows || node->allocated);
+	BUG_ON(node->allocated);
 
 	if (mm->color_adjust)
 		mm->color_adjust(hole_node, color, &adj_start, &adj_end);
@@ -155,7 +141,7 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
 	BUG_ON(node->start + node->size > adj_end);
 
 	node->hole_follows = 0;
-	if (node->start + node->size < hole_end) {
+	if (__drm_mm_hole_node_start(node) < hole_end) {
 		list_add(&node->hole_stack, &mm->hole_stack);
 		node->hole_follows = 1;
 	}
@@ -168,15 +154,10 @@ struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
 {
 	struct drm_mm_node *hole, *node;
 	unsigned long end = start + size;
+	unsigned long hole_start;
+	unsigned long hole_end;
 
-	list_for_each_entry(hole, &mm->hole_stack, hole_stack) {
-		unsigned long hole_start;
-		unsigned long hole_end;
-
-		BUG_ON(!hole->hole_follows);
-		hole_start = drm_mm_hole_node_start(hole);
-		hole_end = drm_mm_hole_node_end(hole);
-
+	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
 		if (hole_start > start || hole_end < end)
 			continue;
 
@@ -293,7 +274,7 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
 	BUG_ON(node->start + node->size > end);
 
 	node->hole_follows = 0;
-	if (node->start + node->size < hole_end) {
+	if (__drm_mm_hole_node_start(node) < hole_end) {
 		list_add(&node->hole_stack, &mm->hole_stack);
 		node->hole_follows = 1;
 	}
@@ -358,12 +339,13 @@ void drm_mm_remove_node(struct drm_mm_node *node)
 	    list_entry(node->node_list.prev, struct drm_mm_node, node_list);
 
 	if (node->hole_follows) {
-		BUG_ON(drm_mm_hole_node_start(node)
-				== drm_mm_hole_node_end(node));
+		BUG_ON(__drm_mm_hole_node_start(node) ==
+		       __drm_mm_hole_node_end(node));
 		list_del(&node->hole_stack);
 	} else
-		BUG_ON(drm_mm_hole_node_start(node)
-				!= drm_mm_hole_node_end(node));
+		BUG_ON(__drm_mm_hole_node_start(node) !=
+		       __drm_mm_hole_node_end(node));
+
 
 	if (!prev_node->hole_follows) {
 		prev_node->hole_follows = 1;
@@ -421,6 +403,8 @@ struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
 {
 	struct drm_mm_node *entry;
 	struct drm_mm_node *best;
+	unsigned long adj_start;
+	unsigned long adj_end;
 	unsigned long best_size;
 
 	BUG_ON(mm->scanned_blocks);
@@ -428,17 +412,13 @@ struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
 	best = NULL;
 	best_size = ~0UL;
 
-	list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
-		unsigned long adj_start = drm_mm_hole_node_start(entry);
-		unsigned long adj_end = drm_mm_hole_node_end(entry);
-
+	drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
 		if (mm->color_adjust) {
 			mm->color_adjust(entry, color, &adj_start, &adj_end);
 			if (adj_end <= adj_start)
 				continue;
 		}
 
-		BUG_ON(!entry->hole_follows);
 		if (!check_free_hole(adj_start, adj_end, size, alignment))
 			continue;
 
@@ -465,6 +445,8 @@ struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
 {
 	struct drm_mm_node *entry;
 	struct drm_mm_node *best;
+	unsigned long adj_start;
+	unsigned long adj_end;
 	unsigned long best_size;
 
 	BUG_ON(mm->scanned_blocks);
@@ -472,13 +454,11 @@ struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
 	best = NULL;
 	best_size = ~0UL;
 
-	list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
-		unsigned long adj_start = drm_mm_hole_node_start(entry) < start ?
-			start : drm_mm_hole_node_start(entry);
-		unsigned long adj_end = drm_mm_hole_node_end(entry) > end ?
-			end : drm_mm_hole_node_end(entry);
-
-		BUG_ON(!entry->hole_follows);
+	drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
+		if (adj_start < start)
+			adj_start = start;
+		if (adj_end > end)
+			adj_end = end;
 
 		if (mm->color_adjust) {
 			mm->color_adjust(entry, color, &adj_start, &adj_end);
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index 4020f96..cd45365 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -89,6 +89,29 @@ static inline bool drm_mm_initialized(struct drm_mm *mm)
 {
 	return mm->hole_stack.next;
 }
+
+static inline unsigned long __drm_mm_hole_node_start(struct drm_mm_node *hole_node)
+{
+	return hole_node->start + hole_node->size;
+}
+
+static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
+{
+	BUG_ON(!hole_node->hole_follows);
+	return __drm_mm_hole_node_start(hole_node);
+}
+
+static inline unsigned long __drm_mm_hole_node_end(struct drm_mm_node *hole_node)
+{
+	return list_entry(hole_node->node_list.next,
+			  struct drm_mm_node, node_list)->start;
+}
+
+static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
+{
+	return __drm_mm_hole_node_end(hole_node);
+}
+
 #define drm_mm_for_each_node(entry, mm) list_for_each_entry(entry, \
 						&(mm)->head_node.node_list, \
 						node_list)
@@ -99,6 +122,19 @@ static inline bool drm_mm_initialized(struct drm_mm *mm)
 	     entry != NULL; entry = next, \
 		next = entry ? list_entry(entry->node_list.next, \
 			struct drm_mm_node, node_list) : NULL) \
+
+/* Note that we need to unroll list_for_each_entry in order to inline
+ * setting hole_start and hole_end on each iteration and keep the
+ * macro sane.
+ */
+#define drm_mm_for_each_hole(entry, mm, hole_start, hole_end) \
+	for (entry = list_entry((mm)->hole_stack.next, struct drm_mm_node, hole_stack); \
+	     &entry->hole_stack != &(mm)->hole_stack ? \
+	     hole_start = drm_mm_hole_node_start(entry), \
+	     hole_end = drm_mm_hole_node_end(entry), \
+	     1 : 0; \
+	     entry = list_entry(entry->hole_stack.next, struct drm_mm_node, hole_stack))
+
 /*
  * Basic range manager support (drm_mm.c)
  */
-- 
1.7.10.4

Thread overview: 26+ messages
2012-11-15 11:32 Hopefully the last round of stolen work Chris Wilson
2012-11-15 11:32 ` [PATCH 01/16] drm: Introduce drm_mm_create_block() Chris Wilson
2012-11-15 11:32 ` Chris Wilson [this message]
2012-11-15 11:32 ` [PATCH 03/16] drm/i915: Fix detection of base of stolen memory Chris Wilson
2012-11-15 11:32 ` [PATCH 04/16] drm/i915: Avoid clearing preallocated regions from the GTT Chris Wilson
2012-11-15 11:32 ` [PATCH 05/16] drm/i915: Delay allocation of stolen space for FBC Chris Wilson
2012-11-15 11:32 ` [PATCH 06/16] drm/i915: Allow objects to be created with no backing pages, but stolen space Chris Wilson
2012-11-15 11:32 ` [PATCH 07/16] drm/i915: Differentiate between prime and stolen objects Chris Wilson
2012-11-15 11:32 ` [PATCH 08/16] drm/i915: Support readback of stolen objects upon error Chris Wilson
2012-11-15 11:32 ` [PATCH 09/16] drm/i915: Handle stolen objects in pwrite Chris Wilson
2012-11-15 11:32 ` [PATCH 10/16] drm/i915: Handle stolen objects for pread Chris Wilson
2012-11-30 22:33   ` Daniel Vetter
2012-11-30 23:46     ` Chris Wilson
2012-12-01  0:03       ` Daniel Vetter
2012-12-01  0:14         ` Chris Wilson
2012-11-15 11:32 ` [PATCH 11/16] drm/i915: Introduce i915_gem_object_create_stolen() Chris Wilson
2012-11-15 11:32 ` [PATCH 12/16] drm/i915: Allocate fbcon from stolen memory Chris Wilson
2012-11-15 11:32 ` [PATCH 13/16] drm/i915: Allocate ringbuffers " Chris Wilson
2012-11-15 11:32 ` [PATCH 14/16] drm/i915: Allocate overlay registers " Chris Wilson
2012-11-15 11:32 ` [PATCH 15/16] drm/i915: Use a slab for object allocation Chris Wilson
2012-11-30 22:45   ` Daniel Vetter
2012-11-15 11:32 ` [PATCH 16/16] drm/i915: Introduce mapping of user pages into video memory (userptr) ioctl Chris Wilson
2012-11-30 23:06   ` Daniel Vetter
2012-11-30 23:57     ` Chris Wilson
2012-12-01 11:25     ` Chris Wilson
2012-12-01 11:35       ` Daniel Vetter
