From mboxrd@z Thu Jan  1 00:00:00 1970
From: Christoph Hellwig <hch@lst.de>
To: Dan Williams, Jérôme Glisse, Jason Gunthorpe, Ben Skeggs
Cc: linux-nvdimm@lists.01.org, linux-pci@vger.kernel.org,
	linux-kernel@vger.kernel.org, dri-devel@lists.freedesktop.org,
	linux-mm@kvack.org, nouveau@lists.freedesktop.org
Subject: [PATCH 14/25] memremap: provide an optional internal refcount in struct dev_pagemap
Date: Mon, 17 Jun 2019 14:27:22 +0200
Message-Id: <20190617122733.22432-15-hch@lst.de>
In-Reply-To: <20190617122733.22432-1-hch@lst.de>
References: <20190617122733.22432-1-hch@lst.de>

Provide an internal refcounting logic if no ->ref field is provided
in the pagemap passed into devm_memremap_pages so that callers don't
have to reinvent it poorly.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 include/linux/memremap.h          |  4 ++
 kernel/memremap.c                 | 64 ++++++++++++++++++++++++-------
 tools/testing/nvdimm/test/iomap.c | 17 ++++++--
 3 files changed, 68 insertions(+), 17 deletions(-)

diff --git a/include/linux/memremap.h b/include/linux/memremap.h
index 7289eb091b04..7e0f072ddce7 100644
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -95,6 +95,8 @@ struct dev_pagemap_ops {
  * @altmap: pre-allocated/reserved memory for vmemmap allocations
  * @res: physical address range covered by @ref
  * @ref: reference count that pins the devm_memremap_pages() mapping
+ * @internal_ref: internal reference if @ref is not provided by the caller
+ * @done: completion for @internal_ref
  * @dev: host device of the mapping for debug
  * @data: private data pointer for page_free()
  * @type: memory type: see MEMORY_* in memory_hotplug.h
@@ -105,6 +107,8 @@ struct dev_pagemap {
 	struct vmem_altmap altmap;
 	struct resource res;
 	struct percpu_ref *ref;
+	struct percpu_ref internal_ref;
+	struct completion done;
 	struct device *dev;
 	enum memory_type type;
 	unsigned int flags;
diff --git a/kernel/memremap.c b/kernel/memremap.c
index b41d98a64ebf..60693a1e8e92 100644
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -29,7 +29,7 @@ static void dev_pagemap_put_ops(void *data)
 
 static int dev_pagemap_get_ops(struct device *dev, struct dev_pagemap *pgmap)
 {
-	if (!pgmap->ops->page_free) {
+	if (!pgmap->ops || !pgmap->ops->page_free) {
 		WARN(1, "Missing page_free method\n");
 		return -EINVAL;
 	}
@@ -75,6 +75,24 @@ static unsigned long pfn_next(unsigned long pfn)
 #define for_each_device_pfn(pfn, map) \
 	for (pfn = pfn_first(map); pfn < pfn_end(map); pfn = pfn_next(pfn))
 
+static void dev_pagemap_kill(struct dev_pagemap *pgmap)
+{
+	if (pgmap->ops && pgmap->ops->kill)
+		pgmap->ops->kill(pgmap);
+	else
+		percpu_ref_kill(pgmap->ref);
+}
+
+static void dev_pagemap_cleanup(struct dev_pagemap *pgmap)
+{
+	if (pgmap->ops && pgmap->ops->cleanup) {
+		pgmap->ops->cleanup(pgmap);
+	} else {
+		wait_for_completion(&pgmap->done);
+		percpu_ref_exit(pgmap->ref);
+	}
+}
+
 static void devm_memremap_pages_release(void *data)
 {
 	struct dev_pagemap *pgmap = data;
@@ -84,10 +102,10 @@ static void devm_memremap_pages_release(void *data)
 	unsigned long pfn;
 	int nid;
 
-	pgmap->ops->kill(pgmap);
+	dev_pagemap_kill(pgmap);
 	for_each_device_pfn(pfn, pgmap)
 		put_page(pfn_to_page(pfn));
-	pgmap->ops->cleanup(pgmap);
+	dev_pagemap_cleanup(pgmap);
 
 	/* pages are dead and unused, undo the arch mapping */
 	align_start = res->start & ~(SECTION_SIZE - 1);
@@ -114,20 +132,29 @@ static void devm_memremap_pages_release(void *data)
 		      "%s: failed to free all reserved pages\n", __func__);
 }
 
+static void dev_pagemap_percpu_release(struct percpu_ref *ref)
+{
+	struct dev_pagemap *pgmap =
+		container_of(ref, struct dev_pagemap, internal_ref);
+
+	complete(&pgmap->done);
+}
+
 /**
  * devm_memremap_pages - remap and provide memmap backing for the given resource
  * @dev: hosting device for @res
  * @pgmap: pointer to a struct dev_pagemap
  *
  * Notes:
- * 1/ At a minimum the res, ref and type and ops members of @pgmap must be
- *    initialized by the caller before passing it to this function
+ * 1/ At a minimum the res and type members of @pgmap must be initialized
+ *    by the caller before passing it to this function
  *
  * 2/ The altmap field may optionally be initialized, in which case
  *    PGMAP_ALTMAP_VALID must be set in pgmap->flags.
  *
- * 3/ pgmap->ref must be 'live' on entry and will be killed and reaped
- *    at devm_memremap_pages_release() time, or if this routine fails.
+ * 3/ The ref field may optionally be provided, in which case pgmap->ref
+ *    must be 'live' on entry and will be killed and reaped at
+ *    devm_memremap_pages_release() time, or if this routine fails.
  *
  * 4/ res is expected to be a host memory range that could feasibly be
  *    treated as a "System RAM" range, i.e. not a device mmio range, but
@@ -178,10 +205,21 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
 		break;
 	}
 
-	if (!pgmap->ref || !pgmap->ops || !pgmap->ops->kill ||
-	    !pgmap->ops->cleanup) {
-		WARN(1, "Missing reference count teardown definition\n");
-		return ERR_PTR(-EINVAL);
+	if (!pgmap->ref) {
+		if (pgmap->ops && (pgmap->ops->kill || pgmap->ops->cleanup))
+			return ERR_PTR(-EINVAL);
+
+		init_completion(&pgmap->done);
+		error = percpu_ref_init(&pgmap->internal_ref,
+				dev_pagemap_percpu_release, 0, GFP_KERNEL);
+		if (error)
+			return ERR_PTR(error);
+		pgmap->ref = &pgmap->internal_ref;
+	} else {
+		if (!pgmap->ops || !pgmap->ops->kill || !pgmap->ops->cleanup) {
+			WARN(1, "Missing reference count teardown definition\n");
+			return ERR_PTR(-EINVAL);
+		}
 	}
 
 	if (pgmap->type != MEMORY_DEVICE_PCI_P2PDMA) {
@@ -299,8 +337,8 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
  err_pfn_remap:
 	pgmap_array_delete(res);
  err_array:
-	pgmap->ops->kill(pgmap);
-	pgmap->ops->cleanup(pgmap);
+	dev_pagemap_kill(pgmap);
+	dev_pagemap_cleanup(pgmap);
 	return ERR_PTR(error);
 }
 EXPORT_SYMBOL_GPL(devm_memremap_pages);
diff --git a/tools/testing/nvdimm/test/iomap.c b/tools/testing/nvdimm/test/iomap.c
index 3a1fa7735f47..8cd9b9873a7f 100644
--- a/tools/testing/nvdimm/test/iomap.c
+++ b/tools/testing/nvdimm/test/iomap.c
@@ -106,10 +106,19 @@ EXPORT_SYMBOL(__wrap_devm_memremap);
 
 static void nfit_test_kill(void *_pgmap)
 {
-	WARN_ON(!pgmap || !pgmap->ref || !pgmap->ops->kill ||
-		!pgmap->ops->cleanup);
-	pgmap->ops->kill(pgmap);
-	pgmap->ops->cleanup(pgmap);
+	WARN_ON(!pgmap || !pgmap->ref);
+
+	if (pgmap->ops && pgmap->ops->kill)
+		pgmap->ops->kill(pgmap);
+	else
+		percpu_ref_kill(pgmap->ref);
+
+	if (pgmap->ops && pgmap->ops->cleanup) {
+		pgmap->ops->cleanup(pgmap);
+	} else {
+		wait_for_completion(&pgmap->done);
+		percpu_ref_exit(pgmap->ref);
+	}
 }
 
 void *__wrap_devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
--
2.20.1
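
For illustration (this sketch is not part of the patch): with the internal
refcount in place, a caller that has no use for its own reference count
simply leaves ->ref and the ->kill/->cleanup ops unset. The
example_map_pages() helper below is hypothetical, and MEMORY_DEVICE_DEVDAX
is assumed here as a stand-in for any pagemap type that requires no ops:

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/memremap.h>

	/* Hypothetical caller relying entirely on the internal refcount. */
	static void *example_map_pages(struct device *dev, struct resource *res)
	{
		struct dev_pagemap *pgmap;

		pgmap = devm_kzalloc(dev, sizeof(*pgmap), GFP_KERNEL);
		if (!pgmap)
			return ERR_PTR(-ENOMEM);

		pgmap->res = *res;			/* host memory range to remap */
		pgmap->type = MEMORY_DEVICE_DEVDAX;	/* assumed: a type needing no ops */
		/*
		 * No pgmap->ref and no ops set: devm_memremap_pages()
		 * initializes pgmap->internal_ref and pgmap->done itself,
		 * and the release path falls back to percpu_ref_kill() plus
		 * wait_for_completion()/percpu_ref_exit().
		 */
		return devm_memremap_pages(dev, pgmap);
	}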
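
The external-refcount mode remains supported, but it is now all-or-nothing:
if ->ref is set, both ->kill and ->cleanup must be set as well, otherwise
devm_memremap_pages() returns -EINVAL (and likewise a NULL ->ref with either
op present is rejected). A sketch of that mode follows; all example_* names
are again hypothetical, and res/type setup is elided as in the first sketch:

	#include <linux/completion.h>
	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/memremap.h>
	#include <linux/percpu-refcount.h>

	struct example_dev {
		struct dev_pagemap pgmap;
		struct percpu_ref ref;
		struct completion cmp;
	};

	static void example_ref_release(struct percpu_ref *ref)
	{
		struct example_dev *edev =
			container_of(ref, struct example_dev, ref);

		complete(&edev->cmp);	/* last page reference dropped */
	}

	static void example_kill(struct dev_pagemap *pgmap)
	{
		percpu_ref_kill(pgmap->ref);
	}

	static void example_cleanup(struct dev_pagemap *pgmap)
	{
		struct example_dev *edev =
			container_of(pgmap, struct example_dev, pgmap);

		wait_for_completion(&edev->cmp);
		percpu_ref_exit(&edev->ref);
	}

	static const struct dev_pagemap_ops example_pagemap_ops = {
		.kill		= example_kill,
		.cleanup	= example_cleanup,
	};

	static int example_setup(struct device *dev, struct example_dev *edev)
	{
		int rc;

		init_completion(&edev->cmp);
		rc = percpu_ref_init(&edev->ref, example_ref_release, 0,
				GFP_KERNEL);
		if (rc)
			return rc;

		edev->pgmap.ref = &edev->ref;		/* caller-owned refcount */
		edev->pgmap.ops = &example_pagemap_ops;	/* both teardown ops set */
		/* edev->pgmap.res and .type filled in as in the first sketch */
		return PTR_ERR_OR_ZERO(devm_memremap_pages(dev, &edev->pgmap));
	}

The completion mirrors what dev_pagemap_cleanup() does for the internal ref:
->cleanup must not return until the last page reference is gone, which is
the guarantee the release callback plus wait_for_completion() provides.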