From: Christoph Hellwig <hch@lst.de>
To: Dan Williams, Jérôme Glisse, Jason Gunthorpe, Ben Skeggs
Cc: linux-nvdimm@lists.01.org, linux-pci@vger.kernel.org,
	linux-kernel@vger.kernel.org, dri-devel@lists.freedesktop.org,
	linux-mm@kvack.org, nouveau@lists.freedesktop.org
Subject: [PATCH 22/25] mm: simplify ZONE_DEVICE page private data
Date: Wed, 26 Jun 2019 14:27:21 +0200
Message-Id: <20190626122724.13313-23-hch@lst.de>
In-Reply-To: <20190626122724.13313-1-hch@lst.de>
References: <20190626122724.13313-1-hch@lst.de>

Remove the clumsy hmm_devmem_page_{get,set}_drvdata helpers, and
instead just access the page directly.  Also make the page data
a void pointer, and thus much easier to use.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
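A minimal before/after sketch of what the change means for driver code
that keeps per-page private data on ZONE_DEVICE pages.  This is a
hypothetical illustration only: "struct my_chunk" and the *_chunk
helpers below are made up and are not part of this series, and the
"before" variants of course only build on a tree that still has the
hmm_devmem_page_*_drvdata helpers.

#include <linux/mm_types.h>	/* struct page and ->zone_device_data */
#include <linux/hmm.h>		/* old hmm_devmem_page_*_drvdata helpers */

struct my_chunk;		/* stand-in for a driver's per-chunk state */

/* Before: store/fetch the pointer through the helpers, with casts. */
static inline void old_set_chunk(struct page *page, struct my_chunk *chunk)
{
	hmm_devmem_page_set_drvdata(page, (unsigned long)chunk);
}

static inline struct my_chunk *old_get_chunk(struct page *page)
{
	return (void *)hmm_devmem_page_get_drvdata(page);
}

/* After: ->zone_device_data is a void pointer, so just assign and read it. */
static inline void new_set_chunk(struct page *page, struct my_chunk *chunk)
{
	page->zone_device_data = chunk;
}

static inline struct my_chunk *new_get_chunk(struct page *page)
{
	return page->zone_device_data;
}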
 drivers/gpu/drm/nouveau/nouveau_dmem.c | 18 ++++++---------
 include/linux/hmm.h                    | 32 --------------------------
 include/linux/mm_types.h               |  2 +-
 mm/page_alloc.c                        |  8 +++----
 4 files changed, 12 insertions(+), 48 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index 0fb7a44b8bc4..42c026010938 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -104,11 +104,8 @@ struct nouveau_migrate {
 
 static void nouveau_dmem_page_free(struct page *page)
 {
-	struct nouveau_dmem_chunk *chunk;
-	unsigned long idx;
-
-	chunk = (void *)hmm_devmem_page_get_drvdata(page);
-	idx = page_to_pfn(page) - chunk->pfn_first;
+	struct nouveau_dmem_chunk *chunk = page->zone_device_data;
+	unsigned long idx = page_to_pfn(page) - chunk->pfn_first;
 
 	/*
 	 * FIXME:
@@ -200,7 +197,7 @@ nouveau_dmem_fault_alloc_and_copy(struct vm_area_struct *vma,
 
 		dst_addr = fault->dma[fault->npages++];
 
-		chunk = (void *)hmm_devmem_page_get_drvdata(spage);
+		chunk = spage->zone_device_data;
 		src_addr = page_to_pfn(spage) - chunk->pfn_first;
 		src_addr = (src_addr << PAGE_SHIFT) + chunk->bo->bo.offset;
 
@@ -633,9 +630,8 @@ nouveau_dmem_init(struct nouveau_drm *drm)
 		list_add_tail(&chunk->list, &drm->dmem->chunk_empty);
 
 		page = pfn_to_page(chunk->pfn_first);
-		for (j = 0; j < DMEM_CHUNK_NPAGES; ++j, ++page) {
-			hmm_devmem_page_set_drvdata(page, (long)chunk);
-		}
+		for (j = 0; j < DMEM_CHUNK_NPAGES; ++j, ++page)
+			page->zone_device_data = chunk;
 	}
 
 	NV_INFO(drm, "DMEM: registered %ldMB of device memory\n", size >> 20);
@@ -698,7 +694,7 @@ nouveau_dmem_migrate_alloc_and_copy(struct vm_area_struct *vma,
 		if (!dpage || dst_pfns[i] == MIGRATE_PFN_ERROR)
 			continue;
 
-		chunk = (void *)hmm_devmem_page_get_drvdata(dpage);
+		chunk = dpage->zone_device_data;
 		dst_addr = page_to_pfn(dpage) - chunk->pfn_first;
 		dst_addr = (dst_addr << PAGE_SHIFT) + chunk->bo->bo.offset;
 
@@ -862,7 +858,7 @@ nouveau_dmem_convert_pfn(struct nouveau_drm *drm,
 			continue;
 		}
 
-		chunk = (void *)hmm_devmem_page_get_drvdata(page);
+		chunk = page->zone_device_data;
 		addr = page_to_pfn(page) - chunk->pfn_first;
 		addr = (addr + chunk->bo->bo.mem.start) << PAGE_SHIFT;
 
diff --git a/include/linux/hmm.h b/include/linux/hmm.h
index 86aa4ec3404c..3d00e9550e77 100644
--- a/include/linux/hmm.h
+++ b/include/linux/hmm.h
@@ -584,36 +584,4 @@ static inline void hmm_mm_destroy(struct mm_struct *mm) {}
 static inline void hmm_mm_init(struct mm_struct *mm) {}
 #endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */
 
-#if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
-/*
- * hmm_devmem_page_set_drvdata - set per-page driver data field
- *
- * @page: pointer to struct page
- * @data: driver data value to set
- *
- * Because page can not be on lru we have an unsigned long that driver can use
- * to store a per page field. This just a simple helper to do that.
- */
-static inline void hmm_devmem_page_set_drvdata(struct page *page,
-					       unsigned long data)
-{
-	page->hmm_data = data;
-}
-
-/*
- * hmm_devmem_page_get_drvdata - get per page driver data field
- *
- * @page: pointer to struct page
- * Return: driver data value
- */
-static inline unsigned long hmm_devmem_page_get_drvdata(const struct page *page)
-{
-	return page->hmm_data;
-}
-#endif /* CONFIG_DEVICE_PRIVATE */
-#else /* IS_ENABLED(CONFIG_HMM) */
-static inline void hmm_mm_destroy(struct mm_struct *mm) {}
-static inline void hmm_mm_init(struct mm_struct *mm) {}
-#endif /* IS_ENABLED(CONFIG_HMM) */
-
 #endif /* LINUX_HMM_H */
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 8ec38b11b361..f33a1289c101 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -158,7 +158,7 @@ struct page {
 		struct {	/* ZONE_DEVICE pages */
 			/** @pgmap: Points to the hosting device page map. */
 			struct dev_pagemap *pgmap;
-			unsigned long hmm_data;
+			void *zone_device_data;
 			unsigned long _zd_pad_1;	/* uses mapping */
 		};
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 17a39d40a556..c0e031c52db5 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5886,12 +5886,12 @@ void __ref memmap_init_zone_device(struct zone *zone,
 		__SetPageReserved(page);
 
 		/*
-		 * ZONE_DEVICE pages union ->lru with a ->pgmap back
-		 * pointer and hmm_data. It is a bug if a ZONE_DEVICE
-		 * page is ever freed or placed on a driver-private list.
+		 * ZONE_DEVICE pages union ->lru with a ->pgmap back pointer
+		 * and zone_device_data.  It is a bug if a ZONE_DEVICE page is
+		 * ever freed or placed on a driver-private list.
 		 */
 		page->pgmap = pgmap;
-		page->hmm_data = 0;
+		page->zone_device_data = NULL;
 
 		/*
 		 * Mark the block movable so that blocks are reserved for
-- 
2.20.1