From mboxrd@z Thu Jan 1 00:00:00 1970
From: Christoph Hellwig
Subject: [PATCH 19/25] nouveau: use devm_memremap_pages directly
Date: Wed, 26 Jun 2019 14:27:18 +0200
Message-Id: <20190626122724.13313-20-hch@lst.de>
In-Reply-To: <20190626122724.13313-1-hch@lst.de>
References: <20190626122724.13313-1-hch@lst.de>
MIME-Version: 1.0
Content-Type: text/plain; charset="us-ascii"
Content-Transfer-Encoding: 7bit
To: Dan Williams, Jérôme Glisse, Jason Gunthorpe, Ben Skeggs
Cc: linux-nvdimm@lists.01.org, linux-pci@vger.kernel.org,
	linux-kernel@vger.kernel.org, dri-devel@lists.freedesktop.org,
	linux-mm@kvack.org, nouveau@lists.freedesktop.org

Just use devm_memremap_pages instead of hmm_devmem_add pages to allow
killing that wrapper which doesn't provide a whole lot of benefits.

Signed-off-by: Christoph Hellwig
---
 drivers/gpu/drm/nouveau/nouveau_dmem.c | 82 ++++++++++++--------------
 1 file changed, 38 insertions(+), 44 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index a50f6fd2fe24..0fb7a44b8bc4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -72,7 +72,8 @@ struct nouveau_dmem_migrate {
 };
 
 struct nouveau_dmem {
-	struct hmm_devmem *devmem;
+	struct nouveau_drm *drm;
+	struct dev_pagemap pagemap;
 	struct nouveau_dmem_migrate migrate;
 	struct list_head chunk_free;
 	struct list_head chunk_full;
@@ -80,6 +81,11 @@ struct nouveau_dmem {
 	struct mutex mutex;
 };
 
+static inline struct nouveau_dmem *page_to_dmem(struct page *page)
+{
+	return container_of(page->pgmap, struct nouveau_dmem, pagemap);
+}
+
 struct nouveau_dmem_fault {
 	struct nouveau_drm *drm;
 	struct nouveau_fence *fence;
@@ -96,8 +102,7 @@ struct nouveau_migrate {
 	unsigned long dma_nr;
 };
 
-static void
-nouveau_dmem_free(struct hmm_devmem *devmem, struct page *page)
+static void nouveau_dmem_page_free(struct page *page)
 {
 	struct nouveau_dmem_chunk *chunk;
 	unsigned long idx;
@@ -260,29 +265,21 @@ static const struct migrate_vma_ops nouveau_dmem_fault_migrate_ops = {
 	.finalize_and_map	= nouveau_dmem_fault_finalize_and_map,
 };
 
-static vm_fault_t
-nouveau_dmem_fault(struct hmm_devmem *devmem,
-		   struct vm_area_struct *vma,
-		   unsigned long addr,
-		   const struct page *page,
-		   unsigned int flags,
-		   pmd_t *pmdp)
+static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
 {
-	struct drm_device *drm_dev = dev_get_drvdata(devmem->device);
+	struct nouveau_dmem *dmem = page_to_dmem(vmf->page);
 	unsigned long src[1] = {0}, dst[1] = {0};
-	struct nouveau_dmem_fault fault = {0};
+	struct nouveau_dmem_fault fault = { .drm = dmem->drm };
 	int ret;
 
-
-
 	/*
 	 * FIXME what we really want is to find some heuristic to migrate more
 	 * than just one page on CPU fault. When such fault happens it is very
 	 * likely that more surrounding page will CPU fault too.
 	 */
-	fault.drm = nouveau_drm(drm_dev);
-	ret = migrate_vma(&nouveau_dmem_fault_migrate_ops, vma, addr,
-			  addr + PAGE_SIZE, src, dst, &fault);
+	ret = migrate_vma(&nouveau_dmem_fault_migrate_ops, vmf->vma,
+			vmf->address, vmf->address + PAGE_SIZE,
+			src, dst, &fault);
 	if (ret)
 		return VM_FAULT_SIGBUS;
 
@@ -292,10 +289,9 @@ nouveau_dmem_fault(struct hmm_devmem *devmem,
 	return 0;
 }
 
-static const struct hmm_devmem_ops
-nouveau_dmem_devmem_ops = {
-	.free = nouveau_dmem_free,
-	.fault = nouveau_dmem_fault,
+static const struct dev_pagemap_ops nouveau_dmem_pagemap_ops = {
+	.page_free		= nouveau_dmem_page_free,
+	.migrate_to_ram		= nouveau_dmem_migrate_to_ram,
 };
 
 static int
@@ -581,7 +577,8 @@ void
 nouveau_dmem_init(struct nouveau_drm *drm)
 {
 	struct device *device = drm->dev->dev;
-	unsigned long i, size;
+	struct resource *res;
+	unsigned long i, size, pfn_first;
 	int ret;
 
 	/* This only make sense on PASCAL or newer */
@@ -591,6 +588,7 @@ nouveau_dmem_init(struct nouveau_drm *drm)
 	if (!(drm->dmem = kzalloc(sizeof(*drm->dmem), GFP_KERNEL)))
 		return;
 
+	drm->dmem->drm = drm;
 	mutex_init(&drm->dmem->mutex);
 	INIT_LIST_HEAD(&drm->dmem->chunk_free);
 	INIT_LIST_HEAD(&drm->dmem->chunk_full);
@@ -600,11 +598,8 @@ nouveau_dmem_init(struct nouveau_drm *drm)
 
 	/* Initialize migration dma helpers before registering memory */
 	ret = nouveau_dmem_migrate_init(drm);
-	if (ret) {
-		kfree(drm->dmem);
-		drm->dmem = NULL;
-		return;
-	}
+	if (ret)
+		goto out_free;
 
 	/*
 	 * FIXME we need some kind of policy to decide how much VRAM we
@@ -612,14 +607,16 @@ nouveau_dmem_init(struct nouveau_drm *drm)
 	 * and latter if we want to do thing like over commit then we
 	 * could revisit this.
 	 */
-	drm->dmem->devmem = hmm_devmem_add(&nouveau_dmem_devmem_ops,
-					   device, size);
-	if (IS_ERR(drm->dmem->devmem)) {
-		kfree(drm->dmem);
-		drm->dmem = NULL;
-		return;
-	}
-
+	res = devm_request_free_mem_region(device, &iomem_resource, size);
+	if (IS_ERR(res))
+		goto out_free;
+	drm->dmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
+	drm->dmem->pagemap.res = *res;
+	drm->dmem->pagemap.ops = &nouveau_dmem_pagemap_ops;
+	if (IS_ERR(devm_memremap_pages(device, &drm->dmem->pagemap)))
+		goto out_free;
+
+	pfn_first = res->start >> PAGE_SHIFT;
 	for (i = 0; i < (size / DMEM_CHUNK_SIZE); ++i) {
 		struct nouveau_dmem_chunk *chunk;
 		struct page *page;
@@ -632,8 +629,7 @@ nouveau_dmem_init(struct nouveau_drm *drm)
 		}
 
 		chunk->drm = drm;
-		chunk->pfn_first = drm->dmem->devmem->pfn_first;
-		chunk->pfn_first += (i * DMEM_CHUNK_NPAGES);
+		chunk->pfn_first = pfn_first + (i * DMEM_CHUNK_NPAGES);
 		list_add_tail(&chunk->list, &drm->dmem->chunk_empty);
 
 		page = pfn_to_page(chunk->pfn_first);
@@ -643,6 +639,10 @@ nouveau_dmem_init(struct nouveau_drm *drm)
 	}
 
 	NV_INFO(drm, "DMEM: registered %ldMB of device memory\n", size >> 20);
+	return;
+out_free:
+	kfree(drm->dmem);
+	drm->dmem = NULL;
 }
 
 static void
@@ -833,13 +833,7 @@ nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
 static inline bool
 nouveau_dmem_page(struct nouveau_drm *drm, struct page *page)
 {
-	if (!is_device_private_page(page))
-		return false;
-
-	if (drm->dmem->devmem != page->pgmap->data)
-		return false;
-
-	return true;
+	return is_device_private_page(page) && drm->dmem == page_to_dmem(page);
 }
 
 void
-- 
2.20.1
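
For readers following the conversion: the heart of the change is that the
driver now embeds a struct dev_pagemap in its private structure and registers
it with devm_memremap_pages() instead of going through hmm_devmem_add(). The
sketch below restates that pattern outside the nouveau code and is
illustrative only, not part of the patch. The names my_dev_private,
my_page_free(), my_migrate_to_ram() and my_register_device_memory() are
hypothetical placeholders, and the snippet assumes the dev_pagemap interface
as it stands at this point in the series (a res field on struct dev_pagemap
plus dev_pagemap_ops with .page_free and .migrate_to_ram callbacks).

	#include <linux/err.h>
	#include <linux/ioport.h>
	#include <linux/memremap.h>
	#include <linux/mm.h>

	/* Hypothetical driver-private structure; the patch uses struct nouveau_dmem. */
	struct my_dev_private {
		struct dev_pagemap pagemap;	/* embedded so container_of() can recover it */
	};

	static void my_page_free(struct page *page)
	{
		/* hand the device page back to the driver's own allocator */
	}

	static vm_fault_t my_migrate_to_ram(struct vm_fault *vmf)
	{
		/* migrate vmf->page back to system memory on a CPU fault */
		return 0;
	}

	static const struct dev_pagemap_ops my_pagemap_ops = {
		.page_free	= my_page_free,
		.migrate_to_ram	= my_migrate_to_ram,
	};

	static int my_register_device_memory(struct device *dev,
			struct my_dev_private *priv, unsigned long size)
	{
		struct resource *res;
		void *addr;

		/* reserve an unused physical address range to back the device pages */
		res = devm_request_free_mem_region(dev, &iomem_resource, size);
		if (IS_ERR(res))
			return PTR_ERR(res);

		priv->pagemap.type = MEMORY_DEVICE_PRIVATE;
		priv->pagemap.res = *res;
		priv->pagemap.ops = &my_pagemap_ops;

		/* create struct pages for the range; teardown is devres-managed */
		addr = devm_memremap_pages(dev, &priv->pagemap);
		if (IS_ERR(addr))
			return PTR_ERR(addr);

		return 0;
	}

Embedding the dev_pagemap in the owning structure is what enables the
container_of() based page_to_dmem() helper in the patch, which in turn lets
nouveau_dmem_page() test page ownership directly against drm->dmem instead of
relying on the old page->pgmap->data pointer.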