From mboxrd@z Thu Jan  1 00:00:00 1970
Return-Path:
Received: from bombadil.infradead.org ([198.137.202.133]:58124 "EHLO
        bombadil.infradead.org" rhost-flags-OK-OK-OK-OK) by vger.kernel.org
        with ESMTP id S1752627AbeEPFoy (ORCPT);
        Wed, 16 May 2018 01:44:54 -0400
From: Christoph Hellwig <hch@lst.de>
To: Souptick Joarder, Matthew Wilcox
Cc: linux-fsdevel@vger.kernel.org, linux-mm@kvack.org,
        linux-kernel@vger.kernel.org, devel@lists.orangefs.org,
        ceph-devel@vger.kernel.org, linux-btrfs@vger.kernel.org,
        linux-ext4@vger.kernel.org, ocfs2-devel@oss.oracle.com,
        linux-mtd@lists.infradead.org, dri-devel@lists.freedesktop.org,
        lustre-devel@lists.lustre.org, linux-arm-kernel@lists.infradead.org,
        linux-s390@vger.kernel.org
Subject: [PATCH 11/14] ttm: separate errno from VM_FAULT_* values
Date: Wed, 16 May 2018 07:43:45 +0200
Message-Id: <20180516054348.15950-12-hch@lst.de>
In-Reply-To: <20180516054348.15950-1-hch@lst.de>
References: <20180516054348.15950-1-hch@lst.de>
Sender: linux-btrfs-owner@vger.kernel.org
List-ID:

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 drivers/gpu/drm/ttm/ttm_bo_vm.c | 42 +++++++++++++++++----------------
 1 file changed, 22 insertions(+), 20 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 8eba95b3c737..255e7801f62c 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -43,10 +43,11 @@
 
 #define TTM_BO_VM_NUM_PREFAULT 16
 
-static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
+static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
 				struct vm_fault *vmf)
 {
-	int ret = 0;
+	vm_fault_t ret = 0;
+	int err = 0;
 
 	if (likely(!bo->moving))
 		goto out_unlock;
@@ -77,8 +78,8 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
 	/*
 	 * Ordinary wait.
 	 */
-	ret = dma_fence_wait(bo->moving, true);
-	if (unlikely(ret != 0)) {
+	err = dma_fence_wait(bo->moving, true);
+	if (unlikely(err != 0)) {
 		ret = (ret != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
 			VM_FAULT_NOPAGE;
 		goto out_unlock;
@@ -104,7 +105,7 @@ static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo,
 		+ page_offset;
 }
 
-static int ttm_bo_vm_fault(struct vm_fault *vmf)
+static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
 	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
@@ -115,7 +116,8 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
 	unsigned long pfn;
 	struct ttm_tt *ttm = NULL;
 	struct page *page;
-	int ret;
+	vm_fault_t ret;
+	int err;
 	int i;
 	unsigned long address = vmf->address;
 	struct ttm_mem_type_manager *man =
@@ -128,9 +130,9 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
 	 * for reserve, and if it fails, retry the fault after waiting
 	 * for the buffer to become unreserved.
 	 */
-	ret = ttm_bo_reserve(bo, true, true, NULL);
-	if (unlikely(ret != 0)) {
-		if (ret != -EBUSY)
+	err = ttm_bo_reserve(bo, true, true, NULL);
+	if (unlikely(err != 0)) {
+		if (err != -EBUSY)
 			return VM_FAULT_NOPAGE;
 
 		if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
@@ -162,8 +164,8 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
 	}
 
 	if (bdev->driver->fault_reserve_notify) {
-		ret = bdev->driver->fault_reserve_notify(bo);
-		switch (ret) {
+		err = bdev->driver->fault_reserve_notify(bo);
+		switch (err) {
 		case 0:
 			break;
 		case -EBUSY:
@@ -191,13 +193,13 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
 		goto out_unlock;
 	}
 
-	ret = ttm_mem_io_lock(man, true);
-	if (unlikely(ret != 0)) {
+	err = ttm_mem_io_lock(man, true);
+	if (unlikely(err != 0)) {
 		ret = VM_FAULT_NOPAGE;
 		goto out_unlock;
 	}
-	ret = ttm_mem_io_reserve_vm(bo);
-	if (unlikely(ret != 0)) {
+	err = ttm_mem_io_reserve_vm(bo);
+	if (unlikely(err != 0)) {
 		ret = VM_FAULT_SIGBUS;
 		goto out_io_unlock;
 	}
@@ -265,21 +267,21 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
 		}
 
 		if (vma->vm_flags & VM_MIXEDMAP)
-			ret = vm_insert_mixed(&cvma, address,
+			err = vm_insert_mixed(&cvma, address,
 					__pfn_to_pfn_t(pfn, PFN_DEV));
 		else
-			ret = vm_insert_pfn(&cvma, address, pfn);
+			err = vm_insert_pfn(&cvma, address, pfn);
 
 		/*
 		 * Somebody beat us to this PTE or prefaulting to
 		 * an already populated PTE, or prefaulting error.
 		 */
-		if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
+		if (unlikely((err == -EBUSY) || (err != 0 && i > 0)))
 			break;
-		else if (unlikely(ret != 0)) {
+		else if (unlikely(err != 0)) {
 			ret =
-			    (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
+			    (err == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
 			goto out_io_unlock;
 		}
-- 
2.17.0
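
For readers following the conversion: the pattern above keeps errno values from helpers such as ttm_bo_reserve(), ttm_mem_io_lock() or vm_insert_pfn() in a plain int ("err"), and only ever stores VM_FAULT_* codes in the vm_fault_t ("ret") that the fault handler returns, translating at the boundary. A minimal sketch of that split, assuming a hypothetical helper do_prepare() that returns 0 or a negative errno in place of the TTM-specific calls:

static vm_fault_t example_fault(struct vm_fault *vmf)
{
	vm_fault_t ret;		/* holds only VM_FAULT_* codes */
	int err;		/* holds only 0 or a negative errno */

	err = do_prepare(vmf);	/* hypothetical helper, returns 0 or -errno */
	if (unlikely(err)) {
		/* translate the errno into a fault code at the boundary */
		ret = (err == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
		return ret;
	}

	return VM_FAULT_NOPAGE;
}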