From mboxrd@z Thu Jan 1 00:00:00 1970
From: Christoph Hellwig
Date: Fri, 16 Jun 2017 18:10:17 +0000
Subject: [PATCH 02/44] ibmveth: properly unwind on init errors
Message-Id: <20170616181059.19206-3-hch@lst.de>
List-Id:
References: <20170616181059.19206-1-hch@lst.de>
In-Reply-To: <20170616181059.19206-1-hch@lst.de>
MIME-Version: 1.0
Content-Type: text/plain; charset="us-ascii"
Content-Transfer-Encoding: 7bit
To: x86@kernel.org, linux-arm-kernel@lists.infradead.org, xen-devel@lists.xenproject.org, linux-c6x-dev@linux-c6x.org, linux-hexagon@vger.kernel.org, linux-ia64@vger.kernel.org, linux-mips@linux-mips.org, openrisc@lists.librecores.org, linuxppc-dev@lists.ozlabs.org, linux-s390@vger.kernel.org, linux-sh@vger.kernel.org, sparclinux@vger.kernel.org, linux-xtensa@linux-xtensa.org, dmaengine@vger.kernel.org, linux-tegra@vger.kernel.org, dri-devel@lists.freedesktop.org, linux-samsung-soc@vger.kernel.org, iommu@lists.linux-foundation.org, netdev@vger.kernel.org
Cc: linux-kernel@vger.kernel.org

That way the driver doesn't have to rely on DMA_ERROR_CODE, which
is not a public API and going away.

Signed-off-by: Christoph Hellwig
Acked-by: David S. Miller
---
 drivers/net/ethernet/ibm/ibmveth.c | 159 +++++++++++++++++--------------------
 1 file changed, 74 insertions(+), 85 deletions(-)

diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 72ab7b6bf20b..3ac27f59e595 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -467,56 +467,6 @@ static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
 	}
 }
 
-static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
-{
-	int i;
-	struct device *dev = &adapter->vdev->dev;
-
-	if (adapter->buffer_list_addr != NULL) {
-		if (!dma_mapping_error(dev, adapter->buffer_list_dma)) {
-			dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
-					DMA_BIDIRECTIONAL);
-			adapter->buffer_list_dma = DMA_ERROR_CODE;
-		}
-		free_page((unsigned long)adapter->buffer_list_addr);
-		adapter->buffer_list_addr = NULL;
-	}
-
-	if (adapter->filter_list_addr != NULL) {
-		if (!dma_mapping_error(dev, adapter->filter_list_dma)) {
-			dma_unmap_single(dev, adapter->filter_list_dma, 4096,
-					DMA_BIDIRECTIONAL);
-			adapter->filter_list_dma = DMA_ERROR_CODE;
-		}
-		free_page((unsigned long)adapter->filter_list_addr);
-		adapter->filter_list_addr = NULL;
-	}
-
-	if (adapter->rx_queue.queue_addr != NULL) {
-		dma_free_coherent(dev, adapter->rx_queue.queue_len,
-				  adapter->rx_queue.queue_addr,
-				  adapter->rx_queue.queue_dma);
-		adapter->rx_queue.queue_addr = NULL;
-	}
-
-	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
-		if (adapter->rx_buff_pool[i].active)
-			ibmveth_free_buffer_pool(adapter,
-						 &adapter->rx_buff_pool[i]);
-
-	if (adapter->bounce_buffer != NULL) {
-		if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
-			dma_unmap_single(&adapter->vdev->dev,
-					adapter->bounce_buffer_dma,
-					adapter->netdev->mtu + IBMVETH_BUFF_OH,
-					DMA_BIDIRECTIONAL);
-			adapter->bounce_buffer_dma = DMA_ERROR_CODE;
-		}
-		kfree(adapter->bounce_buffer);
-		adapter->bounce_buffer = NULL;
-	}
-}
-
 static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
         union ibmveth_buf_desc rxq_desc, u64 mac_address)
 {
@@ -573,14 +523,17 @@ static int ibmveth_open(struct net_device *netdev)
 	for(i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
 		rxq_entries += adapter->rx_buff_pool[i].size;
 
+	rc = -ENOMEM;
 	adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
-	adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
+	if (!adapter->buffer_list_addr) {
+		netdev_err(netdev, "unable to allocate list pages\n");
+		goto out;
+	}
 
-	if (!adapter->buffer_list_addr || !adapter->filter_list_addr) {
-		netdev_err(netdev, "unable to allocate filter or buffer list "
-			   "pages\n");
-		rc = -ENOMEM;
-		goto err_out;
+	adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
+	if (!adapter->filter_list_addr) {
+		netdev_err(netdev, "unable to allocate filter pages\n");
+		goto out_free_buffer_list;
 	}
 
 	dev = &adapter->vdev->dev;
@@ -590,22 +543,21 @@ static int ibmveth_open(struct net_device *netdev)
 	adapter->rx_queue.queue_addr =
 		dma_alloc_coherent(dev, adapter->rx_queue.queue_len,
 				   &adapter->rx_queue.queue_dma, GFP_KERNEL);
-	if (!adapter->rx_queue.queue_addr) {
-		rc = -ENOMEM;
-		goto err_out;
-	}
+	if (!adapter->rx_queue.queue_addr)
+		goto out_free_filter_list;
 
 	adapter->buffer_list_dma = dma_map_single(dev,
 			adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(dev, adapter->buffer_list_dma)) {
+		netdev_err(netdev, "unable to map buffer list pages\n");
+		goto out_free_queue_mem;
+	}
+
 	adapter->filter_list_dma = dma_map_single(dev,
 			adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
-
-	if ((dma_mapping_error(dev, adapter->buffer_list_dma)) ||
-	    (dma_mapping_error(dev, adapter->filter_list_dma))) {
-		netdev_err(netdev, "unable to map filter or buffer list "
-			   "pages\n");
-		rc = -ENOMEM;
-		goto err_out;
+	if (dma_mapping_error(dev, adapter->filter_list_dma)) {
+		netdev_err(netdev, "unable to map filter list pages\n");
+		goto out_unmap_buffer_list;
 	}
 
 	adapter->rx_queue.index = 0;
@@ -636,7 +588,7 @@ static int ibmveth_open(struct net_device *netdev)
 				     rxq_desc.desc,
 				     mac_address);
 		rc = -ENONET;
-		goto err_out;
+		goto out_unmap_filter_list;
 	}
 
 	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
@@ -646,7 +598,7 @@ static int ibmveth_open(struct net_device *netdev)
 			netdev_err(netdev, "unable to alloc pool\n");
 			adapter->rx_buff_pool[i].active = 0;
 			rc = -ENOMEM;
-			goto err_out;
+			goto out_free_buffer_pools;
 		}
 	}
 
@@ -660,22 +612,21 @@ static int ibmveth_open(struct net_device *netdev)
 			lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
 		} while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));
 
-		goto err_out;
+		goto out_free_buffer_pools;
 	}
 
+	rc = -ENOMEM;
 	adapter->bounce_buffer =
 	    kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL);
-	if (!adapter->bounce_buffer) {
-		rc = -ENOMEM;
-		goto err_out_free_irq;
-	}
+	if (!adapter->bounce_buffer)
+		goto out_free_irq;
+
 	adapter->bounce_buffer_dma =
 	    dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
 			   netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
 	if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
 		netdev_err(netdev, "unable to map bounce buffer\n");
-		rc = -ENOMEM;
-		goto err_out_free_irq;
+		goto out_free_bounce_buffer;
 	}
 
 	netdev_dbg(netdev, "initial replenish cycle\n");
@@ -687,10 +638,31 @@ static int ibmveth_open(struct net_device *netdev)
 
 	return 0;
 
-err_out_free_irq:
+out_free_bounce_buffer:
+	kfree(adapter->bounce_buffer);
+out_free_irq:
 	free_irq(netdev->irq, netdev);
-err_out:
-	ibmveth_cleanup(adapter);
+out_free_buffer_pools:
+	while (--i >= 0) {
+		if (adapter->rx_buff_pool[i].active)
+			ibmveth_free_buffer_pool(adapter,
+						 &adapter->rx_buff_pool[i]);
+	}
+out_unmap_filter_list:
+	dma_unmap_single(dev, adapter->filter_list_dma, 4096,
+			 DMA_BIDIRECTIONAL);
+out_unmap_buffer_list:
+	dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
+			 DMA_BIDIRECTIONAL);
+out_free_queue_mem:
+	dma_free_coherent(dev, adapter->rx_queue.queue_len,
+			  adapter->rx_queue.queue_addr,
+			  adapter->rx_queue.queue_dma);
+out_free_filter_list:
+	free_page((unsigned long)adapter->filter_list_addr);
+out_free_buffer_list:
+	free_page((unsigned long)adapter->buffer_list_addr);
+out:
 	napi_disable(&adapter->napi);
 	return rc;
 }
@@ -698,7 +670,9 @@ static int ibmveth_open(struct net_device *netdev)
 static int ibmveth_close(struct net_device *netdev)
 {
 	struct ibmveth_adapter *adapter = netdev_priv(netdev);
+	struct device *dev = &adapter->vdev->dev;
 	long lpar_rc;
+	int i;
 
 	netdev_dbg(netdev, "close starting\n");
 
@@ -722,7 +696,27 @@ static int ibmveth_close(struct net_device *netdev)
 
 	ibmveth_update_rx_no_buffer(adapter);
 
-	ibmveth_cleanup(adapter);
+	dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
+			 DMA_BIDIRECTIONAL);
+	free_page((unsigned long)adapter->buffer_list_addr);
+
+	dma_unmap_single(dev, adapter->filter_list_dma, 4096,
+			 DMA_BIDIRECTIONAL);
+	free_page((unsigned long)adapter->filter_list_addr);
+
+	dma_free_coherent(dev, adapter->rx_queue.queue_len,
+			  adapter->rx_queue.queue_addr,
+			  adapter->rx_queue.queue_dma);
+
+	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
+		if (adapter->rx_buff_pool[i].active)
+			ibmveth_free_buffer_pool(adapter,
+						 &adapter->rx_buff_pool[i]);
+
+	dma_unmap_single(&adapter->vdev->dev, adapter->bounce_buffer_dma,
+			 adapter->netdev->mtu + IBMVETH_BUFF_OH,
+			 DMA_BIDIRECTIONAL);
+	kfree(adapter->bounce_buffer);
 
 	netdev_dbg(netdev, "close complete\n");
 
@@ -1648,11 +1642,6 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
 	}
 
 	netdev_dbg(netdev, "adapter @ 0x%p\n", adapter);
-
-	adapter->buffer_list_dma = DMA_ERROR_CODE;
-	adapter->filter_list_dma = DMA_ERROR_CODE;
-	adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
-
 	netdev_dbg(netdev, "registering netdev...\n");
 
 	ibmveth_set_features(netdev, netdev->features);
-- 
2.11.0
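
For readers unfamiliar with the idiom the patch switches to, the sketch below shows the same goto-based unwinding structure in isolation. It is only an illustration, not driver code: malloc()/free() stand in for the page allocations and DMA mappings, and the function and label names are made up for the example. The point is that each label releases exactly the resources acquired before the step that failed, so no DMA_ERROR_CODE-style sentinel is needed to remember what has already been set up.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Minimal sketch of the unwinding pattern used in ibmveth_open() after
 * this patch: acquire resources in order, and on failure jump to a label
 * that releases only what was already acquired, in reverse order.
 */
static int open_example(void)
{
	void *buffer_list, *filter_list, *queue;

	buffer_list = malloc(4096);
	if (!buffer_list)
		goto out;

	filter_list = malloc(4096);
	if (!filter_list)
		goto out_free_buffer_list;

	queue = malloc(4096);
	if (!queue)
		goto out_free_filter_list;

	/*
	 * Success path: in the real driver the resources stay allocated and
	 * are released later by ibmveth_close(); here they are freed right
	 * away so the example does not leak.
	 */
	free(queue);
	free(filter_list);
	free(buffer_list);
	return 0;

out_free_filter_list:
	free(filter_list);
out_free_buffer_list:
	free(buffer_list);
out:
	return -ENOMEM;
}

int main(void)
{
	printf("open_example() returned %d\n", open_example());
	return 0;
}

The same reasoning is why ibmveth_probe() no longer needs to pre-set the DMA handles to DMA_ERROR_CODE and why ibmveth_close() can unmap and free everything unconditionally: once the device is open, every mapping is known to be valid.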