From: Alexey Kardashevskiy <aik@ozlabs.ru>
To: Christoph Hellwig <hch@lst.de>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>,
	Paul Mackerras <paulus@samba.org>,
	Michael Ellerman <mpe@ellerman.id.au>,
	linuxppc-dev@lists.ozlabs.org, linux-kernel@vger.kernel.org
Subject: Re: [PATCH 3/4] powerpc/powernv: remove dead NPU DMA code
Date: Wed, 19 Jun 2019 10:34:54 +1000	[thread overview]
Message-ID: <db502ec4-2e8f-fbc3-9db2-3fe98464a62c@ozlabs.ru> (raw)
In-Reply-To: <20190523074924.19659-4-hch@lst.de>



On 23/05/2019 17:49, Christoph Hellwig wrote:
> None of these routines were ever used since they were added to the
> kernel.


It is still being used, exactly in the way explained in previous
respins. Thanks.
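
For reference, a consumer of this API would use it roughly as follows.
This is a minimal sketch reconstructed only from the exported
signatures quoted below; the actual caller is not in-tree, and every
name introduced here (my_stop_translations, my_gpu_bind_mm) is
hypothetical:

/*
 * Hypothetical consumer of the pnv_npu2_* API removed by this patch;
 * a sketch based on the exported signatures, not real driver code.
 */
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <asm/powernv.h>
#include <asm/reg.h>

/* Release callback: stop the GPU issuing new translation requests. */
static void my_stop_translations(struct npu_context *ctx, void *priv)
{
}

static int my_gpu_bind_mm(struct pci_dev *gpdev)
{
	struct npu_context *ctx;

	/* pnv_npu2_init_context() requires mmap_sem held for write. */
	down_write(&current->mm->mmap_sem);
	ctx = pnv_npu2_init_context(gpdev, MSR_DR | MSR_PR | MSR_HV,
				    my_stop_translations, NULL);
	up_write(&current->mm->mmap_sem);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	/*
	 * On a GPU translation fault the driver would then call, with
	 * mmap_sem held:
	 *
	 *	pnv_npu2_handle_fault(ctx, &ea, &flags, &status, 1);
	 */

	pnv_npu2_destroy_context(ctx, gpdev);
	return 0;
}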


> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>  arch/powerpc/include/asm/book3s/64/mmu.h |   2 -
>  arch/powerpc/include/asm/powernv.h       |  22 -
>  arch/powerpc/mm/book3s64/mmu_context.c   |   1 -
>  arch/powerpc/platforms/powernv/npu-dma.c | 556 -----------------------
>  4 files changed, 581 deletions(-)
> 
> diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
> index 74d24201fc4f..23b83d3593e2 100644
> --- a/arch/powerpc/include/asm/book3s/64/mmu.h
> +++ b/arch/powerpc/include/asm/book3s/64/mmu.h
> @@ -116,8 +116,6 @@ typedef struct {
>  	/* Number of users of the external (Nest) MMU */
>  	atomic_t copros;
>  
> -	/* NPU NMMU context */
> -	struct npu_context *npu_context;
>  	struct hash_mm_context *hash_context;
>  
>  	unsigned long vdso_base;
> diff --git a/arch/powerpc/include/asm/powernv.h b/arch/powerpc/include/asm/powernv.h
> index 05b552418519..40f868c5e93c 100644
> --- a/arch/powerpc/include/asm/powernv.h
> +++ b/arch/powerpc/include/asm/powernv.h
> @@ -11,35 +11,13 @@
>  #define _ASM_POWERNV_H
>  
>  #ifdef CONFIG_PPC_POWERNV
> -#define NPU2_WRITE 1
>  extern void powernv_set_nmmu_ptcr(unsigned long ptcr);
> -extern struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
> -			unsigned long flags,
> -			void (*cb)(struct npu_context *, void *),
> -			void *priv);
> -extern void pnv_npu2_destroy_context(struct npu_context *context,
> -				struct pci_dev *gpdev);
> -extern int pnv_npu2_handle_fault(struct npu_context *context, uintptr_t *ea,
> -				unsigned long *flags, unsigned long *status,
> -				int count);
>  
>  void pnv_program_cpu_hotplug_lpcr(unsigned int cpu, u64 lpcr_val);
>  
>  void pnv_tm_init(void);
>  #else
>  static inline void powernv_set_nmmu_ptcr(unsigned long ptcr) { }
> -static inline struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
> -			unsigned long flags,
> -			struct npu_context *(*cb)(struct npu_context *, void *),
> -			void *priv) { return ERR_PTR(-ENODEV); }
> -static inline void pnv_npu2_destroy_context(struct npu_context *context,
> -					struct pci_dev *gpdev) { }
> -
> -static inline int pnv_npu2_handle_fault(struct npu_context *context,
> -					uintptr_t *ea, unsigned long *flags,
> -					unsigned long *status, int count) {
> -	return -ENODEV;
> -}
>  
>  static inline void pnv_tm_init(void) { }
>  #endif
> diff --git a/arch/powerpc/mm/book3s64/mmu_context.c b/arch/powerpc/mm/book3s64/mmu_context.c
> index cb2b08635508..0dd3e631cf3e 100644
> --- a/arch/powerpc/mm/book3s64/mmu_context.c
> +++ b/arch/powerpc/mm/book3s64/mmu_context.c
> @@ -140,7 +140,6 @@ static int radix__init_new_context(struct mm_struct *mm)
>  	 */
>  	asm volatile("ptesync;isync" : : : "memory");
>  
> -	mm->context.npu_context = NULL;
>  	mm->context.hash_context = NULL;
>  
>  	return index;
> diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c
> index 495550432f3d..4ed24132bb7c 100644
> --- a/arch/powerpc/platforms/powernv/npu-dma.c
> +++ b/arch/powerpc/platforms/powernv/npu-dma.c
> @@ -22,12 +22,6 @@
>  
>  #include "pci.h"
>  
> -/*
> - * spinlock to protect initialisation of an npu_context for a particular
> - * mm_struct.
> - */
> -static DEFINE_SPINLOCK(npu_context_lock);
> -
>  static struct pci_dev *get_pci_dev(struct device_node *dn)
>  {
>  	struct pci_dn *pdn = PCI_DN(dn);
> @@ -362,15 +356,6 @@ struct npu_comp {
>  /* An NPU descriptor, valid for POWER9 only */
>  struct npu {
>  	int index;
> -	__be64 *mmio_atsd_regs[NV_NMMU_ATSD_REGS];
> -	unsigned int mmio_atsd_count;
> -
> -	/* Bitmask for MMIO register usage */
> -	unsigned long mmio_atsd_usage;
> -
> -	/* Do we need to explicitly flush the nest mmu? */
> -	bool nmmu_flush;
> -
>  	struct npu_comp npucomp;
>  };
>  
> @@ -627,534 +612,8 @@ struct iommu_table_group *pnv_npu_compound_attach(struct pnv_ioda_pe *pe)
>  }
>  #endif /* CONFIG_IOMMU_API */
>  
> -/* Maximum number of nvlinks per npu */
> -#define NV_MAX_LINKS 6
> -
> -/* Maximum index of npu2 hosts in the system. Always < NV_MAX_NPUS */
> -static int max_npu2_index;
> -
> -struct npu_context {
> -	struct mm_struct *mm;
> -	struct pci_dev *npdev[NV_MAX_NPUS][NV_MAX_LINKS];
> -	struct mmu_notifier mn;
> -	struct kref kref;
> -	bool nmmu_flush;
> -
> -	/* Callback to stop translation requests on a given GPU */
> -	void (*release_cb)(struct npu_context *context, void *priv);
> -
> -	/*
> -	 * Private pointer passed to the above callback for usage by
> -	 * device drivers.
> -	 */
> -	void *priv;
> -};
> -
> -struct mmio_atsd_reg {
> -	struct npu *npu;
> -	int reg;
> -};
> -
> -/*
> - * Find a free MMIO ATSD register and mark it in use. Return -ENOSPC
> - * if none are available.
> - */
> -static int get_mmio_atsd_reg(struct npu *npu)
> -{
> -	int i;
> -
> -	for (i = 0; i < npu->mmio_atsd_count; i++) {
> -		if (!test_bit(i, &npu->mmio_atsd_usage))
> -			if (!test_and_set_bit_lock(i, &npu->mmio_atsd_usage))
> -				return i;
> -	}
> -
> -	return -ENOSPC;
> -}
> -
> -static void put_mmio_atsd_reg(struct npu *npu, int reg)
> -{
> -	clear_bit_unlock(reg, &npu->mmio_atsd_usage);
> -}
> -
> -/* MMIO ATSD register offsets */
> -#define XTS_ATSD_LAUNCH 0
> -#define XTS_ATSD_AVA    1
> -#define XTS_ATSD_STAT   2
> -
> -static unsigned long get_atsd_launch_val(unsigned long pid, unsigned long psize)
> -{
> -	unsigned long launch = 0;
> -
> -	if (psize == MMU_PAGE_COUNT) {
> -		/* IS set to invalidate entire matching PID */
> -		launch |= PPC_BIT(12);
> -	} else {
> -		/* AP set to invalidate region of psize */
> -		launch |= (u64)mmu_get_ap(psize) << PPC_BITLSHIFT(17);
> -	}
> -
> -	/* PRS set to process-scoped */
> -	launch |= PPC_BIT(13);
> -
> -	/* PID */
> -	launch |= pid << PPC_BITLSHIFT(38);
> -
> -	/* Leave "No flush" (bit 39) 0 so every ATSD performs a flush */
> -
> -	return launch;
> -}
> -
> -static void mmio_atsd_regs_write(struct mmio_atsd_reg
> -			mmio_atsd_reg[NV_MAX_NPUS], unsigned long offset,
> -			unsigned long val)
> -{
> -	struct npu *npu;
> -	int i, reg;
> -
> -	for (i = 0; i <= max_npu2_index; i++) {
> -		reg = mmio_atsd_reg[i].reg;
> -		if (reg < 0)
> -			continue;
> -
> -		npu = mmio_atsd_reg[i].npu;
> -		__raw_writeq_be(val, npu->mmio_atsd_regs[reg] + offset);
> -	}
> -}
> -
> -static void mmio_invalidate_pid(struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS],
> -				unsigned long pid)
> -{
> -	unsigned long launch = get_atsd_launch_val(pid, MMU_PAGE_COUNT);
> -
> -	/* Invalidating the entire process doesn't use a va */
> -	mmio_atsd_regs_write(mmio_atsd_reg, XTS_ATSD_LAUNCH, launch);
> -}
> -
> -static void mmio_invalidate_range(struct mmio_atsd_reg
> -			mmio_atsd_reg[NV_MAX_NPUS], unsigned long pid,
> -			unsigned long start, unsigned long psize)
> -{
> -	unsigned long launch = get_atsd_launch_val(pid, psize);
> -
> -	/* Write all VAs first */
> -	mmio_atsd_regs_write(mmio_atsd_reg, XTS_ATSD_AVA, start);
> -
> -	/* Issue one barrier for all address writes */
> -	eieio();
> -
> -	/* Launch */
> -	mmio_atsd_regs_write(mmio_atsd_reg, XTS_ATSD_LAUNCH, launch);
> -}
> -
> -#define mn_to_npu_context(x) container_of(x, struct npu_context, mn)
> -
> -static void mmio_invalidate_wait(
> -	struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS])
> -{
> -	struct npu *npu;
> -	int i, reg;
> -
> -	/* Wait for all invalidations to complete */
> -	for (i = 0; i <= max_npu2_index; i++) {
> -		if (mmio_atsd_reg[i].reg < 0)
> -			continue;
> -
> -		/* Wait for completion */
> -		npu = mmio_atsd_reg[i].npu;
> -		reg = mmio_atsd_reg[i].reg;
> -		while (__raw_readq(npu->mmio_atsd_regs[reg] + XTS_ATSD_STAT))
> -			cpu_relax();
> -	}
> -}
> -
> -/*
> - * Acquires all the address translation shootdown (ATSD) registers required to
> - * launch an ATSD on all links this npu_context is active on.
> - */
> -static void acquire_atsd_reg(struct npu_context *npu_context,
> -			struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS])
> -{
> -	int i, j;
> -	struct npu *npu;
> -	struct pci_dev *npdev;
> -
> -	for (i = 0; i <= max_npu2_index; i++) {
> -		mmio_atsd_reg[i].reg = -1;
> -		for (j = 0; j < NV_MAX_LINKS; j++) {
> -			/*
> -			 * There are no ordering requirements with respect to
> -			 * the setup of struct npu_context, but to ensure
> -			 * consistent behaviour we need to ensure npdev[][] is
> -			 * only read once.
> -			 */
> -			npdev = READ_ONCE(npu_context->npdev[i][j]);
> -			if (!npdev)
> -				continue;
> -
> -			npu = pci_bus_to_host(npdev->bus)->npu;
> -			if (!npu)
> -				continue;
> -
> -			mmio_atsd_reg[i].npu = npu;
> -			mmio_atsd_reg[i].reg = get_mmio_atsd_reg(npu);
> -			while (mmio_atsd_reg[i].reg < 0) {
> -				mmio_atsd_reg[i].reg = get_mmio_atsd_reg(npu);
> -				cpu_relax();
> -			}
> -			break;
> -		}
> -	}
> -}
> -
> -/*
> - * Release previously acquired ATSD registers. To avoid deadlocks the registers
> - * must be released in the same order they were acquired above in
> - * acquire_atsd_reg.
> - */
> -static void release_atsd_reg(struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS])
> -{
> -	int i;
> -
> -	for (i = 0; i <= max_npu2_index; i++) {
> -		/*
> -		 * We can't rely on npu_context->npdev[][] being the same here
> -		 * as when acquire_atsd_reg() was called, hence we use the
> -		 * values stored in mmio_atsd_reg during the acquire phase
> -		 * rather than re-reading npdev[][].
> -		 */
> -		if (mmio_atsd_reg[i].reg < 0)
> -			continue;
> -
> -		put_mmio_atsd_reg(mmio_atsd_reg[i].npu, mmio_atsd_reg[i].reg);
> -	}
> -}
> -
> -/*
> - * Invalidate a virtual address range
> - */
> -static void mmio_invalidate(struct npu_context *npu_context,
> -			unsigned long start, unsigned long size)
> -{
> -	struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS];
> -	unsigned long pid = npu_context->mm->context.id;
> -	unsigned long atsd_start = 0;
> -	unsigned long end = start + size - 1;
> -	int atsd_psize = MMU_PAGE_COUNT;
> -
> -	/*
> -	 * Convert the input range into one of the supported sizes. If the range
> -	 * doesn't fit, use the next larger supported size. Invalidation latency
> -	 * is high, so over-invalidation is preferred to issuing multiple
> -	 * invalidates.
> -	 *
> -	 * A 4K page size isn't supported by NPU/GPU ATS, so that case is
> -	 * ignored.
> -	 */
> -	if (size == SZ_64K) {
> -		atsd_start = start;
> -		atsd_psize = MMU_PAGE_64K;
> -	} else if (ALIGN_DOWN(start, SZ_2M) == ALIGN_DOWN(end, SZ_2M)) {
> -		atsd_start = ALIGN_DOWN(start, SZ_2M);
> -		atsd_psize = MMU_PAGE_2M;
> -	} else if (ALIGN_DOWN(start, SZ_1G) == ALIGN_DOWN(end, SZ_1G)) {
> -		atsd_start = ALIGN_DOWN(start, SZ_1G);
> -		atsd_psize = MMU_PAGE_1G;
> -	}
> -
> -	if (npu_context->nmmu_flush)
> -		/*
> -		 * Unfortunately the nest mmu does not support flushing specific
> -		 * addresses so we have to flush the whole mm once before
> -		 * shooting down the GPU translation.
> -		 */
> -		flush_all_mm(npu_context->mm);
> -
> -	/*
> -	 * Loop over all the NPUs this process is active on and launch
> -	 * an invalidate.
> -	 */
> -	acquire_atsd_reg(npu_context, mmio_atsd_reg);
> -
> -	if (atsd_psize == MMU_PAGE_COUNT)
> -		mmio_invalidate_pid(mmio_atsd_reg, pid);
> -	else
> -		mmio_invalidate_range(mmio_atsd_reg, pid, atsd_start,
> -					atsd_psize);
> -
> -	mmio_invalidate_wait(mmio_atsd_reg);
> -
> -	/*
> -	 * The GPU requires two flush ATSDs to ensure all entries have been
> -	 * flushed. We use PID 0 as it will never be used for a process on the
> -	 * GPU.
> -	 */
> -	mmio_invalidate_pid(mmio_atsd_reg, 0);
> -	mmio_invalidate_wait(mmio_atsd_reg);
> -	mmio_invalidate_pid(mmio_atsd_reg, 0);
> -	mmio_invalidate_wait(mmio_atsd_reg);
> -
> -	release_atsd_reg(mmio_atsd_reg);
> -}
> -
> -static void pnv_npu2_mn_release(struct mmu_notifier *mn,
> -				struct mm_struct *mm)
> -{
> -	struct npu_context *npu_context = mn_to_npu_context(mn);
> -
> -	/* Call into device driver to stop requests to the NMMU */
> -	if (npu_context->release_cb)
> -		npu_context->release_cb(npu_context, npu_context->priv);
> -
> -	/*
> -	 * There should be no more translation requests for this PID, but we
> -	 * need to ensure any entries for it are removed from the TLB.
> -	 */
> -	mmio_invalidate(npu_context, 0, ~0UL);
> -}
> -
> -static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn,
> -					struct mm_struct *mm,
> -					unsigned long start, unsigned long end)
> -{
> -	struct npu_context *npu_context = mn_to_npu_context(mn);
> -	mmio_invalidate(npu_context, start, end - start);
> -}
> -
> -static const struct mmu_notifier_ops nv_nmmu_notifier_ops = {
> -	.release = pnv_npu2_mn_release,
> -	.invalidate_range = pnv_npu2_mn_invalidate_range,
> -};
> -
> -/*
> - * Call into OPAL to set up the nmmu context for the current task in
> - * the NPU. This must be called to set up the context tables before the
> - * GPU issues ATRs. gpdev should be a pointer to the PCIe GPU device.
> - *
> - * A release callback should be registered to allow a device driver to
> - * be notified that it should not launch any new translation requests
> - * as the final TLB invalidate is about to occur.
> - *
> - * Returns an error if no contexts are currently available, or an
> - * npu_context which should be passed to pnv_npu2_handle_fault().
> - *
> - * mmap_sem must be held in write mode, and this function must not be
> - * called from interrupt context.
> - */
> -struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
> -			unsigned long flags,
> -			void (*cb)(struct npu_context *, void *),
> -			void *priv)
> -{
> -	int rc;
> -	u32 nvlink_index;
> -	struct device_node *nvlink_dn;
> -	struct mm_struct *mm = current->mm;
> -	struct npu *npu;
> -	struct npu_context *npu_context;
> -	struct pci_controller *hose;
> -
> -	/*
> -	 * At present we don't support GPUs connected to multiple NPUs and I'm
> -	 * not sure the hardware does either.
> -	 */
> -	struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0);
> -
> -	if (!npdev)
> -		/* No nvlink associated with this GPU device */
> -		return ERR_PTR(-ENODEV);
> -
> -	/* We only support DR/PR/HV in pnv_npu2_map_lpar_dev() */
> -	if (flags & ~(MSR_DR | MSR_PR | MSR_HV))
> -		return ERR_PTR(-EINVAL);
> -
> -	nvlink_dn = of_parse_phandle(npdev->dev.of_node, "ibm,nvlink", 0);
> -	if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index",
> -							&nvlink_index)))
> -		return ERR_PTR(-ENODEV);
> -
> -	if (!mm || mm->context.id == 0) {
> -		/*
> -		 * Kernel thread contexts are not supported and context id 0 is
> -		 * reserved on the GPU.
> -		 */
> -		return ERR_PTR(-EINVAL);
> -	}
> -
> -	hose = pci_bus_to_host(npdev->bus);
> -	npu = hose->npu;
> -	if (!npu)
> -		return ERR_PTR(-ENODEV);
> -
> -	/*
> -	 * We store the npu pci device so we can more easily get at the
> -	 * associated npus.
> -	 */
> -	spin_lock(&npu_context_lock);
> -	npu_context = mm->context.npu_context;
> -	if (npu_context) {
> -		if (npu_context->release_cb != cb ||
> -			npu_context->priv != priv) {
> -			spin_unlock(&npu_context_lock);
> -			return ERR_PTR(-EINVAL);
> -		}
> -
> -		WARN_ON(!kref_get_unless_zero(&npu_context->kref));
> -	}
> -	spin_unlock(&npu_context_lock);
> -
> -	if (!npu_context) {
> -		/*
> -		 * We can set up these fields without holding the
> -		 * npu_context_lock as the npu_context hasn't been returned to
> -		 * the caller meaning it can't be destroyed. Parallel allocation
> -		 * is protected against by mmap_sem.
> -		 */
> -		rc = -ENOMEM;
> -		npu_context = kzalloc(sizeof(struct npu_context), GFP_KERNEL);
> -		if (npu_context) {
> -			kref_init(&npu_context->kref);
> -			npu_context->mm = mm;
> -			npu_context->mn.ops = &nv_nmmu_notifier_ops;
> -			rc = __mmu_notifier_register(&npu_context->mn, mm);
> -		}
> -
> -		if (rc) {
> -			kfree(npu_context);
> -			return ERR_PTR(rc);
> -		}
> -
> -		mm->context.npu_context = npu_context;
> -	}
> -
> -	npu_context->release_cb = cb;
> -	npu_context->priv = priv;
> -
> -	/*
> -	 * npdev is a pci_dev pointer set up by the PCI code. We assign it to
> -	 * npdev[][] to indicate to the mmu notifiers that an invalidation
> -	 * should also be sent over this nvlink. The notifiers don't use any
> -	 * other fields in npu_context, so we just need to ensure that when they
> -	 * dereference npu_context->npdev[][] it is either a valid pointer or
> -	 * NULL.
> -	 */
> -	WRITE_ONCE(npu_context->npdev[npu->index][nvlink_index], npdev);
> -
> -	if (!npu->nmmu_flush) {
> -		/*
> -		 * If we're not explicitly flushing ourselves we need to mark
> -		 * the thread for global flushes
> -		 */
> -		npu_context->nmmu_flush = false;
> -		mm_context_add_copro(mm);
> -	} else
> -		npu_context->nmmu_flush = true;
> -
> -	return npu_context;
> -}
> -EXPORT_SYMBOL(pnv_npu2_init_context);
> -
> -static void pnv_npu2_release_context(struct kref *kref)
> -{
> -	struct npu_context *npu_context =
> -		container_of(kref, struct npu_context, kref);
> -
> -	if (!npu_context->nmmu_flush)
> -		mm_context_remove_copro(npu_context->mm);
> -
> -	npu_context->mm->context.npu_context = NULL;
> -}
> -
> -/*
> - * Destroy a context on the given GPU. May free the npu_context if it is no
> - * longer active on any GPUs. Must not be called from interrupt context.
> - */
> -void pnv_npu2_destroy_context(struct npu_context *npu_context,
> -			struct pci_dev *gpdev)
> -{
> -	int removed;
> -	struct npu *npu;
> -	struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0);
> -	struct device_node *nvlink_dn;
> -	u32 nvlink_index;
> -	struct pci_controller *hose;
> -
> -	if (WARN_ON(!npdev))
> -		return;
> -
> -	hose = pci_bus_to_host(npdev->bus);
> -	npu = hose->npu;
> -	if (!npu)
> -		return;
> -	nvlink_dn = of_parse_phandle(npdev->dev.of_node, "ibm,nvlink", 0);
> -	if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index",
> -							&nvlink_index)))
> -		return;
> -	WRITE_ONCE(npu_context->npdev[npu->index][nvlink_index], NULL);
> -	spin_lock(&npu_context_lock);
> -	removed = kref_put(&npu_context->kref, pnv_npu2_release_context);
> -	spin_unlock(&npu_context_lock);
> -
> -	/*
> -	 * We need to do this outside of pnv_npu2_release_context so that it is
> -	 * outside the spinlock as mmu_notifier_destroy uses SRCU.
> -	 */
> -	if (removed) {
> -		mmu_notifier_unregister(&npu_context->mn,
> -					npu_context->mm);
> -
> -		kfree(npu_context);
> -	}
> -
> -}
> -EXPORT_SYMBOL(pnv_npu2_destroy_context);
> -
> -/*
> - * Assumes mmap_sem is held for the context's associated mm.
> - */
> -int pnv_npu2_handle_fault(struct npu_context *context, uintptr_t *ea,
> -			unsigned long *flags, unsigned long *status, int count)
> -{
> -	u64 rc = 0, result = 0;
> -	int i, is_write;
> -	struct page *page[1];
> -	const char __user *u;
> -	char c;
> -
> -	/* mmap_sem should be held so the mm_struct must be present */
> -	struct mm_struct *mm = context->mm;
> -
> -	WARN_ON(!rwsem_is_locked(&mm->mmap_sem));
> -
> -	for (i = 0; i < count; i++) {
> -		is_write = flags[i] & NPU2_WRITE;
> -		rc = get_user_pages_remote(NULL, mm, ea[i], 1,
> -					is_write ? FOLL_WRITE : 0,
> -					page, NULL, NULL);
> -
> -		if (rc != 1) {
> -			status[i] = rc;
> -			result = -EFAULT;
> -			continue;
> -		}
> -
> -		/* Make sure partition scoped tree gets a pte */
> -		u = page_address(page[0]);
> -		if (__get_user(c, u))
> -			result = -EFAULT;
> -
> -		status[i] = 0;
> -		put_page(page[0]);
> -	}
> -
> -	return result;
> -}
> -EXPORT_SYMBOL(pnv_npu2_handle_fault);
> -
>  int pnv_npu2_init(struct pci_controller *hose)
>  {
> -	unsigned int i;
> -	u64 mmio_atsd;
>  	static int npu_index;
>  	struct npu *npu;
>  	int ret;
> @@ -1163,33 +622,18 @@ int pnv_npu2_init(struct pci_controller *hose)
>  	if (!npu)
>  		return -ENOMEM;
>  
> -	npu->nmmu_flush = of_property_read_bool(hose->dn, "ibm,nmmu-flush");
> -
> -	for (i = 0; i < ARRAY_SIZE(npu->mmio_atsd_regs) &&
> -			!of_property_read_u64_index(hose->dn, "ibm,mmio-atsd",
> -				i, &mmio_atsd); i++)
> -		npu->mmio_atsd_regs[i] = ioremap(mmio_atsd, 32);
> -
> -	pr_info("NPU%d: Found %d MMIO ATSD registers", hose->global_number, i);
> -	npu->mmio_atsd_count = i;
> -	npu->mmio_atsd_usage = 0;
>  	npu_index++;
>  	if (WARN_ON(npu_index >= NV_MAX_NPUS)) {
>  		ret = -ENOSPC;
>  		goto fail_exit;
>  	}
> -	max_npu2_index = npu_index;
>  	npu->index = npu_index;
>  	hose->npu = npu;
>  
>  	return 0;
>  
>  fail_exit:
> -	for (i = 0; i < npu->mmio_atsd_count; ++i)
> -		iounmap(npu->mmio_atsd_regs[i]);
> -
>  	kfree(npu);
> -
>  	return ret;
>  }
>  
> 

-- 
Alexey

Thread overview:
2019-05-23  7:49 remove dead powernv code v2 Christoph Hellwig
2019-05-23  7:49 ` [PATCH 1/4] powerpc/powernv: remove the unused pnv_pci_set_p2p function Christoph Hellwig
2019-05-23  7:49 ` [PATCH 2/4] powerpc/powernv: remove the unused tunneling exports Christoph Hellwig
2019-06-21  1:47   ` Oliver O'Halloran
2019-06-21  9:21     ` Frederic Barrat
2019-06-21  9:24       ` Christoph Hellwig
2019-05-23  7:49 ` [PATCH 3/4] powerpc/powernv: remove dead NPU DMA code Christoph Hellwig
2019-06-19  0:34   ` Alexey Kardashevskiy [this message]
2019-06-19  7:28     ` Christoph Hellwig
2019-06-20  1:45       ` Alexey Kardashevskiy
2019-06-20  6:03         ` Christoph Hellwig
2019-06-20  6:20           ` Alexey Kardashevskiy
2019-06-20  6:21             ` Christoph Hellwig
2019-06-20  6:48               ` Alexey Kardashevskiy
2019-05-23  7:49 ` [PATCH 4/4] powerpc/powernv: remove the unused vas_win_paste_addr and vas_win_id functions Christoph Hellwig
2019-06-21  1:40   ` Oliver O'Halloran
2019-06-17  6:46 ` remove dead powernv code v2 Christoph Hellwig
