From: Minchan Kim <minchan@kernel.org>
To: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Andrew Morton <akpm@linux-foundation.org>,
	"Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>,
	Marek Szyprowski <m.szyprowski@samsung.com>,
	Michal Nazarewicz <mina86@mina86.com>,
	Russell King - ARM Linux <linux@arm.linux.org.uk>,
	kvm@vger.kernel.org, linux-mm@kvack.org,
	Gleb Natapov <gleb@kernel.org>,
	Greg Kroah-Hartman <gregkh@linuxfoundation.org>,
	Alexander Graf <agraf@suse.de>,
	kvm-ppc@vger.kernel.org, linux-kernel@vger.kernel.org,
	Paul Mackerras <paulus@samba.org>,
	Benjamin Herrenschmidt <benh@kernel.crashing.org>,
	Paolo Bonzini <pbonzini@redhat.com>,
	linuxppc-dev@lists.ozlabs.org,
	linux-arm-kernel@lists.infradead.org
Subject: Re: [PATCH v2 03/10] DMA, CMA: separate core cma management codes from DMA APIs
Date: Thu, 12 Jun 2014 14:37:43 +0900	[thread overview]
Message-ID: <20140612053743.GF12415@bbox> (raw)
In-Reply-To: <1402543307-29800-4-git-send-email-iamjoonsoo.kim@lge.com>

On Thu, Jun 12, 2014 at 12:21:40PM +0900, Joonsoo Kim wrote:
> To prepare for future generalization work on the CMA area management
> code, we need to separate the core CMA management code from the DMA
> APIs. We will extend these core functions to cover the requirements of
> ppc KVM's CMA area management in the following patches. This separation
> lets us extend the core functions without touching the DMA APIs.
> 
> Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
> 
> diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
> index fb0cdce..8a44c82 100644
> --- a/drivers/base/dma-contiguous.c
> +++ b/drivers/base/dma-contiguous.c
> @@ -231,9 +231,9 @@ core_initcall(cma_init_reserved_areas);
>   * If @fixed is true, reserve contiguous area at exactly @base.  If false,
>   * reserve in range from @base to @limit.
>   */
> -int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
> -				       phys_addr_t limit, struct cma **res_cma,
> -				       bool fixed)
> +static int __init __dma_contiguous_reserve_area(phys_addr_t size,
> +				phys_addr_t base, phys_addr_t limit,
> +				struct cma **res_cma, bool fixed)
>  {
>  	struct cma *cma = &cma_areas[cma_area_count];
>  	phys_addr_t alignment;
> @@ -288,16 +288,30 @@ int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
>  
>  	pr_info("%s(): reserved %ld MiB at %08lx\n",
>  		__func__, (unsigned long)size / SZ_1M, (unsigned long)base);
> -
> -	/* Architecture specific contiguous memory fixup. */
> -	dma_contiguous_early_fixup(base, size);
>  	return 0;
> +
>  err:
>  	pr_err("%s(): failed to reserve %ld MiB\n",
>  		__func__, (unsigned long)size / SZ_1M);
>  	return ret;
>  }
>  
> +int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
> +				       phys_addr_t limit, struct cma **res_cma,
> +				       bool fixed)
> +{
> +	int ret;
> +
> +	ret = __dma_contiguous_reserve_area(size, base, limit, res_cma, fixed);
> +	if (ret)
> +		return ret;
> +
> +	/* Architecture specific contiguous memory fixup. */
> +	dma_contiguous_early_fixup(base, size);

In the old code, base and size were aligned to the CMA alignment before
being passed into the arch fixup, but your patch changes that: the wrapper
now passes the caller's original, unaligned values. I haven't looked into
what side effects this might have, but I want to confirm it is intentional.
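
If keeping the old behaviour is what we want, here is an untested sketch of
one way to do it. It assumes the wrapper can read the aligned values back
from the cma area that __dma_contiguous_reserve_area() just filled in, i.e.
the base_pfn/count fields of struct cma used elsewhere in this file:

int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
				       phys_addr_t limit, struct cma **res_cma,
				       bool fixed)
{
	int ret;

	ret = __dma_contiguous_reserve_area(size, base, limit, res_cma, fixed);
	if (ret)
		return ret;

	/*
	 * Untested sketch: re-read the aligned base/size recorded for the
	 * reserved area so the arch fixup sees the same values as before.
	 */
	base = PFN_PHYS((*res_cma)->base_pfn);
	size = (phys_addr_t)(*res_cma)->count << PAGE_SHIFT;

	/* Architecture specific contiguous memory fixup. */
	dma_contiguous_early_fixup(base, size);

	return 0;
}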

> +
> +	return 0;
> +}
> +
>  static void clear_cma_bitmap(struct cma *cma, unsigned long pfn, int count)
>  {
>  	mutex_lock(&cma->lock);
> @@ -316,20 +330,16 @@ static void clear_cma_bitmap(struct cma *cma, unsigned long pfn, int count)
>   * global one. Requires architecture specific dev_get_cma_area() helper
>   * function.
>   */
> -struct page *dma_alloc_from_contiguous(struct device *dev, int count,
> +static struct page *__dma_alloc_from_contiguous(struct cma *cma, int count,
>  				       unsigned int align)
>  {
>  	unsigned long mask, pfn, pageno, start = 0;
> -	struct cma *cma = dev_get_cma_area(dev);
>  	struct page *page = NULL;
>  	int ret;
>  
>  	if (!cma || !cma->count)
>  		return NULL;
>  
> -	if (align > CONFIG_CMA_ALIGNMENT)
> -		align = CONFIG_CMA_ALIGNMENT;
> -
>  	pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
>  		 count, align);
>  
> @@ -377,6 +387,17 @@ struct page *dma_alloc_from_contiguous(struct device *dev, int count,
>  	return page;
>  }
>  

Please move the kernel-doc description from __dma_alloc_from_contiguous()
to this exported API, since that is what callers will look at.
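
I.e., roughly like this above the new wrapper (sketch only; the parameter
descriptions are the ones from the existing comment block):

/**
 * dma_alloc_from_contiguous() - allocate pages from contiguous area
 * @dev:   Pointer to device for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 *
 * This function allocates memory buffer for specified device. It uses
 * device specific contiguous memory area if available or the default
 * global one. Requires architecture specific dev_get_cma_area() helper
 * function.
 */
struct page *dma_alloc_from_contiguous(struct device *dev, int count,
				       unsigned int align)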

> +struct page *dma_alloc_from_contiguous(struct device *dev, int count,
> +				       unsigned int align)
> +{
> +	struct cma *cma = dev_get_cma_area(dev);
> +
> +	if (align > CONFIG_CMA_ALIGNMENT)
> +		align = CONFIG_CMA_ALIGNMENT;
> +
> +	return __dma_alloc_from_contiguous(cma, count, align);
> +}
> +
>  /**
>   * dma_release_from_contiguous() - release allocated pages
>   * @dev:   Pointer to device for which the pages were allocated.
> @@ -387,10 +408,9 @@ struct page *dma_alloc_from_contiguous(struct device *dev, int count,
>   * It returns false when provided pages do not belong to contiguous area and
>   * true otherwise.
>   */
> -bool dma_release_from_contiguous(struct device *dev, struct page *pages,
> +static bool __dma_release_from_contiguous(struct cma *cma, struct page *pages,
>  				 int count)
>  {
> -	struct cma *cma = dev_get_cma_area(dev);
>  	unsigned long pfn;
>  
>  	if (!cma || !pages)
> @@ -410,3 +430,11 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages,
>  
>  	return true;
>  }
> +

Ditto.
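
That is, the dma_release_from_contiguous() kernel-doc quoted above would
sit directly over the exported wrapper, roughly:

/**
 * dma_release_from_contiguous() - release allocated pages
 * @dev:   Pointer to device for which the pages were allocated.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by dma_alloc_from_contiguous().
 * It returns false when provided pages do not belong to contiguous area and
 * true otherwise.
 */
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
				 int count)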

> +bool dma_release_from_contiguous(struct device *dev, struct page *pages,
> +				 int count)
> +{
> +	struct cma *cma = dev_get_cma_area(dev);
> +
> +	return __dma_release_from_contiguous(cma, pages, count);
> +}
> -- 
> 1.7.9.5

-- 
Kind regards,
Minchan Kim

Thread overview:
2014-06-12  3:21 [PATCH v2 00/10] CMA: generalize CMA reserved area management code Joonsoo Kim
2014-06-12  3:21 ` [PATCH v2 01/10] DMA, CMA: clean-up log message Joonsoo Kim
2014-06-12  4:41   ` Aneesh Kumar K.V
2014-06-12  5:53     ` Joonsoo Kim
2014-06-12  8:55       ` Michal Nazarewicz
2014-06-12  9:53         ` Michal Nazarewicz
2014-06-16  5:18           ` Joonsoo Kim
2014-06-12  5:18   ` Minchan Kim
2014-06-12  5:55     ` Joonsoo Kim
2014-06-12  8:15   ` Zhang Yanfei
2014-06-12  8:56   ` Michal Nazarewicz
2014-06-12  3:21 ` [PATCH v2 02/10] DMA, CMA: fix possible memory leak Joonsoo Kim
2014-06-12  4:43   ` Aneesh Kumar K.V
2014-06-12  5:25   ` Minchan Kim
2014-06-12  5:58     ` Joonsoo Kim
2014-06-12  8:19       ` Zhang Yanfei
2014-06-12  9:47   ` Michal Nazarewicz
2014-06-12  3:21 ` [PATCH v2 03/10] DMA, CMA: separate core cma management codes from DMA APIs Joonsoo Kim
2014-06-12  4:44   ` Aneesh Kumar K.V
2014-06-12  5:37   ` Minchan Kim [this message]
2014-06-16  5:24     ` Joonsoo Kim
2014-06-12  9:55   ` Michal Nazarewicz
2014-06-12  3:21 ` [PATCH v2 04/10] DMA, CMA: support alignment constraint on cma region Joonsoo Kim
2014-06-12  4:50   ` Aneesh Kumar K.V
2014-06-12  5:52   ` Minchan Kim
2014-06-12  6:07     ` Joonsoo Kim
2014-06-12 10:02   ` Michal Nazarewicz
2014-06-16  5:19     ` Joonsoo Kim
2014-06-12  3:21 ` [PATCH v2 05/10] DMA, CMA: support arbitrary bitmap granularity Joonsoo Kim
2014-06-12  6:06   ` Minchan Kim
2014-06-12  6:43     ` Joonsoo Kim
2014-06-12  6:42       ` Minchan Kim
2014-06-12  7:08   ` Minchan Kim
2014-06-12  7:25     ` Zhang Yanfei
2014-06-12  7:41     ` Joonsoo Kim
2014-06-12  8:28   ` Zhang Yanfei
2014-06-12 10:19   ` Michal Nazarewicz
2014-06-16  5:23     ` Joonsoo Kim
2014-06-14 10:09   ` Aneesh Kumar K.V
2014-06-12  3:21 ` [PATCH v2 06/10] CMA: generalize CMA reserved area management functionality Joonsoo Kim
2014-06-12  7:13   ` Minchan Kim
2014-06-12  7:42     ` Joonsoo Kim
2014-06-12  8:29   ` Zhang Yanfei
2014-06-14 10:06   ` Aneesh Kumar K.V
2014-06-14 10:08   ` Aneesh Kumar K.V
2014-06-14 10:16   ` Aneesh Kumar K.V
2014-06-16  5:27     ` Joonsoo Kim
2014-06-12  3:21 ` [PATCH v2 07/10] PPC, KVM, CMA: use general CMA reserved area management framework Joonsoo Kim
2014-06-14  8:53   ` Aneesh Kumar K.V
2014-06-16  5:34     ` Joonsoo Kim
2014-06-16  7:02       ` Aneesh Kumar K.V
2014-06-14 10:05   ` Aneesh Kumar K.V
2014-06-16  5:29     ` Joonsoo Kim
2014-06-12  3:21 ` [PATCH v2 08/10] mm, cma: clean-up cma allocation error path Joonsoo Kim
2014-06-12  7:16   ` Minchan Kim
2014-06-12  8:31   ` Zhang Yanfei
2014-06-12 11:34   ` Michal Nazarewicz
2014-06-14  7:18   ` Aneesh Kumar K.V
2014-06-12  3:21 ` [PATCH v2 09/10] mm, cma: move output param to the end of param list Joonsoo Kim
2014-06-12  7:19   ` Minchan Kim
2014-06-12  7:43     ` Joonsoo Kim
2014-06-12 11:38   ` Michal Nazarewicz
2014-06-14  7:20   ` Aneesh Kumar K.V
2014-06-12  3:21 ` [PATCH v2 10/10] mm, cma: use spinlock instead of mutex Joonsoo Kim
2014-06-12  7:40   ` Minchan Kim
2014-06-12  7:56     ` Joonsoo Kim
2014-06-14  7:25 ` [PATCH v2 00/10] CMA: generalize CMA reserved area management code Aneesh Kumar K.V
2014-06-16  5:32   ` Joonsoo Kim
2014-06-16  7:04     ` Aneesh Kumar K.V
