From: Chao Zhu <chaozhu@linux.vnet.ibm.com>
To: dev@dpdk.org
Subject: [PATCH v3 07/14] Add vector memcpy for IBM Power architecture
Date: Sun, 23 Nov 2014 20:22:15 -0500
Message-ID: <1416792142-23132-8-git-send-email-chaozhu@linux.vnet.ibm.com>
In-Reply-To: <1416792142-23132-1-git-send-email-chaozhu@linux.vnet.ibm.com>

The SSE-based memory copy in DPDK supports only x86. This patch adds
AltiVec-based memory copy functions for the IBM Power architecture. The
implementation includes altivec.h, which requires GCC >= 4.8.
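
A quick caller-side sketch, not part of the patch (copy_packet() is a
hypothetical function, shown only to illustrate the dispatch): the
rte_memcpy macro defined below routes compile-time-constant sizes to
the compiler's memcpy() and runtime sizes to the new VSX-backed
rte_memcpy_func():

	#include <stddef.h>
	#include <stdint.h>
	#include <rte_memcpy.h>

	void
	copy_packet(uint8_t *dst, const uint8_t *src, size_t n)
	{
		/* Constant size: the macro expands to memcpy(), which
		 * the compiler can inline and optimize directly. */
		rte_memcpy(dst, src, 64);

		/* Runtime size: the macro calls rte_memcpy_func(),
		 * which copies 16 bytes at a time with
		 * vec_vsx_ld()/vec_vsx_st(). */
		rte_memcpy(dst, src, n);
	}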

Signed-off-by: Chao Zhu <chaozhu@linux.vnet.ibm.com>
---
 .../common/include/arch/ppc_64/rte_memcpy.h        |  224 ++++++++++++++++++++
 1 files changed, 224 insertions(+), 0 deletions(-)
 create mode 100644 lib/librte_eal/common/include/arch/ppc_64/rte_memcpy.h

diff --git a/lib/librte_eal/common/include/arch/ppc_64/rte_memcpy.h b/lib/librte_eal/common/include/arch/ppc_64/rte_memcpy.h
new file mode 100644
index 0000000..b9b8ddc
--- /dev/null
+++ b/lib/librte_eal/common/include/arch/ppc_64/rte_memcpy.h
@@ -0,0 +1,224 @@
+/*
+ *   BSD LICENSE
+ *
+ *   Copyright (C) IBM Corporation 2014.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of IBM Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef _RTE_MEMCPY_PPC_64_H_
+#define _RTE_MEMCPY_PPC_64_H_
+
+#include <stdint.h>
+#include <string.h>
+/* To include altivec.h, GCC version must be >= 4.8. */
+#include <altivec.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "generic/rte_memcpy.h"
+
+static inline void
+rte_mov16(uint8_t *dst, const uint8_t *src)
+{
+	vec_vsx_st(vec_vsx_ld(0, src), 0, dst);
+}
+
+static inline void
+rte_mov32(uint8_t *dst, const uint8_t *src)
+{
+	vec_vsx_st(vec_vsx_ld(0, src), 0, dst);
+	vec_vsx_st(vec_vsx_ld(16, src), 16, dst);
+}
+
+static inline void
+rte_mov48(uint8_t *dst, const uint8_t *src)
+{
+	vec_vsx_st(vec_vsx_ld(0, src), 0, dst);
+	vec_vsx_st(vec_vsx_ld(16, src), 16, dst);
+	vec_vsx_st(vec_vsx_ld(32, src), 32, dst);
+}
+
+static inline void
+rte_mov64(uint8_t *dst, const uint8_t *src)
+{
+	vec_vsx_st(vec_vsx_ld(0, src), 0, dst);
+	vec_vsx_st(vec_vsx_ld(16, src), 16, dst);
+	vec_vsx_st(vec_vsx_ld(32, src), 32, dst);
+	vec_vsx_st(vec_vsx_ld(48, src), 48, dst);
+}
+
+static inline void
+rte_mov128(uint8_t *dst, const uint8_t *src)
+{
+	vec_vsx_st(vec_vsx_ld(0, src), 0, dst);
+	vec_vsx_st(vec_vsx_ld(16, src), 16, dst);
+	vec_vsx_st(vec_vsx_ld(32, src), 32, dst);
+	vec_vsx_st(vec_vsx_ld(48, src), 48, dst);
+	vec_vsx_st(vec_vsx_ld(64, src), 64, dst);
+	vec_vsx_st(vec_vsx_ld(80, src), 80, dst);
+	vec_vsx_st(vec_vsx_ld(96, src), 96, dst);
+	vec_vsx_st(vec_vsx_ld(112, src), 112, dst);
+}
+
+static inline void
+rte_mov256(uint8_t *dst, const uint8_t *src)
+{
+	rte_mov128(dst, src);
+	rte_mov128(dst + 128, src + 128);
+}
+
+#define rte_memcpy(dst, src, n)              \
+	((__builtin_constant_p(n)) ?          \
+	memcpy((dst), (src), (n)) :          \
+	rte_memcpy_func((dst), (src), (n)))
+
+static inline void *
+rte_memcpy_func(void *dst, const void *src, size_t n)
+{
+	void *ret = dst;
+
+	/* We can't copy < 16 bytes using vector registers, so do it manually. */
+	if (n < 16) {
+		if (n & 0x01) {
+			*(uint8_t *)dst = *(const uint8_t *)src;
+			dst = (uint8_t *)dst + 1;
+			src = (const uint8_t *)src + 1;
+		}
+		if (n & 0x02) {
+			*(uint16_t *)dst = *(const uint16_t *)src;
+			dst = (uint16_t *)dst + 1;
+			src = (const uint16_t *)src + 1;
+		}
+		if (n & 0x04) {
+			*(uint32_t *)dst = *(const uint32_t *)src;
+			dst = (uint32_t *)dst + 1;
+			src = (const uint32_t *)src + 1;
+		}
+		if (n & 0x08) {
+			*(uint64_t *)dst = *(const uint64_t *)src;
+		}
+		return ret;
+	}
+
+	/* Special fast cases for <= 128 bytes */
+	if (n <= 32) {
+		rte_mov16((uint8_t *)dst, (const uint8_t *)src);
+		rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
+		return ret;
+	}
+
+	if (n <= 64) {
+		rte_mov32((uint8_t *)dst, (const uint8_t *)src);
+		rte_mov32((uint8_t *)dst - 32 + n, (const uint8_t *)src - 32 + n);
+		return ret;
+	}
+
+	if (n <= 128) {
+		rte_mov64((uint8_t *)dst, (const uint8_t *)src);
+		rte_mov64((uint8_t *)dst - 64 + n, (const uint8_t *)src - 64 + n);
+		return ret;
+	}
+
+	/*
+	 * Handle large copies (> 128 bytes). This combination of 256-, 64-
+	 * and 16-byte copies was found to be faster than using 128- and
+	 * 32-byte copies as well.
+	 */
+	for ( ; n >= 256; n -= 256) {
+		rte_mov256((uint8_t *)dst, (const uint8_t *)src);
+		dst = (uint8_t *)dst + 256;
+		src = (const uint8_t *)src + 256;
+	}
+
+	/*
+	 * Split the remaining bytes (fewer than 256) into 64-byte (2^6)
+	 * chunks.
+	 * Using incrementing integers in the case labels of a switch
+	 * statement encourages the compiler to use a jump table. To get
+	 * incrementing integers, we shift the 2 relevant bits to the LSB
+	 * position first to get decrementing integers, and then subtract.
+	 */
+	switch (3 - (n >> 6)) {
+	case 0x00:
+		rte_mov64((uint8_t *)dst, (const uint8_t *)src);
+		n -= 64;
+		dst = (uint8_t *)dst + 64;
+		src = (const uint8_t *)src + 64;      /* fallthrough */
+	case 0x01:
+		rte_mov64((uint8_t *)dst, (const uint8_t *)src);
+		n -= 64;
+		dst = (uint8_t *)dst + 64;
+		src = (const uint8_t *)src + 64;      /* fallthrough */
+	case 0x02:
+		rte_mov64((uint8_t *)dst, (const uint8_t *)src);
+		n -= 64;
+		dst = (uint8_t *)dst + 64;
+		src = (const uint8_t *)src + 64;      /* fallthrough */
+	default:
+		;
+	}
+
+	/*
+	 * Split the remaining bytes (fewer than 64) into 16-byte (2^4)
+	 * chunks, using the same switch structure as above.
+	 */
+	switch (3 - (n >> 4)) {
+	case 0x00:
+		rte_mov16((uint8_t *)dst, (const uint8_t *)src);
+		n -= 16;
+		dst = (uint8_t *)dst + 16;
+		src = (const uint8_t *)src + 16;      /* fallthrough */
+	case 0x01:
+		rte_mov16((uint8_t *)dst, (const uint8_t *)src);
+		n -= 16;
+		dst = (uint8_t *)dst + 16;
+		src = (const uint8_t *)src + 16;      /* fallthrough */
+	case 0x02:
+		rte_mov16((uint8_t *)dst, (const uint8_t *)src);
+		n -= 16;
+		dst = (uint8_t *)dst + 16;
+		src = (const uint8_t *)src + 16;      /* fallthrough */
+	default:
+		;
+	}
+
+	/* Copy any remaining bytes, without going beyond the end of the buffers. */
+	if (n != 0) {
+		rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
+	}
+	return ret;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_MEMCPY_PPC_64_H_ */
+
-- 
1.7.1
