* [Qemu-devel] [PATCH] x86_64: optimise muldiv64 for x86_64 architecture
@ 2015-01-09  9:53 Frediano Ziglio
  2015-01-09 20:00 ` Richard Henderson
  0 siblings, 1 reply; 8+ messages in thread
From: Frediano Ziglio @ 2015-01-09  9:53 UTC
  To: Paolo Bonzini, Anthony Liguori, Stefan Hajnoczi
  Cc: Frediano Ziglio, qemu-devel

As this platform can do multiply/divide using 128-bit precision, use
these instructions to implement it.

Signed-off-by: Frediano Ziglio <frediano.ziglio@huawei.com>
---
 include/qemu-common.h | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/include/qemu-common.h b/include/qemu-common.h
index f862214..5366220 100644
--- a/include/qemu-common.h
+++ b/include/qemu-common.h
@@ -370,6 +370,7 @@ static inline uint8_t from_bcd(uint8_t val)
 }
 
 /* compute with 96 bit intermediate result: (a*b)/c */
+#ifndef __x86_64__
 static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c)
 {
     union {
@@ -392,6 +393,18 @@ static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c)
     res.l.low = (((rh % c) << 32) + (rl & 0xffffffff)) / c;
     return res.ll;
 }
+#else
+static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c)
+{
+    uint64_t res;
+
+    asm ("mulq %2\n\tdivq %3"
+         : "=a"(res)
+         : "a"(a), "qm"((uint64_t) b), "qm"((uint64_t)c)
+         : "rdx", "cc");
+    return res;
+}
+#endif
 
 /* Round number down to multiple */
 #define QEMU_ALIGN_DOWN(n, m) ((n) / (m) * (m))
-- 
1.9.1
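
On x86_64, mulq leaves the full 128-bit product of RAX and its operand in
RDX:RAX, and divq then divides that 128-bit value by its operand, leaving
the quotient in RAX and the remainder in RDX, so the asm above computes
(a*b)/c with a full 128-bit intermediate. A portable sketch of the same
computation, assuming a compiler with GCC's unsigned __int128 extension
(available in GCC and Clang on x86_64), could read:

    #include <stdint.h>

    /* Sketch only: semantically equivalent to the asm version above,
     * assuming unsigned __int128 support.  The compiler emits mulq for
     * the multiply but may call a runtime helper such as __udivti3 for
     * the 128-by-64-bit division instead of a single divq, because divq
     * faults when the quotient does not fit in 64 bits. */
    static inline uint64_t muldiv64_i128(uint64_t a, uint32_t b, uint32_t c)
    {
        return (unsigned __int128)a * b / c;
    }

Note that the two implementations in the patch also differ on overflow:
divq raises a divide-error exception (#DE) when the quotient exceeds 64
bits, whereas the generic 96-bit C version silently truncates the high
word of the result.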

* [Qemu-devel] [PATCH] x86_64: optimise muldiv64 for x86_64 architecture
@ 2015-01-09 10:27 Frediano Ziglio
  2015-01-09 10:35 ` Paolo Bonzini
  0 siblings, 1 reply; 8+ messages in thread
From: Frediano Ziglio @ 2015-01-09 10:27 UTC
  To: Paolo Bonzini, Anthony Liguori, Stefan Hajnoczi
  Cc: Frediano Ziglio, qemu-devel

As this platform can do multiply/divide using 128-bit precision, use
these instructions to implement it.

Signed-off-by: Frediano Ziglio <frediano.ziglio@huawei.com>
---
 include/qemu-common.h | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/include/qemu-common.h b/include/qemu-common.h
index f862214..5366220 100644
--- a/include/qemu-common.h
+++ b/include/qemu-common.h
@@ -370,6 +370,7 @@ static inline uint8_t from_bcd(uint8_t val)
 }
 
 /* compute with 96 bit intermediate result: (a*b)/c */
+#ifndef __x86_64__
 static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c)
 {
     union {
@@ -392,6 +393,18 @@ static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c)
     res.l.low = (((rh % c) << 32) + (rl & 0xffffffff)) / c;
     return res.ll;
 }
+#else
+static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c)
+{
+    uint64_t res;
+
+    asm ("mulq %2\n\tdivq %3"
+         : "=a"(res)
+         : "a"(a), "qm"((uint64_t) b), "qm"((uint64_t)c)
+         : "rdx", "cc");
+    return res;
+}
+#endif
 
 /* Round number down to multiple */
 #define QEMU_ALIGN_DOWN(n, m) ((n) / (m) * (m))
-- 
1.9.1
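
As a quick sanity check of the asm path against an independent reference,
a minimal standalone harness (hypothetical, not part of the patch; it
assumes an x86_64 host and __int128 support) could look like:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* The asm implementation exactly as in the patch. */
    static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c)
    {
        uint64_t res;

        asm ("mulq %2\n\tdivq %3"
             : "=a"(res)
             : "a"(a), "qm"((uint64_t)b), "qm"((uint64_t)c)
             : "rdx", "cc");
        return res;
    }

    /* Independent reference using the compiler's 128-bit arithmetic. */
    static uint64_t muldiv64_ref(uint64_t a, uint32_t b, uint32_t c)
    {
        return (unsigned __int128)a * b / c;
    }

    int main(void)
    {
        /* Inputs are chosen so that a*b/c fits in 64 bits, since divq
         * would otherwise raise #DE.  The middle case makes a*b exceed
         * 64 bits, exercising the wide intermediate. */
        assert(muldiv64(0, 1, 1) == muldiv64_ref(0, 1, 1));
        assert(muldiv64(0x123456789abcdefULL, 1000, 1000000) ==
               muldiv64_ref(0x123456789abcdefULL, 1000, 1000000));
        assert(muldiv64(UINT64_MAX, 1, 1) == muldiv64_ref(UINT64_MAX, 1, 1));
        printf("ok\n");
        return 0;
    }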



Thread overview: 8+ messages
2015-01-09  9:53 [Qemu-devel] [PATCH] x86_64: optimise muldiv64 for x86_64 architecture Frediano Ziglio
2015-01-09 20:00 ` Richard Henderson
2015-01-09 10:27 Frediano Ziglio
2015-01-09 10:35 ` Paolo Bonzini
2015-01-09 11:04   ` Frediano Ziglio
2015-01-09 11:24     ` Paolo Bonzini
2015-01-09 11:38       ` Peter Maydell
2015-01-09 12:07       ` Frediano Ziglio
