* [PATCH v2 0/2] Introducing functionality for a group of 4 time64 syscalls
@ 2020-07-27 11:23 Filip Bozuta
  2020-07-27 11:23 ` [PATCH v2 1/2] linux-user: Add support for two 'clock_nanosleep_time64()' and 'clock_adjtime64()' Filip Bozuta
  2020-07-27 11:23 ` [PATCH v2 2/2] linux-user: Add support for 'rt_sigtimedwait_time64()' and 'sched_rr_get_interval_time64()' Filip Bozuta
  0 siblings, 2 replies; 5+ messages in thread
From: Filip Bozuta @ 2020-07-27 11:23 UTC (permalink / raw)
  To: qemu-devel; +Cc: Riku Voipio, Laurent Vivier, Filip Bozuta

This two-patch series introduces functionality for a group
of four 2038-safe (time64) syscalls.

The list of implemented syscalls and implementation details
can be found in the patch commit messages.

Testing method:

    The implemented syscalls were tested using existing tests
    from the LTP test suite, which was built inside a chroot.
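
    For reference, a minimal standalone check (illustrative only, not
    part of this series or of the LTP tests) that exercises one of the
    new syscalls directly from a 32-bit guest could look like the sketch
    below; '__NR_clock_nanosleep_time64' is only defined for 32-bit ABIs:

        #include <stdio.h>
        #include <time.h>
        #include <unistd.h>
        #include <sys/syscall.h>

        int main(void)
        {
        #ifdef __NR_clock_nanosleep_time64
            /* the time64 variant uses 64-bit seconds even on 32-bit ABIs */
            struct { long long tv_sec; long long tv_nsec; } req = { 1, 0 }, rem;
            long ret = syscall(__NR_clock_nanosleep_time64, CLOCK_MONOTONIC,
                               0, &req, &rem);
            printf("clock_nanosleep_time64() returned %ld\n", ret);
        #else
            printf("no time64 syscalls on this ABI\n");
        #endif
            return 0;
        }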

*v2:
    -Added a check for 'clock_nanosleep_time64()' which returns
     '-TARGET_EFAULT' if conversion of 'struct timespec64'
     between host and target fails

    -Removed unnecessary special errno handling for 'PPC'

Filip Bozuta (2):
  linux-user: Add support for two 'clock_nanosleep_time64()' and
    'clock_adjtime64()'
  linux-user: Add support for 'rt_sigtimedwait_time64()' and
    'sched_rr_get_interval_time64()'

 linux-user/syscall.c      | 192 +++++++++++++++++++++++++++++++++++++-
 linux-user/syscall_defs.h |  31 ++++++
 2 files changed, 221 insertions(+), 2 deletions(-)

-- 
2.25.1




* [PATCH v2 1/2] linux-user: Add support for two 'clock_nanosleep_time64()' and 'clock_adjtime64()'
  2020-07-27 11:23 [PATCH v2 0/2] Introducing functionality for a group of 4 time64 syscalls Filip Bozuta
@ 2020-07-27 11:23 ` Filip Bozuta
  2020-08-24 16:40   ` Laurent Vivier
  2020-07-27 11:23 ` [PATCH v2 2/2] linux-user: Add support for 'rt_sigtimedwait_time64()' and 'sched_rr_get_interval_time64()' Filip Bozuta
  1 sibling, 1 reply; 5+ messages in thread
From: Filip Bozuta @ 2020-07-27 11:23 UTC (permalink / raw)
  To: qemu-devel; +Cc: Riku Voipio, Laurent Vivier, Filip Bozuta

This patch implements functionality for the following time64 syscalls:

*clock_nanosleep_time64()

    This is a year 2038 safe variant of syscall:
    int clock_nanosleep(clockid_t clockid, int flags,
                        const struct timespec *request,
                        struct timespec *remain)
    --high-resolution sleep with specifiable clock--
    man page: https://man7.org/linux/man-pages/man2/clock_nanosleep.2.html

*clock_adjtime64()

    This is a year 2038 safe variant of syscall:
    int clock_adjtime(clockid_t clk_id, struct timex *buf)
    --tune kernel clock--
    man page: https://man7.org/linux/man-pages/man2/clock_adjtime.2.html

Implementation notes:

    Syscall 'clock_nanosleep_time64()' was implemented similarly
    to syscall 'clock_nanosleep()' except that 'host_to_target_timespec64()'
    and 'target_to_host_timespec64()' were used instead of the regular
    'host_to_target_timespec()' and 'target_to_host_timespec()'.

    For 'clock_adjtime64()' a 64-bit target kernel version of 'struct timex'
    was defined in 'syscall_defs.h': 'struct target__kernel_timex'.
    This type was used to convert the values of the 64-bit timex type between
    host and target. For this purpose the 64-bit timex conversion functions
    'target_to_host_timex64()' and 'host_to_target_timex64()' were added.
    The existing function 'copy_to_user_timeval64()' was used to convert the
    field 'time', which is of type 'struct timeval', from host to target.
    Function 'copy_from_user_timeval64()' was added in this patch and
    used to convert the 'time' field from target to host.
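
    As an illustration of the field those helpers handle (this snippet is
    not part of the patch), a guest program doing a read-only query with
    'clock_adjtime()' gets the current time back in exactly that
    'struct timeval' field:

        #include <stdio.h>
        #include <time.h>
        #include <sys/timex.h>

        int main(void)
        {
            struct timex tx = { 0 };            /* modes == 0: query only */

            if (clock_adjtime(CLOCK_REALTIME, &tx) >= 0) {
                /* tx.time is the 'struct timeval' member converted above */
                printf("now: %lld s\n", (long long)tx.time.tv_sec);
            }
            return 0;
        }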

Signed-off-by: Filip Bozuta <Filip.Bozuta@syrmia.com>
---
 linux-user/syscall.c      | 139 +++++++++++++++++++++++++++++++++++++-
 linux-user/syscall_defs.h |  31 +++++++++
 2 files changed, 168 insertions(+), 2 deletions(-)

diff --git a/linux-user/syscall.c b/linux-user/syscall.c
index 05f03919ff..c1b36ea698 100644
--- a/linux-user/syscall.c
+++ b/linux-user/syscall.c
@@ -809,7 +809,8 @@ safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
 safe_syscall2(int, nanosleep, const struct timespec *, req,
               struct timespec *, rem)
 #endif
-#ifdef TARGET_NR_clock_nanosleep
+#if defined(TARGET_NR_clock_nanosleep) || \
+    defined(TARGET_NR_clock_nanosleep_time64)
 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
               const struct timespec *, req, struct timespec *, rem)
 #endif
@@ -1205,8 +1206,25 @@ static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
     return 0;
 }
 
+static inline abi_long copy_from_user_timeval64(struct timeval *tv,
+                                                abi_ulong target_tv_addr)
+{
+    struct target__kernel_sock_timeval *target_tv;
+
+    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
+        return -TARGET_EFAULT;
+    }
+
+    __get_user(tv->tv_sec, &target_tv->tv_sec);
+    __get_user(tv->tv_usec, &target_tv->tv_usec);
+
+    unlock_user_struct(target_tv, target_tv_addr, 0);
+
+    return 0;
+}
+
 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
-                                             const struct timeval *tv)
+                                              const struct timeval *tv)
 {
     struct target__kernel_sock_timeval *target_tv;
 
@@ -6771,6 +6789,87 @@ static inline abi_long host_to_target_timex(abi_long target_addr,
 }
 #endif
 
+
+#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
+static inline abi_long target_to_host_timex64(struct timex *host_tx,
+                                              abi_long target_addr)
+{
+    struct target__kernel_timex *target_tx;
+
+    if (copy_from_user_timeval64(&host_tx->time, target_addr +
+                                 offsetof(struct target__kernel_timex,
+                                          time))) {
+        return -TARGET_EFAULT;
+    }
+
+    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
+        return -TARGET_EFAULT;
+    }
+
+    __get_user(host_tx->modes, &target_tx->modes);
+    __get_user(host_tx->offset, &target_tx->offset);
+    __get_user(host_tx->freq, &target_tx->freq);
+    __get_user(host_tx->maxerror, &target_tx->maxerror);
+    __get_user(host_tx->esterror, &target_tx->esterror);
+    __get_user(host_tx->status, &target_tx->status);
+    __get_user(host_tx->constant, &target_tx->constant);
+    __get_user(host_tx->precision, &target_tx->precision);
+    __get_user(host_tx->tolerance, &target_tx->tolerance);
+    __get_user(host_tx->tick, &target_tx->tick);
+    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
+    __get_user(host_tx->jitter, &target_tx->jitter);
+    __get_user(host_tx->shift, &target_tx->shift);
+    __get_user(host_tx->stabil, &target_tx->stabil);
+    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
+    __get_user(host_tx->calcnt, &target_tx->calcnt);
+    __get_user(host_tx->errcnt, &target_tx->errcnt);
+    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
+    __get_user(host_tx->tai, &target_tx->tai);
+
+    unlock_user_struct(target_tx, target_addr, 0);
+    return 0;
+}
+
+static inline abi_long host_to_target_timex64(abi_long target_addr,
+                                              struct timex *host_tx)
+{
+    struct target__kernel_timex *target_tx;
+
+    if (copy_to_user_timeval64(target_addr +
+                               offsetof(struct target__kernel_timex, time),
+                               &host_tx->time)) {
+        return -TARGET_EFAULT;
+    }
+
+    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
+        return -TARGET_EFAULT;
+    }
+
+    __put_user(host_tx->modes, &target_tx->modes);
+    __put_user(host_tx->offset, &target_tx->offset);
+    __put_user(host_tx->freq, &target_tx->freq);
+    __put_user(host_tx->maxerror, &target_tx->maxerror);
+    __put_user(host_tx->esterror, &target_tx->esterror);
+    __put_user(host_tx->status, &target_tx->status);
+    __put_user(host_tx->constant, &target_tx->constant);
+    __put_user(host_tx->precision, &target_tx->precision);
+    __put_user(host_tx->tolerance, &target_tx->tolerance);
+    __put_user(host_tx->tick, &target_tx->tick);
+    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
+    __put_user(host_tx->jitter, &target_tx->jitter);
+    __put_user(host_tx->shift, &target_tx->shift);
+    __put_user(host_tx->stabil, &target_tx->stabil);
+    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
+    __put_user(host_tx->calcnt, &target_tx->calcnt);
+    __put_user(host_tx->errcnt, &target_tx->errcnt);
+    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
+    __put_user(host_tx->tai, &target_tx->tai);
+
+    unlock_user_struct(target_tx, target_addr, 1);
+    return 0;
+}
+#endif
+
 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
                                                abi_ulong target_addr)
 {
@@ -9726,6 +9825,23 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
             }
         }
         return ret;
+#endif
+#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
+    case TARGET_NR_clock_adjtime64:
+        {
+            struct timex htx, *phtx = &htx;
+
+            if (target_to_host_timex64(phtx, arg2) != 0) {
+                return -TARGET_EFAULT;
+            }
+            ret = get_errno(clock_adjtime(arg1, phtx));
+            if (!is_error(ret) && phtx) {
+                if (host_to_target_timex64(arg2, phtx) != 0) {
+                    return -TARGET_EFAULT;
+                }
+            }
+        }
+        return ret;
 #endif
     case TARGET_NR_getpgid:
         return get_errno(getpgid(arg1));
@@ -11684,6 +11800,25 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
         return ret;
     }
 #endif
+#ifdef TARGET_NR_clock_nanosleep_time64
+    case TARGET_NR_clock_nanosleep_time64:
+    {
+        struct timespec ts;
+
+        if (target_to_host_timespec64(&ts, arg3)) {
+            return -TARGET_EFAULT;
+        }
+
+        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
+                                             &ts, arg4 ? &ts : NULL));
+
+        if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
+            host_to_target_timespec64(arg4, &ts)) {
+            return -TARGET_EFAULT;
+        }
+        return ret;
+    }
+#endif
 
 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
     case TARGET_NR_set_tid_address:
diff --git a/linux-user/syscall_defs.h b/linux-user/syscall_defs.h
index 152ec637cb..3783c5a07e 100644
--- a/linux-user/syscall_defs.h
+++ b/linux-user/syscall_defs.h
@@ -287,6 +287,37 @@ struct target_timex {
     abi_int:32; abi_int:32; abi_int:32;
 };
 
+struct target__kernel_timex {
+    abi_uint modes;               /* Mode selector */
+    abi_int: 32;                  /* pad */
+    abi_llong offset;             /* Time offset */
+    abi_llong freq;               /* Frequency offset */
+    abi_llong maxerror;           /* Maximum error (microseconds) */
+    abi_llong esterror;           /* Estimated error (microseconds) */
+    abi_int status;               /* Clock command/status */
+    abi_int: 32;                  /* pad */
+    abi_llong constant;           /* PLL (phase-locked loop) time constant */
+    abi_llong precision;          /* Clock precision (microseconds, ro) */
+    abi_llong tolerance;          /* Clock freq. tolerance (ppm, ro) */
+    struct target__kernel_sock_timeval time;  /* Current time */
+    abi_llong tick;               /* Microseconds between clock ticks */
+    abi_llong ppsfreq;            /* PPS (pulse per second) frequency */
+    abi_llong jitter;             /* PPS jitter (ro); nanoseconds */
+    abi_int shift;                /* PPS interval duration (seconds) */
+    abi_int: 32;                  /* pad */
+    abi_llong stabil;             /* PPS stability */
+    abi_llong jitcnt;             /* PPS jitter limit exceeded (ro) */
+    abi_llong calcnt;             /* PPS calibration intervals */
+    abi_llong errcnt;             /* PPS calibration errors */
+    abi_llong stbcnt;             /* PPS stability limit exceeded */
+    abi_int tai;                  /* TAI offset */
+
+    /* Further padding bytes to allow for future expansion */
+    abi_int:32; abi_int:32; abi_int:32; abi_int:32;
+    abi_int:32; abi_int:32; abi_int:32; abi_int:32;
+    abi_int:32; abi_int:32; abi_int:32;
+};
+
 typedef abi_long target_clock_t;
 
 #define TARGET_HZ 100
-- 
2.25.1




* [PATCH v2 2/2] linux-user: Add support for 'rt_sigtimedwait_time64()' and 'sched_rr_get_interval_time64()'
  2020-07-27 11:23 [PATCH v2 0/2] Introducing functionality for a group of 4 time64 syscalls Filip Bozuta
  2020-07-27 11:23 ` [PATCH v2 1/2] linux-user: Add support for two 'clock_nanosleep_time64()' and 'clock_adjtime64()' Filip Bozuta
@ 2020-07-27 11:23 ` Filip Bozuta
  2020-08-24 16:43   ` Laurent Vivier
  1 sibling, 1 reply; 5+ messages in thread
From: Filip Bozuta @ 2020-07-27 11:23 UTC (permalink / raw)
  To: qemu-devel; +Cc: Riku Voipio, Laurent Vivier, Filip Bozuta

This patch implements functionality for the following time64 syscalls:

*rt_sigtimedwait_time64()

    This is a year 2038 safe variant of syscall:

    int rt_sigtimedwait(const sigset_t *set, siginfo_t *info,
                        const struct timespec *timeout, size_t sigsetsize)
    --synchronously wait for queued signals--
    man page: https://man7.org/linux/man-pages/man2/rt_sigtimedwait.2.html

*sched_rr_get_interval_time64()

    This is a year 2038 safe variant of syscall:

    int sched_rr_get_interval(pid_t pid, struct timespec *tp)
    --get the SCHED_RR interval for the named process--
    man page: https://man7.org/linux/man-pages/man2/sched_rr_get_interval.2.html
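
    For illustration only (not part of the patch), a guest program
    normally reaches 'rt_sigtimedwait()' through the libc wrapper
    'sigtimedwait()'; a minimal sketch of such a caller:

        #include <signal.h>
        #include <stdio.h>
        #include <time.h>

        int main(void)
        {
            sigset_t set;
            siginfo_t info;
            struct timespec timeout = { 1, 0 };  /* wait at most one second */

            sigemptyset(&set);
            sigaddset(&set, SIGUSR1);
            sigprocmask(SIG_BLOCK, &set, NULL);  /* block it so it can be waited for */

            int sig = sigtimedwait(&set, &info, &timeout);
            if (sig < 0) {
                perror("sigtimedwait");          /* EAGAIN when the timeout expires */
            } else {
                printf("received signal %d\n", sig);
            }
            return 0;
        }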

Implementation notes:

    These syscalls were implemented in a similar way to
    'rt_sigtimedwait()' and 'sched_rr_get_interval()' except
    that the functions 'target_to_host_timespec64()' and
    'host_to_target_timespec64()' were used to convert values
    of 'struct timespec' between host and target.
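
    For illustration only (not part of the patch), the round-robin
    interval can be queried from a guest program as follows; a pid of 0
    refers to the calling process:

        #include <stdio.h>
        #include <sched.h>
        #include <time.h>

        int main(void)
        {
            struct timespec ts;

            if (sched_rr_get_interval(0, &ts) == 0) {
                printf("SCHED_RR interval: %lld.%09ld s\n",
                       (long long)ts.tv_sec, ts.tv_nsec);
            }
            return 0;
        }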

Signed-off-by: Filip Bozuta <Filip.Bozuta@syrmia.com>
---
 linux-user/syscall.c | 53 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 53 insertions(+)

diff --git a/linux-user/syscall.c b/linux-user/syscall.c
index c1b36ea698..35f6dded81 100644
--- a/linux-user/syscall.c
+++ b/linux-user/syscall.c
@@ -8831,6 +8831,48 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
             }
         }
         return ret;
+#endif
+#ifdef TARGET_NR_rt_sigtimedwait_time64
+    case TARGET_NR_rt_sigtimedwait_time64:
+        {
+            sigset_t set;
+            struct timespec uts, *puts;
+            siginfo_t uinfo;
+
+            if (arg4 != sizeof(target_sigset_t)) {
+                return -TARGET_EINVAL;
+            }
+
+            p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
+            if (!p) {
+                return -TARGET_EFAULT;
+            }
+            target_to_host_sigset(&set, p);
+            unlock_user(p, arg1, 0);
+            if (arg3) {
+                puts = &uts;
+                if (target_to_host_timespec64(puts, arg3)) {
+                    return -TARGET_EFAULT;
+                }
+            } else {
+                puts = NULL;
+            }
+            ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
+                                                 SIGSET_T_SIZE));
+            if (!is_error(ret)) {
+                if (arg2) {
+                    p = lock_user(VERIFY_WRITE, arg2,
+                                  sizeof(target_siginfo_t), 0);
+                    if (!p) {
+                        return -TARGET_EFAULT;
+                    }
+                    host_to_target_siginfo(p, &uinfo);
+                    unlock_user(p, arg2, sizeof(target_siginfo_t));
+                }
+                ret = host_to_target_signal(ret);
+            }
+        }
+        return ret;
 #endif
     case TARGET_NR_rt_sigqueueinfo:
         {
@@ -10353,6 +10395,17 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
         }
         return ret;
 #endif
+#ifdef TARGET_NR_sched_rr_get_interval_time64
+    case TARGET_NR_sched_rr_get_interval_time64:
+        {
+            struct timespec ts;
+            ret = get_errno(sched_rr_get_interval(arg1, &ts));
+            if (!is_error(ret)) {
+                ret = host_to_target_timespec64(arg2, &ts);
+            }
+        }
+        return ret;
+#endif
 #if defined(TARGET_NR_nanosleep)
     case TARGET_NR_nanosleep:
         {
-- 
2.25.1




* Re: [PATCH v2 1/2] linux-user: Add support for two 'clock_nanosleep_time64()' and 'clock_adjtime64()'
  2020-07-27 11:23 ` [PATCH v2 1/2] linux-user: Add support for two 'clock_nanosleep_time64()' and 'clock_adjtime64()' Filip Bozuta
@ 2020-08-24 16:40   ` Laurent Vivier
  0 siblings, 0 replies; 5+ messages in thread
From: Laurent Vivier @ 2020-08-24 16:40 UTC (permalink / raw)
  To: Filip Bozuta, qemu-devel; +Cc: Riku Voipio

On 27/07/2020 at 13:23, Filip Bozuta wrote:
> This patch implements functionality for the following time64 syscalls:
> 
> *clock_nanosleep_time64()
> 
>     This is a year 2038 safe variant of syscall:
>     int clock_nanosleep(clockid_t clockid, int flags,
>                         const struct timespec *request,
>                         struct timespec *remain)
>     --high-resolution sleep with specifiable clock--
>     man page: https://man7.org/linux/man-pages/man2/clock_nanosleep.2.html
> 
> *clock_adjtime64()
> 
>     This is a year 2038 safe variant of syscall:
>     int clock_adjtime(clockid_t clk_id, struct timex *buf)
>     --tune kernel clock--
>     man page: https://man7.org/linux/man-pages/man2/clock_adjtime.2.html
> 
> Implementation notes:
> 
>     Syscall 'clock_nanosleep_time64()' was implemented similarly
>     to syscall 'clock_nanosleep()' except that 'host_to_target_timespec64()'
>     and 'target_to_host_timespec64()' were used instead of the regular
>     'host_to_target_timespec()' and 'target_to_host_timespec()'.
> 
>     For 'clock_adjtime64()' a 64-bit target kernel version of 'struct timex'
>     was defined in 'syscall_defs.h': 'struct target__kernel_timex'.
>     This type was used to convert the values of the 64-bit timex type between
>     host and target. For this purpose the 64-bit timex conversion functions
>     'target_to_host_timex64()' and 'host_to_target_timex64()' were added.
>     The existing function 'copy_to_user_timeval64()' was used to convert the
>     field 'time', which is of type 'struct timeval', from host to target.
>     Function 'copy_from_user_timeval64()' was added in this patch and
>     used to convert the 'time' field from target to host.
> 
> Signed-off-by: Filip Bozuta <Filip.Bozuta@syrmia.com>
> ---
>  linux-user/syscall.c      | 139 +++++++++++++++++++++++++++++++++++++-
>  linux-user/syscall_defs.h |  31 +++++++++
>  2 files changed, 168 insertions(+), 2 deletions(-)
> 
> diff --git a/linux-user/syscall.c b/linux-user/syscall.c
> index 05f03919ff..c1b36ea698 100644
> --- a/linux-user/syscall.c
> +++ b/linux-user/syscall.c
> @@ -809,7 +809,8 @@ safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
>  safe_syscall2(int, nanosleep, const struct timespec *, req,
>                struct timespec *, rem)
>  #endif
> -#ifdef TARGET_NR_clock_nanosleep
> +#if defined(TARGET_NR_clock_nanosleep) || \
> +    defined(TARGET_NR_clock_nanosleep_time64)
>  safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
>                const struct timespec *, req, struct timespec *, rem)
>  #endif
> @@ -1205,8 +1206,25 @@ static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
>      return 0;
>  }
>  
> +static inline abi_long copy_from_user_timeval64(struct timeval *tv,
> +                                                abi_ulong target_tv_addr)
> +{
> +    struct target__kernel_sock_timeval *target_tv;
> +
> +    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
> +        return -TARGET_EFAULT;
> +    }
> +
> +    __get_user(tv->tv_sec, &target_tv->tv_sec);
> +    __get_user(tv->tv_usec, &target_tv->tv_usec);
> +
> +    unlock_user_struct(target_tv, target_tv_addr, 0);
> +
> +    return 0;
> +}
> +
>  static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
> -                                             const struct timeval *tv)
> +                                              const struct timeval *tv)
>  {
>      struct target__kernel_sock_timeval *target_tv;
>  
> @@ -6771,6 +6789,87 @@ static inline abi_long host_to_target_timex(abi_long target_addr,
>  }
>  #endif
>  
> +
> +#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
> +static inline abi_long target_to_host_timex64(struct timex *host_tx,
> +                                              abi_long target_addr)
> +{
> +    struct target__kernel_timex *target_tx;
> +
> +    if (copy_from_user_timeval64(&host_tx->time, target_addr +
> +                                 offsetof(struct target__kernel_timex,
> +                                          time))) {
> +        return -TARGET_EFAULT;
> +    }
> +
> +    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
> +        return -TARGET_EFAULT;
> +    }
> +
> +    __get_user(host_tx->modes, &target_tx->modes);
> +    __get_user(host_tx->offset, &target_tx->offset);
> +    __get_user(host_tx->freq, &target_tx->freq);
> +    __get_user(host_tx->maxerror, &target_tx->maxerror);
> +    __get_user(host_tx->esterror, &target_tx->esterror);
> +    __get_user(host_tx->status, &target_tx->status);
> +    __get_user(host_tx->constant, &target_tx->constant);
> +    __get_user(host_tx->precision, &target_tx->precision);
> +    __get_user(host_tx->tolerance, &target_tx->tolerance);
> +    __get_user(host_tx->tick, &target_tx->tick);
> +    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
> +    __get_user(host_tx->jitter, &target_tx->jitter);
> +    __get_user(host_tx->shift, &target_tx->shift);
> +    __get_user(host_tx->stabil, &target_tx->stabil);
> +    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
> +    __get_user(host_tx->calcnt, &target_tx->calcnt);
> +    __get_user(host_tx->errcnt, &target_tx->errcnt);
> +    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
> +    __get_user(host_tx->tai, &target_tx->tai);
> +
> +    unlock_user_struct(target_tx, target_addr, 0);
> +    return 0;
> +}
> +
> +static inline abi_long host_to_target_timex64(abi_long target_addr,
> +                                              struct timex *host_tx)
> +{
> +    struct target__kernel_timex *target_tx;
> +
> +    if (copy_to_user_timeval64(target_addr +
> +                               offsetof(struct target__kernel_timex, time),
> +                               &host_tx->time)) {
> +        return -TARGET_EFAULT;
> +    }
> +
> +    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
> +        return -TARGET_EFAULT;
> +    }
> +
> +    __put_user(host_tx->modes, &target_tx->modes);
> +    __put_user(host_tx->offset, &target_tx->offset);
> +    __put_user(host_tx->freq, &target_tx->freq);
> +    __put_user(host_tx->maxerror, &target_tx->maxerror);
> +    __put_user(host_tx->esterror, &target_tx->esterror);
> +    __put_user(host_tx->status, &target_tx->status);
> +    __put_user(host_tx->constant, &target_tx->constant);
> +    __put_user(host_tx->precision, &target_tx->precision);
> +    __put_user(host_tx->tolerance, &target_tx->tolerance);
> +    __put_user(host_tx->tick, &target_tx->tick);
> +    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
> +    __put_user(host_tx->jitter, &target_tx->jitter);
> +    __put_user(host_tx->shift, &target_tx->shift);
> +    __put_user(host_tx->stabil, &target_tx->stabil);
> +    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
> +    __put_user(host_tx->calcnt, &target_tx->calcnt);
> +    __put_user(host_tx->errcnt, &target_tx->errcnt);
> +    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
> +    __put_user(host_tx->tai, &target_tx->tai);
> +
> +    unlock_user_struct(target_tx, target_addr, 1);
> +    return 0;
> +}
> +#endif
> +
>  static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
>                                                 abi_ulong target_addr)
>  {
> @@ -9726,6 +9825,23 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
>              }
>          }
>          return ret;
> +#endif
> +#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
> +    case TARGET_NR_clock_adjtime64:
> +        {
> +            struct timex htx, *phtx = &htx;

I know you have copied the code from TARGET_NR_clock_adjtime, but I
think phtx is totally useless.

> +
> +            if (target_to_host_timex64(phtx, arg2) != 0) {
> +                return -TARGET_EFAULT;
> +            }
> +            ret = get_errno(clock_adjtime(arg1, phtx));
> +            if (!is_error(ret) && phtx) {

value of phtx doesn't change, so there's no need to check it (see the
sketch below).

> +                if (host_to_target_timex64(arg2, phtx) != 0) {
> +                    return -TARGET_EFAULT;
> +                }
> +            }
> +        }
> +        return ret;
>  #endif
>      case TARGET_NR_getpgid:
>          return get_errno(getpgid(arg1));
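
i.e. with both of the above applied, something along these lines
(untested sketch, for illustration only):

    case TARGET_NR_clock_adjtime64:
        {
            struct timex htx;

            if (target_to_host_timex64(&htx, arg2) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(clock_adjtime(arg1, &htx));
            if (!is_error(ret) && host_to_target_timex64(arg2, &htx) != 0) {
                return -TARGET_EFAULT;
            }
        }
        return ret;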

Thanks,
Laurent



* Re: [PATCH v2 2/2] linux-user: Add support for 'rt_sigtimedwait_time64()' and 'sched_rr_get_interval_time64()'
  2020-07-27 11:23 ` [PATCH v2 2/2] linux-user: Add support for 'rt_sigtimedwait_time64()' and 'sched_rr_get_interval_time64()' Filip Bozuta
@ 2020-08-24 16:43   ` Laurent Vivier
  0 siblings, 0 replies; 5+ messages in thread
From: Laurent Vivier @ 2020-08-24 16:43 UTC (permalink / raw)
  To: Filip Bozuta, qemu-devel; +Cc: Riku Voipio

On 27/07/2020 at 13:23, Filip Bozuta wrote:
> This patch implements functionality for the following time64 syscalls:
> 
> *rt_sigtimedwait_time64()
> 
>     This is a year 2038 safe variant of syscall:
> 
>     int rt_sigtimedwait(const sigset_t *set, siginfo_t *info,
>                         const struct timespec *timeout, size_t sigsetsize)
>     --synchronously wait for queued signals--
>     man page: https://man7.org/linux/man-pages/man2/rt_sigtimedwait.2.html
> 
> *sched_rr_get_interval_time64()
> 
>     This is a year 2038 safe variant of syscall:
> 
>     int sched_rr_get_interval(pid_t pid, struct timespec *tp)
>     --get the SCHED_RR interval for the named process--
>     man page: https://man7.org/linux/man-pages/man2/sched_rr_get_interval.2.html
> 
> Implementation notes:
> 
>     These syscalls were implemented in a similar way to
>     'rt_sigtimedwait()' and 'sched_rr_get_interval()' except
>     that the functions 'target_to_host_timespec64()' and
>     'host_to_target_timespec64()' were used to convert values
>     of 'struct timespec' between host and target.
> 
> Signed-off-by: Filip Bozuta <Filip.Bozuta@syrmia.com>
> ---
>  linux-user/syscall.c | 53 ++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 53 insertions(+)
> 
> diff --git a/linux-user/syscall.c b/linux-user/syscall.c
> index c1b36ea698..35f6dded81 100644
> --- a/linux-user/syscall.c
> +++ b/linux-user/syscall.c
> @@ -8831,6 +8831,48 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
>              }
>          }
>          return ret;
> +#endif
> +#ifdef TARGET_NR_rt_sigtimedwait_time64
> +    case TARGET_NR_rt_sigtimedwait_time64:
> +        {
> +            sigset_t set;
> +            struct timespec uts, *puts;
> +            siginfo_t uinfo;
> +
> +            if (arg4 != sizeof(target_sigset_t)) {
> +                return -TARGET_EINVAL;
> +            }
> +
> +            p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
> +            if (!p) {
> +                return -TARGET_EFAULT;
> +            }
> +            target_to_host_sigset(&set, p);
> +            unlock_user(p, arg1, 0);
> +            if (arg3) {
> +                puts = &uts;
> +                if (target_to_host_timespec64(puts, arg3)) {
> +                    return -TARGET_EFAULT;
> +                }
> +            } else {
> +                puts = NULL;
> +            }
> +            ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
> +                                                 SIGSET_T_SIZE));
> +            if (!is_error(ret)) {
> +                if (arg2) {
> +                    p = lock_user(VERIFY_WRITE, arg2,
> +                                  sizeof(target_siginfo_t), 0);
> +                    if (!p) {
> +                        return -TARGET_EFAULT;
> +                    }
> +                    host_to_target_siginfo(p, &uinfo);
> +                    unlock_user(p, arg2, sizeof(target_siginfo_t));
> +                }
> +                ret = host_to_target_signal(ret);
> +            }
> +        }
> +        return ret;
>  #endif
>      case TARGET_NR_rt_sigqueueinfo:
>          {
> @@ -10353,6 +10395,17 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
>          }
>          return ret;
>  #endif
> +#ifdef TARGET_NR_sched_rr_get_interval_time64
> +    case TARGET_NR_sched_rr_get_interval_time64:
> +        {
> +            struct timespec ts;
> +            ret = get_errno(sched_rr_get_interval(arg1, &ts));
> +            if (!is_error(ret)) {
> +                ret = host_to_target_timespec64(arg2, &ts);
> +            }
> +        }
> +        return ret;
> +#endif
>  #if defined(TARGET_NR_nanosleep)
>      case TARGET_NR_nanosleep:
>          {
> 

Reviewed-by: Laurent Vivier <laurent@vivier.eu>

