* [PATCH 1/2] powerpc/spufs: fix copy_to_user while atomic
@ 2020-04-29 7:03 Jeremy Kerr
2020-04-29 7:03 ` [PATCH 2/2] powerpc/spufs: stop using access_ok Jeremy Kerr
2020-05-04 13:04 ` [PATCH 1/2] powerpc/spufs: fix copy_to_user while atomic Christoph Hellwig
0 siblings, 2 replies; 9+ messages in thread
From: Jeremy Kerr @ 2020-04-29 7:03 UTC (permalink / raw)
To: linuxppc-dev; +Cc: Christoph Hellwig, Arnd Bergmann
Currently, we may perform a copy_to_user (through
simple_read_from_buffer()) while holding a context's register_lock,
while accessing the context save area.
This change uses a temporary buffer for the context save area data,
which we then pass to simple_read_from_buffer.
Includes changes from Christoph Hellwig <hch@lst.de>.
Fixes: bf1ab978be23 ("[POWERPC] coredump: Add SPU elf notes to coredump.")
Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
Reviewed-by: Arnd Bergmann <arnd@arndb.de>
Reviewed-by: Christoph Hellwig <hch@lst.de>
---
arch/powerpc/platforms/cell/spufs/file.c | 113 +++++++++++++++--------
1 file changed, 75 insertions(+), 38 deletions(-)
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index c0f950a3f4e1..b4e1ef650b40 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -1978,8 +1978,9 @@ static ssize_t __spufs_mbox_info_read(struct spu_context *ctx,
static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,
size_t len, loff_t *pos)
{
- int ret;
struct spu_context *ctx = file->private_data;
+ u32 stat, data;
+ int ret;
if (!access_ok(buf, len))
return -EFAULT;
@@ -1988,11 +1989,16 @@ static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,
if (ret)
return ret;
spin_lock(&ctx->csa.register_lock);
- ret = __spufs_mbox_info_read(ctx, buf, len, pos);
+ stat = ctx->csa.prob.mb_stat_R;
+ data = ctx->csa.prob.pu_mb_R;
spin_unlock(&ctx->csa.register_lock);
spu_release_saved(ctx);
- return ret;
+ /* EOF if there's no entry in the mbox */
+ if (!(stat & 0x0000ff))
+ return 0;
+
+ return simple_read_from_buffer(buf, len, pos, &data, sizeof(data));
}
static const struct file_operations spufs_mbox_info_fops = {
@@ -2019,6 +2025,7 @@ static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,
size_t len, loff_t *pos)
{
struct spu_context *ctx = file->private_data;
+ u32 stat, data;
int ret;
if (!access_ok(buf, len))
@@ -2028,11 +2035,16 @@ static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,
if (ret)
return ret;
spin_lock(&ctx->csa.register_lock);
- ret = __spufs_ibox_info_read(ctx, buf, len, pos);
+ stat = ctx->csa.prob.mb_stat_R;
+ data = ctx->csa.priv2.puint_mb_R;
spin_unlock(&ctx->csa.register_lock);
spu_release_saved(ctx);
- return ret;
+ /* EOF if there's no entry in the ibox */
+ if (!(stat & 0xff0000))
+ return 0;
+
+ return simple_read_from_buffer(buf, len, pos, &data, sizeof(data));
}
static const struct file_operations spufs_ibox_info_fops = {
@@ -2041,6 +2053,11 @@ static const struct file_operations spufs_ibox_info_fops = {
.llseek = generic_file_llseek,
};
+static size_t spufs_wbox_info_cnt(struct spu_context *ctx)
+{
+ return (4 - ((ctx->csa.prob.mb_stat_R & 0x00ff00) >> 8)) * sizeof(u32);
+}
+
static ssize_t __spufs_wbox_info_read(struct spu_context *ctx,
char __user *buf, size_t len, loff_t *pos)
{
@@ -2049,7 +2066,7 @@ static ssize_t __spufs_wbox_info_read(struct spu_context *ctx,
u32 wbox_stat;
wbox_stat = ctx->csa.prob.mb_stat_R;
- cnt = 4 - ((wbox_stat & 0x00ff00) >> 8);
+ cnt = spufs_wbox_info_cnt(ctx);
for (i = 0; i < cnt; i++) {
data[i] = ctx->csa.spu_mailbox_data[i];
}
@@ -2062,7 +2079,8 @@ static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
size_t len, loff_t *pos)
{
struct spu_context *ctx = file->private_data;
- int ret;
+ u32 data[ARRAY_SIZE(ctx->csa.spu_mailbox_data)];
+ int ret, count;
if (!access_ok(buf, len))
return -EFAULT;
@@ -2071,11 +2089,13 @@ static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
if (ret)
return ret;
spin_lock(&ctx->csa.register_lock);
- ret = __spufs_wbox_info_read(ctx, buf, len, pos);
+ count = spufs_wbox_info_cnt(ctx);
+ memcpy(&data, &ctx->csa.spu_mailbox_data, sizeof(data));
spin_unlock(&ctx->csa.register_lock);
spu_release_saved(ctx);
- return ret;
+ return simple_read_from_buffer(buf, len, pos, &data,
+ count * sizeof(u32));
}
static const struct file_operations spufs_wbox_info_fops = {
@@ -2084,27 +2104,33 @@ static const struct file_operations spufs_wbox_info_fops = {
.llseek = generic_file_llseek,
};
-static ssize_t __spufs_dma_info_read(struct spu_context *ctx,
- char __user *buf, size_t len, loff_t *pos)
+static void ___spufs_dma_info_read(struct spu_context *ctx,
+ struct spu_dma_info *info)
{
- struct spu_dma_info info;
- struct mfc_cq_sr *qp, *spuqp;
int i;
- info.dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW;
- info.dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0];
- info.dma_info_status = ctx->csa.spu_chnldata_RW[24];
- info.dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25];
- info.dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27];
+ info->dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW;
+ info->dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0];
+ info->dma_info_status = ctx->csa.spu_chnldata_RW[24];
+ info->dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25];
+ info->dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27];
for (i = 0; i < 16; i++) {
- qp = &info.dma_info_command_data[i];
- spuqp = &ctx->csa.priv2.spuq[i];
+ struct mfc_cq_sr *qp = &info->dma_info_command_data[i];
+ struct mfc_cq_sr *spuqp = &ctx->csa.priv2.spuq[i];
qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW;
qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW;
qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW;
qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW;
}
+}
+
+static ssize_t __spufs_dma_info_read(struct spu_context *ctx,
+ char __user *buf, size_t len, loff_t *pos)
+{
+ struct spu_dma_info info;
+
+ ___spufs_dma_info_read(ctx, &info);
return simple_read_from_buffer(buf, len, pos, &info,
sizeof info);
@@ -2114,6 +2140,7 @@ static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
size_t len, loff_t *pos)
{
struct spu_context *ctx = file->private_data;
+ struct spu_dma_info info;
int ret;
if (!access_ok(buf, len))
@@ -2123,11 +2150,12 @@ static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
if (ret)
return ret;
spin_lock(&ctx->csa.register_lock);
- ret = __spufs_dma_info_read(ctx, buf, len, pos);
+ ___spufs_dma_info_read(ctx, &info);
spin_unlock(&ctx->csa.register_lock);
spu_release_saved(ctx);
- return ret;
+ return simple_read_from_buffer(buf, len, pos, &info,
+ sizeof(info));
}
static const struct file_operations spufs_dma_info_fops = {
@@ -2136,13 +2164,31 @@ static const struct file_operations spufs_dma_info_fops = {
.llseek = no_llseek,
};
+static void ___spufs_proxydma_info_read(struct spu_context *ctx,
+ struct spu_proxydma_info *info)
+{
+ int i;
+
+ info->proxydma_info_type = ctx->csa.prob.dma_querytype_RW;
+ info->proxydma_info_mask = ctx->csa.prob.dma_querymask_RW;
+ info->proxydma_info_status = ctx->csa.prob.dma_tagstatus_R;
+
+ for (i = 0; i < 8; i++) {
+ struct mfc_cq_sr *qp = &info->proxydma_info_command_data[i];
+ struct mfc_cq_sr *puqp = &ctx->csa.priv2.puq[i];
+
+ qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW;
+ qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW;
+ qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW;
+ qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW;
+ }
+}
+
static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx,
char __user *buf, size_t len, loff_t *pos)
{
struct spu_proxydma_info info;
- struct mfc_cq_sr *qp, *puqp;
int ret = sizeof info;
- int i;
if (len < ret)
return -EINVAL;
@@ -2150,18 +2196,7 @@ static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx,
if (!access_ok(buf, len))
return -EFAULT;
- info.proxydma_info_type = ctx->csa.prob.dma_querytype_RW;
- info.proxydma_info_mask = ctx->csa.prob.dma_querymask_RW;
- info.proxydma_info_status = ctx->csa.prob.dma_tagstatus_R;
- for (i = 0; i < 8; i++) {
- qp = &info.proxydma_info_command_data[i];
- puqp = &ctx->csa.priv2.puq[i];
-
- qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW;
- qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW;
- qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW;
- qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW;
- }
+ ___spufs_proxydma_info_read(ctx, &info);
return simple_read_from_buffer(buf, len, pos, &info,
sizeof info);
@@ -2171,17 +2206,19 @@ static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf,
size_t len, loff_t *pos)
{
struct spu_context *ctx = file->private_data;
+ struct spu_proxydma_info info;
int ret;
ret = spu_acquire_saved(ctx);
if (ret)
return ret;
spin_lock(&ctx->csa.register_lock);
- ret = __spufs_proxydma_info_read(ctx, buf, len, pos);
+ ___spufs_proxydma_info_read(ctx, &info);
spin_unlock(&ctx->csa.register_lock);
spu_release_saved(ctx);
- return ret;
+ return simple_read_from_buffer(buf, len, pos, &info,
+ sizeof(info));
}
static const struct file_operations spufs_proxydma_info_fops = {
--
2.17.1
^ permalink raw reply related [flat|nested] 9+ messages in thread
* [PATCH 2/2] powerpc/spufs: stop using access_ok
2020-04-29 7:03 [PATCH 1/2] powerpc/spufs: fix copy_to_user while atomic Jeremy Kerr
@ 2020-04-29 7:03 ` Jeremy Kerr
2020-04-29 15:00 ` Christophe Leroy
2020-05-04 13:04 ` [PATCH 1/2] powerpc/spufs: fix copy_to_user while atomic Christoph Hellwig
1 sibling, 1 reply; 9+ messages in thread
From: Jeremy Kerr @ 2020-04-29 7:03 UTC (permalink / raw)
To: linuxppc-dev; +Cc: Christoph Hellwig, Arnd Bergmann
From: Christoph Hellwig <hch@lst.de>
Just use the proper non __-prefixed get/put_user variants where that is
not done yet.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
---
arch/powerpc/platforms/cell/spufs/file.c | 42 +++++-------------------
1 file changed, 8 insertions(+), 34 deletions(-)
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index b4e1ef650b40..cd7d10f27fad 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -590,17 +590,12 @@ static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
size_t len, loff_t *pos)
{
struct spu_context *ctx = file->private_data;
- u32 mbox_data, __user *udata;
+ u32 mbox_data, __user *udata = (void __user *)buf;
ssize_t count;
if (len < 4)
return -EINVAL;
- if (!access_ok(buf, len))
- return -EFAULT;
-
- udata = (void __user *)buf;
-
count = spu_acquire(ctx);
if (count)
return count;
@@ -616,7 +611,7 @@ static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
* but still need to return the data we have
* read successfully so far.
*/
- ret = __put_user(mbox_data, udata);
+ ret = put_user(mbox_data, udata);
if (ret) {
if (!count)
count = -EFAULT;
@@ -698,17 +693,12 @@ static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
size_t len, loff_t *pos)
{
struct spu_context *ctx = file->private_data;
- u32 ibox_data, __user *udata;
+ u32 ibox_data, __user *udata = (void __user *)buf;
ssize_t count;
if (len < 4)
return -EINVAL;
- if (!access_ok(buf, len))
- return -EFAULT;
-
- udata = (void __user *)buf;
-
count = spu_acquire(ctx);
if (count)
goto out;
@@ -727,7 +717,7 @@ static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
}
/* if we can't write at all, return -EFAULT */
- count = __put_user(ibox_data, udata);
+ count = put_user(ibox_data, udata);
if (count)
goto out_unlock;
@@ -741,7 +731,7 @@ static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
* but still need to return the data we have
* read successfully so far.
*/
- ret = __put_user(ibox_data, udata);
+ ret = put_user(ibox_data, udata);
if (ret)
break;
}
@@ -836,17 +826,13 @@ static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
size_t len, loff_t *pos)
{
struct spu_context *ctx = file->private_data;
- u32 wbox_data, __user *udata;
+ u32 wbox_data, __user *udata = (void __user *)buf;
ssize_t count;
if (len < 4)
return -EINVAL;
- udata = (void __user *)buf;
- if (!access_ok(buf, len))
- return -EFAULT;
-
- if (__get_user(wbox_data, udata))
+ if (get_user(wbox_data, udata))
return -EFAULT;
count = spu_acquire(ctx);
@@ -873,7 +859,7 @@ static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
/* write as much as possible */
for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
int ret;
- ret = __get_user(wbox_data, udata);
+ ret = get_user(wbox_data, udata);
if (ret)
break;
@@ -1982,9 +1968,6 @@ static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,
u32 stat, data;
int ret;
- if (!access_ok(buf, len))
- return -EFAULT;
-
ret = spu_acquire_saved(ctx);
if (ret)
return ret;
@@ -2028,9 +2011,6 @@ static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,
u32 stat, data;
int ret;
- if (!access_ok(buf, len))
- return -EFAULT;
-
ret = spu_acquire_saved(ctx);
if (ret)
return ret;
@@ -2082,9 +2062,6 @@ static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
u32 data[ARRAY_SIZE(ctx->csa.spu_mailbox_data)];
int ret, count;
- if (!access_ok(buf, len))
- return -EFAULT;
-
ret = spu_acquire_saved(ctx);
if (ret)
return ret;
@@ -2143,9 +2120,6 @@ static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
struct spu_dma_info info;
int ret;
- if (!access_ok(buf, len))
- return -EFAULT;
-
ret = spu_acquire_saved(ctx);
if (ret)
return ret;
--
2.17.1
^ permalink raw reply related [flat|nested] 9+ messages in thread
* Re: [PATCH 2/2] powerpc/spufs: stop using access_ok
2020-04-29 7:03 ` [PATCH 2/2] powerpc/spufs: stop using access_ok Jeremy Kerr
@ 2020-04-29 15:00 ` Christophe Leroy
2020-04-30 0:39 ` Jeremy Kerr
0 siblings, 1 reply; 9+ messages in thread
From: Christophe Leroy @ 2020-04-29 15:00 UTC (permalink / raw)
To: Jeremy Kerr, linuxppc-dev; +Cc: Christoph Hellwig, Arnd Bergmann
Le 29/04/2020 à 09:03, Jeremy Kerr a écrit :
> From: Christoph Hellwig <hch@lst.de>
>
> Just use the proper non __-prefixed get/put_user variants where that is
> not done yet.
But it means you are doing the access_ok() check every time, which is
what is to be avoided by doing the access_ok() once then using the
__-prefixed variant.
Christophe
>
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
> ---
> arch/powerpc/platforms/cell/spufs/file.c | 42 +++++-------------------
> 1 file changed, 8 insertions(+), 34 deletions(-)
>
> diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
> index b4e1ef650b40..cd7d10f27fad 100644
> --- a/arch/powerpc/platforms/cell/spufs/file.c
> +++ b/arch/powerpc/platforms/cell/spufs/file.c
> @@ -590,17 +590,12 @@ static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
> size_t len, loff_t *pos)
> {
> struct spu_context *ctx = file->private_data;
> - u32 mbox_data, __user *udata;
> + u32 mbox_data, __user *udata = (void __user *)buf;
> ssize_t count;
>
> if (len < 4)
> return -EINVAL;
>
> - if (!access_ok(buf, len))
> - return -EFAULT;
> -
> - udata = (void __user *)buf;
> -
> count = spu_acquire(ctx);
> if (count)
> return count;
> @@ -616,7 +611,7 @@ static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
> * but still need to return the data we have
> * read successfully so far.
> */
> - ret = __put_user(mbox_data, udata);
> + ret = put_user(mbox_data, udata);
> if (ret) {
> if (!count)
> count = -EFAULT;
> @@ -698,17 +693,12 @@ static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
> size_t len, loff_t *pos)
> {
> struct spu_context *ctx = file->private_data;
> - u32 ibox_data, __user *udata;
> + u32 ibox_data, __user *udata = (void __user *)buf;
> ssize_t count;
>
> if (len < 4)
> return -EINVAL;
>
> - if (!access_ok(buf, len))
> - return -EFAULT;
> -
> - udata = (void __user *)buf;
> -
> count = spu_acquire(ctx);
> if (count)
> goto out;
> @@ -727,7 +717,7 @@ static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
> }
>
> /* if we can't write at all, return -EFAULT */
> - count = __put_user(ibox_data, udata);
> + count = put_user(ibox_data, udata);
> if (count)
> goto out_unlock;
>
> @@ -741,7 +731,7 @@ static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
> * but still need to return the data we have
> * read successfully so far.
> */
> - ret = __put_user(ibox_data, udata);
> + ret = put_user(ibox_data, udata);
> if (ret)
> break;
> }
> @@ -836,17 +826,13 @@ static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
> size_t len, loff_t *pos)
> {
> struct spu_context *ctx = file->private_data;
> - u32 wbox_data, __user *udata;
> + u32 wbox_data, __user *udata = (void __user *)buf;
> ssize_t count;
>
> if (len < 4)
> return -EINVAL;
>
> - udata = (void __user *)buf;
> - if (!access_ok(buf, len))
> - return -EFAULT;
> -
> - if (__get_user(wbox_data, udata))
> + if (get_user(wbox_data, udata))
> return -EFAULT;
>
> count = spu_acquire(ctx);
> @@ -873,7 +859,7 @@ static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
> /* write as much as possible */
> for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
> int ret;
> - ret = __get_user(wbox_data, udata);
> + ret = get_user(wbox_data, udata);
> if (ret)
> break;
>
> @@ -1982,9 +1968,6 @@ static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,
> u32 stat, data;
> int ret;
>
> - if (!access_ok(buf, len))
> - return -EFAULT;
> -
> ret = spu_acquire_saved(ctx);
> if (ret)
> return ret;
> @@ -2028,9 +2011,6 @@ static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,
> u32 stat, data;
> int ret;
>
> - if (!access_ok(buf, len))
> - return -EFAULT;
> -
> ret = spu_acquire_saved(ctx);
> if (ret)
> return ret;
> @@ -2082,9 +2062,6 @@ static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
> u32 data[ARRAY_SIZE(ctx->csa.spu_mailbox_data)];
> int ret, count;
>
> - if (!access_ok(buf, len))
> - return -EFAULT;
> -
> ret = spu_acquire_saved(ctx);
> if (ret)
> return ret;
> @@ -2143,9 +2120,6 @@ static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
> struct spu_dma_info info;
> int ret;
>
> - if (!access_ok(buf, len))
> - return -EFAULT;
> -
> ret = spu_acquire_saved(ctx);
> if (ret)
> return ret;
>
^ permalink raw reply [flat|nested] 9+ messages in thread
* Re: [PATCH 2/2] powerpc/spufs: stop using access_ok
2020-04-29 15:00 ` Christophe Leroy
@ 2020-04-30 0:39 ` Jeremy Kerr
2020-04-30 5:39 ` Christoph Hellwig
0 siblings, 1 reply; 9+ messages in thread
From: Jeremy Kerr @ 2020-04-30 0:39 UTC (permalink / raw)
To: Christophe Leroy, linuxppc-dev; +Cc: Christoph Hellwig, Arnd Bergmann
Hi Christophe,
> > Just use the proper non __-prefixed get/put_user variants where
> > that is not done yet.
>
> > But it means you are doing the access_ok() check every time, which is
> what is to be avoided by doing the access_ok() once then using the
> __-prefixed variant.
5 out of 8 of these are just a access_ok(); simple_read_from_buffer().
For the cases where it's multiple __put/get_user()s, the max will be 5.
(for the mbox access). Is that worth optimising the access_ok() checks?
Cheers,
Jeremy
^ permalink raw reply [flat|nested] 9+ messages in thread
* Re: [PATCH 2/2] powerpc/spufs: stop using access_ok
2020-04-30 0:39 ` Jeremy Kerr
@ 2020-04-30 5:39 ` Christoph Hellwig
2020-04-30 6:18 ` Christophe Leroy
0 siblings, 1 reply; 9+ messages in thread
From: Christoph Hellwig @ 2020-04-30 5:39 UTC (permalink / raw)
To: Jeremy Kerr; +Cc: linuxppc-dev, Christoph Hellwig, Arnd Bergmann
On Thu, Apr 30, 2020 at 08:39:00AM +0800, Jeremy Kerr wrote:
> Hi Christophe,
>
> > > Just use the proper non __-prefixed get/put_user variants where
> > > that is not done yet.
> >
> > > But it means you are doing the access_ok() check every time, which is
> > what is to be avoided by doing the access_ok() once then using the
> > __-prefixed variant.
>
> 5 out of 8 of these are just a access_ok(); simple_read_from_buffer().
>
> For the cases where it's multiple __put/get_user()s, the max will be 5.
> (for the mbox access). Is that worth optimising the access_ok() checks?
access_ok is just a trivial comparison to the segment limit, I don't
think it has a relevant performance impact.
^ permalink raw reply [flat|nested] 9+ messages in thread
* Re: [PATCH 2/2] powerpc/spufs: stop using access_ok
2020-04-30 5:39 ` Christoph Hellwig
@ 2020-04-30 6:18 ` Christophe Leroy
0 siblings, 0 replies; 9+ messages in thread
From: Christophe Leroy @ 2020-04-30 6:18 UTC (permalink / raw)
To: Christoph Hellwig, Jeremy Kerr; +Cc: linuxppc-dev, Arnd Bergmann
Le 30/04/2020 à 07:39, Christoph Hellwig a écrit :
> On Thu, Apr 30, 2020 at 08:39:00AM +0800, Jeremy Kerr wrote:
>> Hi Christophe,
>>
>>>> Just use the proper non __-prefixed get/put_user variants where
>>>> that is not done yet.
>>>
>>> But it means you are doing the access_ok() check every time, which is
>>> what is to be avoided by doing the access_ok() once then using the
>>> __-prefixed variant.
>>
>> 5 out of 8 of these are just a access_ok(); simple_read_from_buffer().
>>
>> For the cases where it's multiple __put/get_user()s, the max will be 5.
>> (for the mbox access). Is that worth optimising the access_ok() checks?
>
> access_ok is just a trivial comparison to the segment limit, I don't
> think it has a relevant performance impact.
>
I think it has an impact. See the difference between the two following
trivial functions:
int test1(unsigned long val, unsigned long *addr)
{
return __put_user(val, addr);
}
int test2(unsigned long val, unsigned long *addr)
{
return put_user(val, addr);
}
00000000 <test1>:
0: 39 20 00 00 li r9,0
4: 90 64 00 00 stw r3,0(r4)
8: 7d 23 4b 78 mr r3,r9
c: 4e 80 00 20 blr
00000000 <test2>:
0: 81 22 04 38 lwz r9,1080(r2)
4: 7c 6a 1b 78 mr r10,r3
8: 7f 89 20 40 cmplw cr7,r9,r4
c: 41 9c 00 24 blt cr7,30 <test2+0x30>
10: 7d 24 48 50 subf r9,r4,r9
14: 38 60 ff f2 li r3,-14
18: 2b 89 00 02 cmplwi cr7,r9,2
1c: 4c 9d 00 20 blelr cr7
20: 39 20 00 00 li r9,0
24: 91 44 00 00 stw r10,0(r4)
28: 7d 23 4b 78 mr r3,r9
2c: 4e 80 00 20 blr
30: 38 60 ff f2 li r3,-14
34: 4e 80 00 20 blr
It looks like GCC is smart enough to read the limit in task struct only
once when we have two consecutive put_user() but there is still some
difference:
int test3(unsigned long val, unsigned long *addr)
{
return put_user(val, addr) ? : put_user(val, addr + 1);
}
int test4(unsigned long val, unsigned long *addr)
{
if (!access_ok(addr, sizeof(*addr)))
return -EFAULT;
return __put_user(val, addr) ? : __put_user(val, addr + 1);
}
00000000 <test3>:
0: 81 42 04 38 lwz r10,1080(r2)
4: 7f 8a 20 40 cmplw cr7,r10,r4
8: 41 9c 00 48 blt cr7,50 <test3+0x50>
c: 7d 04 50 50 subf r8,r4,r10
10: 39 20 ff f2 li r9,-14
14: 2b 88 00 02 cmplwi cr7,r8,2
18: 40 9d 00 30 ble cr7,48 <test3+0x48>
1c: 39 20 00 00 li r9,0
20: 90 64 00 00 stw r3,0(r4)
24: 2f 89 00 00 cmpwi cr7,r9,0
28: 40 9e 00 20 bne cr7,48 <test3+0x48>
2c: 38 84 00 04 addi r4,r4,4
30: 7f 8a 20 40 cmplw cr7,r10,r4
34: 41 9c 00 1c blt cr7,50 <test3+0x50>
38: 7d 44 50 50 subf r10,r4,r10
3c: 2b 8a 00 02 cmplwi cr7,r10,2
40: 40 9d 00 10 ble cr7,50 <test3+0x50>
44: 90 64 00 00 stw r3,0(r4)
48: 7d 23 4b 78 mr r3,r9
4c: 4e 80 00 20 blr
50: 39 20 ff f2 li r9,-14
54: 4b ff ff f4 b 48 <test3+0x48>
Disassembly of section .text.test4:
00000000 <test4>:
0: 81 22 04 38 lwz r9,1080(r2)
4: 7f 89 20 40 cmplw cr7,r9,r4
8: 41 9c 00 34 blt cr7,3c <test4+0x3c>
c: 7d 44 48 50 subf r10,r4,r9
10: 39 20 ff f2 li r9,-14
14: 2b 8a 00 06 cmplwi cr7,r10,6
18: 40 9d 00 1c ble cr7,34 <test4+0x34>
1c: 39 20 00 00 li r9,0
20: 90 64 00 00 stw r3,0(r4)
24: 2f 89 00 00 cmpwi cr7,r9,0
28: 40 9e 00 0c bne cr7,34 <test4+0x34>
2c: 38 84 00 04 addi r4,r4,4
30: 90 64 00 00 stw r3,0(r4)
34: 7d 23 4b 78 mr r3,r9
38: 4e 80 00 20 blr
3c: 39 20 ff f2 li r9,-14
40: 4b ff ff f4 b 34 <test4+0x34>
^ permalink raw reply [flat|nested] 9+ messages in thread
* Re: [PATCH 1/2] powerpc/spufs: fix copy_to_user while atomic
2020-04-29 7:03 [PATCH 1/2] powerpc/spufs: fix copy_to_user while atomic Jeremy Kerr
2020-04-29 7:03 ` [PATCH 2/2] powerpc/spufs: stop using access_ok Jeremy Kerr
@ 2020-05-04 13:04 ` Christoph Hellwig
2020-05-05 7:20 ` Michael Ellerman
1 sibling, 1 reply; 9+ messages in thread
From: Christoph Hellwig @ 2020-05-04 13:04 UTC (permalink / raw)
To: Jeremy Kerr; +Cc: linuxppc-dev, Christoph Hellwig, Arnd Bergmann
powerpc maintainers,
are you going to pick this up for the next -rc1? I'm waiting for it to
hit upstream before resending the coredump series.
On Wed, Apr 29, 2020 at 03:03:02PM +0800, Jeremy Kerr wrote:
> Currently, we may perform a copy_to_user (through
> simple_read_from_buffer()) while holding a context's register_lock,
> while accessing the context save area.
>
> This change uses a temporary buffer for the context save area data,
> which we then pass to simple_read_from_buffer.
>
> Includes changes from Christoph Hellwig <hch@lst.de>.
>
> Fixes: bf1ab978be23 ("[POWERPC] coredump: Add SPU elf notes to coredump.")
> Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
> Reviewed-by: Arnd Bergmann <arnd@arndb.de>
> Reviewed-by: Christoph Hellwig <hch@lst.de>
> ---
> arch/powerpc/platforms/cell/spufs/file.c | 113 +++++++++++++++--------
> 1 file changed, 75 insertions(+), 38 deletions(-)
>
> diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
> index c0f950a3f4e1..b4e1ef650b40 100644
> --- a/arch/powerpc/platforms/cell/spufs/file.c
> +++ b/arch/powerpc/platforms/cell/spufs/file.c
> @@ -1978,8 +1978,9 @@ static ssize_t __spufs_mbox_info_read(struct spu_context *ctx,
> static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,
> size_t len, loff_t *pos)
> {
> - int ret;
> struct spu_context *ctx = file->private_data;
> + u32 stat, data;
> + int ret;
>
> if (!access_ok(buf, len))
> return -EFAULT;
> @@ -1988,11 +1989,16 @@ static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,
> if (ret)
> return ret;
> spin_lock(&ctx->csa.register_lock);
> - ret = __spufs_mbox_info_read(ctx, buf, len, pos);
> + stat = ctx->csa.prob.mb_stat_R;
> + data = ctx->csa.prob.pu_mb_R;
> spin_unlock(&ctx->csa.register_lock);
> spu_release_saved(ctx);
>
> - return ret;
> + /* EOF if there's no entry in the mbox */
> + if (!(stat & 0x0000ff))
> + return 0;
> +
> + return simple_read_from_buffer(buf, len, pos, &data, sizeof(data));
> }
>
> static const struct file_operations spufs_mbox_info_fops = {
> @@ -2019,6 +2025,7 @@ static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,
> size_t len, loff_t *pos)
> {
> struct spu_context *ctx = file->private_data;
> + u32 stat, data;
> int ret;
>
> if (!access_ok(buf, len))
> @@ -2028,11 +2035,16 @@ static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,
> if (ret)
> return ret;
> spin_lock(&ctx->csa.register_lock);
> - ret = __spufs_ibox_info_read(ctx, buf, len, pos);
> + stat = ctx->csa.prob.mb_stat_R;
> + data = ctx->csa.priv2.puint_mb_R;
> spin_unlock(&ctx->csa.register_lock);
> spu_release_saved(ctx);
>
> - return ret;
> + /* EOF if there's no entry in the ibox */
> + if (!(stat & 0xff0000))
> + return 0;
> +
> + return simple_read_from_buffer(buf, len, pos, &data, sizeof(data));
> }
>
> static const struct file_operations spufs_ibox_info_fops = {
> @@ -2041,6 +2053,11 @@ static const struct file_operations spufs_ibox_info_fops = {
> .llseek = generic_file_llseek,
> };
>
> +static size_t spufs_wbox_info_cnt(struct spu_context *ctx)
> +{
> + return (4 - ((ctx->csa.prob.mb_stat_R & 0x00ff00) >> 8)) * sizeof(u32);
> +}
> +
> static ssize_t __spufs_wbox_info_read(struct spu_context *ctx,
> char __user *buf, size_t len, loff_t *pos)
> {
> @@ -2049,7 +2066,7 @@ static ssize_t __spufs_wbox_info_read(struct spu_context *ctx,
> u32 wbox_stat;
>
> wbox_stat = ctx->csa.prob.mb_stat_R;
> - cnt = 4 - ((wbox_stat & 0x00ff00) >> 8);
> + cnt = spufs_wbox_info_cnt(ctx);
> for (i = 0; i < cnt; i++) {
> data[i] = ctx->csa.spu_mailbox_data[i];
> }
> @@ -2062,7 +2079,8 @@ static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
> size_t len, loff_t *pos)
> {
> struct spu_context *ctx = file->private_data;
> - int ret;
> + u32 data[ARRAY_SIZE(ctx->csa.spu_mailbox_data)];
> + int ret, count;
>
> if (!access_ok(buf, len))
> return -EFAULT;
> @@ -2071,11 +2089,13 @@ static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
> if (ret)
> return ret;
> spin_lock(&ctx->csa.register_lock);
> - ret = __spufs_wbox_info_read(ctx, buf, len, pos);
> + count = spufs_wbox_info_cnt(ctx);
> + memcpy(&data, &ctx->csa.spu_mailbox_data, sizeof(data));
> spin_unlock(&ctx->csa.register_lock);
> spu_release_saved(ctx);
>
> - return ret;
> + return simple_read_from_buffer(buf, len, pos, &data,
> + count * sizeof(u32));
> }
>
> static const struct file_operations spufs_wbox_info_fops = {
> @@ -2084,27 +2104,33 @@ static const struct file_operations spufs_wbox_info_fops = {
> .llseek = generic_file_llseek,
> };
>
> -static ssize_t __spufs_dma_info_read(struct spu_context *ctx,
> - char __user *buf, size_t len, loff_t *pos)
> +static void ___spufs_dma_info_read(struct spu_context *ctx,
> + struct spu_dma_info *info)
> {
> - struct spu_dma_info info;
> - struct mfc_cq_sr *qp, *spuqp;
> int i;
>
> - info.dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW;
> - info.dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0];
> - info.dma_info_status = ctx->csa.spu_chnldata_RW[24];
> - info.dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25];
> - info.dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27];
> + info->dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW;
> + info->dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0];
> + info->dma_info_status = ctx->csa.spu_chnldata_RW[24];
> + info->dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25];
> + info->dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27];
> for (i = 0; i < 16; i++) {
> - qp = &info.dma_info_command_data[i];
> - spuqp = &ctx->csa.priv2.spuq[i];
> + struct mfc_cq_sr *qp = &info->dma_info_command_data[i];
> + struct mfc_cq_sr *spuqp = &ctx->csa.priv2.spuq[i];
>
> qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW;
> qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW;
> qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW;
> qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW;
> }
> +}
> +
> +static ssize_t __spufs_dma_info_read(struct spu_context *ctx,
> + char __user *buf, size_t len, loff_t *pos)
> +{
> + struct spu_dma_info info;
> +
> + ___spufs_dma_info_read(ctx, &info);
>
> return simple_read_from_buffer(buf, len, pos, &info,
> sizeof info);
> @@ -2114,6 +2140,7 @@ static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
> size_t len, loff_t *pos)
> {
> struct spu_context *ctx = file->private_data;
> + struct spu_dma_info info;
> int ret;
>
> if (!access_ok(buf, len))
> @@ -2123,11 +2150,12 @@ static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
> if (ret)
> return ret;
> spin_lock(&ctx->csa.register_lock);
> - ret = __spufs_dma_info_read(ctx, buf, len, pos);
> + ___spufs_dma_info_read(ctx, &info);
> spin_unlock(&ctx->csa.register_lock);
> spu_release_saved(ctx);
>
> - return ret;
> + return simple_read_from_buffer(buf, len, pos, &info,
> + sizeof(info));
> }
>
> static const struct file_operations spufs_dma_info_fops = {
> @@ -2136,13 +2164,31 @@ static const struct file_operations spufs_dma_info_fops = {
> .llseek = no_llseek,
> };
>
> +static void ___spufs_proxydma_info_read(struct spu_context *ctx,
> + struct spu_proxydma_info *info)
> +{
> + int i;
> +
> + info->proxydma_info_type = ctx->csa.prob.dma_querytype_RW;
> + info->proxydma_info_mask = ctx->csa.prob.dma_querymask_RW;
> + info->proxydma_info_status = ctx->csa.prob.dma_tagstatus_R;
> +
> + for (i = 0; i < 8; i++) {
> + struct mfc_cq_sr *qp = &info->proxydma_info_command_data[i];
> + struct mfc_cq_sr *puqp = &ctx->csa.priv2.puq[i];
> +
> + qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW;
> + qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW;
> + qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW;
> + qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW;
> + }
> +}
> +
> static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx,
> char __user *buf, size_t len, loff_t *pos)
> {
> struct spu_proxydma_info info;
> - struct mfc_cq_sr *qp, *puqp;
> int ret = sizeof info;
> - int i;
>
> if (len < ret)
> return -EINVAL;
> @@ -2150,18 +2196,7 @@ static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx,
> if (!access_ok(buf, len))
> return -EFAULT;
>
> - info.proxydma_info_type = ctx->csa.prob.dma_querytype_RW;
> - info.proxydma_info_mask = ctx->csa.prob.dma_querymask_RW;
> - info.proxydma_info_status = ctx->csa.prob.dma_tagstatus_R;
> - for (i = 0; i < 8; i++) {
> - qp = &info.proxydma_info_command_data[i];
> - puqp = &ctx->csa.priv2.puq[i];
> -
> - qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW;
> - qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW;
> - qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW;
> - qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW;
> - }
> + ___spufs_proxydma_info_read(ctx, &info);
>
> return simple_read_from_buffer(buf, len, pos, &info,
> sizeof info);
> @@ -2171,17 +2206,19 @@ static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf,
> size_t len, loff_t *pos)
> {
> struct spu_context *ctx = file->private_data;
> + struct spu_proxydma_info info;
> int ret;
>
> ret = spu_acquire_saved(ctx);
> if (ret)
> return ret;
> spin_lock(&ctx->csa.register_lock);
> - ret = __spufs_proxydma_info_read(ctx, buf, len, pos);
> + ___spufs_proxydma_info_read(ctx, &info);
> spin_unlock(&ctx->csa.register_lock);
> spu_release_saved(ctx);
>
> - return ret;
> + return simple_read_from_buffer(buf, len, pos, &info,
> + sizeof(info));
> }
>
> static const struct file_operations spufs_proxydma_info_fops = {
> --
> 2.17.1
---end quoted text---
^ permalink raw reply [flat|nested] 9+ messages in thread
* Re: [PATCH 1/2] powerpc/spufs: fix copy_to_user while atomic
2020-05-04 13:04 ` [PATCH 1/2] powerpc/spufs: fix copy_to_user while atomic Christoph Hellwig
@ 2020-05-05 7:20 ` Michael Ellerman
2020-05-05 7:30 ` Christoph Hellwig
0 siblings, 1 reply; 9+ messages in thread
From: Michael Ellerman @ 2020-05-05 7:20 UTC (permalink / raw)
To: Christoph Hellwig, Jeremy Kerr
Cc: linuxppc-dev, Christoph Hellwig, Arnd Bergmann
Christoph Hellwig <hch@lst.de> writes:
> powerpc mantainers,
There's only one of me.
> are you going to pick this up for the next -rc1? I'm waiting for it to
> hit upstream before resending the coredump series.
I thought you were going to take it in your series.
Otherwise you'll be waiting 4 or more weeks before this hits rc1.
I can put it in a topic branch if you're worried about merge conflicts.
There's also the fcheck() RCU fix I need to repost as a proper patch, it
seems to work.
cheers
> On Wed, Apr 29, 2020 at 03:03:02PM +0800, Jeremy Kerr wrote:
>> Currently, we may perform a copy_to_user (through
>> simple_read_from_buffer()) while holding a context's register_lock,
>> while accessing the context save area.
>>
>> This change uses a temporary buffer for the context save area data,
>> which we then pass to simple_read_from_buffer.
>>
>> Includes changes from Christoph Hellwig <hch@lst.de>.
>>
>> Fixes: bf1ab978be23 ("[POWERPC] coredump: Add SPU elf notes to coredump.")
>> Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
>> Reviewed-by: Arnd Bergmann <arnd@arndb.de>
>> Reviewed-by: Christoph Hellwig <hch@lst.de>
>> ---
>> arch/powerpc/platforms/cell/spufs/file.c | 113 +++++++++++++++--------
>> 1 file changed, 75 insertions(+), 38 deletions(-)
>>
>> diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
>> index c0f950a3f4e1..b4e1ef650b40 100644
>> --- a/arch/powerpc/platforms/cell/spufs/file.c
>> +++ b/arch/powerpc/platforms/cell/spufs/file.c
>> @@ -1978,8 +1978,9 @@ static ssize_t __spufs_mbox_info_read(struct spu_context *ctx,
>> static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,
>> size_t len, loff_t *pos)
>> {
>> - int ret;
>> struct spu_context *ctx = file->private_data;
>> + u32 stat, data;
>> + int ret;
>>
>> if (!access_ok(buf, len))
>> return -EFAULT;
>> @@ -1988,11 +1989,16 @@ static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,
>> if (ret)
>> return ret;
>> spin_lock(&ctx->csa.register_lock);
>> - ret = __spufs_mbox_info_read(ctx, buf, len, pos);
>> + stat = ctx->csa.prob.mb_stat_R;
>> + data = ctx->csa.prob.pu_mb_R;
>> spin_unlock(&ctx->csa.register_lock);
>> spu_release_saved(ctx);
>>
>> - return ret;
>> + /* EOF if there's no entry in the mbox */
>> + if (!(stat & 0x0000ff))
>> + return 0;
>> +
>> + return simple_read_from_buffer(buf, len, pos, &data, sizeof(data));
>> }
>>
>> static const struct file_operations spufs_mbox_info_fops = {
>> @@ -2019,6 +2025,7 @@ static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,
>> size_t len, loff_t *pos)
>> {
>> struct spu_context *ctx = file->private_data;
>> + u32 stat, data;
>> int ret;
>>
>> if (!access_ok(buf, len))
>> @@ -2028,11 +2035,16 @@ static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,
>> if (ret)
>> return ret;
>> spin_lock(&ctx->csa.register_lock);
>> - ret = __spufs_ibox_info_read(ctx, buf, len, pos);
>> + stat = ctx->csa.prob.mb_stat_R;
>> + data = ctx->csa.priv2.puint_mb_R;
>> spin_unlock(&ctx->csa.register_lock);
>> spu_release_saved(ctx);
>>
>> - return ret;
>> + /* EOF if there's no entry in the ibox */
>> + if (!(stat & 0xff0000))
>> + return 0;
>> +
>> + return simple_read_from_buffer(buf, len, pos, &data, sizeof(data));
>> }
>>
>> static const struct file_operations spufs_ibox_info_fops = {
>> @@ -2041,6 +2053,11 @@ static const struct file_operations spufs_ibox_info_fops = {
>> .llseek = generic_file_llseek,
>> };
>>
>> +static size_t spufs_wbox_info_cnt(struct spu_context *ctx)
>> +{
>> + return (4 - ((ctx->csa.prob.mb_stat_R & 0x00ff00) >> 8)) * sizeof(u32);
>> +}
>> +
>> static ssize_t __spufs_wbox_info_read(struct spu_context *ctx,
>> char __user *buf, size_t len, loff_t *pos)
>> {
>> @@ -2049,7 +2066,7 @@ static ssize_t __spufs_wbox_info_read(struct spu_context *ctx,
>> u32 wbox_stat;
>>
>> wbox_stat = ctx->csa.prob.mb_stat_R;
>> - cnt = 4 - ((wbox_stat & 0x00ff00) >> 8);
>> + cnt = spufs_wbox_info_cnt(ctx);
>> for (i = 0; i < cnt; i++) {
>> data[i] = ctx->csa.spu_mailbox_data[i];
>> }
>> @@ -2062,7 +2079,8 @@ static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
>> size_t len, loff_t *pos)
>> {
>> struct spu_context *ctx = file->private_data;
>> - int ret;
>> + u32 data[ARRAY_SIZE(ctx->csa.spu_mailbox_data)];
>> + int ret, count;
>>
>> if (!access_ok(buf, len))
>> return -EFAULT;
>> @@ -2071,11 +2089,13 @@ static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
>> if (ret)
>> return ret;
>> spin_lock(&ctx->csa.register_lock);
>> - ret = __spufs_wbox_info_read(ctx, buf, len, pos);
>> + count = spufs_wbox_info_cnt(ctx);
>> + memcpy(&data, &ctx->csa.spu_mailbox_data, sizeof(data));
>> spin_unlock(&ctx->csa.register_lock);
>> spu_release_saved(ctx);
>>
>> - return ret;
>> + return simple_read_from_buffer(buf, len, pos, &data,
>> + count * sizeof(u32));
>> }
>>
>> static const struct file_operations spufs_wbox_info_fops = {
>> @@ -2084,27 +2104,33 @@ static const struct file_operations spufs_wbox_info_fops = {
>> .llseek = generic_file_llseek,
>> };
>>
>> -static ssize_t __spufs_dma_info_read(struct spu_context *ctx,
>> - char __user *buf, size_t len, loff_t *pos)
>> +static void ___spufs_dma_info_read(struct spu_context *ctx,
>> + struct spu_dma_info *info)
>> {
>> - struct spu_dma_info info;
>> - struct mfc_cq_sr *qp, *spuqp;
>> int i;
>>
>> - info.dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW;
>> - info.dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0];
>> - info.dma_info_status = ctx->csa.spu_chnldata_RW[24];
>> - info.dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25];
>> - info.dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27];
>> + info->dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW;
>> + info->dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0];
>> + info->dma_info_status = ctx->csa.spu_chnldata_RW[24];
>> + info->dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25];
>> + info->dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27];
>> for (i = 0; i < 16; i++) {
>> - qp = &info.dma_info_command_data[i];
>> - spuqp = &ctx->csa.priv2.spuq[i];
>> + struct mfc_cq_sr *qp = &info->dma_info_command_data[i];
>> + struct mfc_cq_sr *spuqp = &ctx->csa.priv2.spuq[i];
>>
>> qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW;
>> qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW;
>> qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW;
>> qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW;
>> }
>> +}
>> +
>> +static ssize_t __spufs_dma_info_read(struct spu_context *ctx,
>> + char __user *buf, size_t len, loff_t *pos)
>> +{
>> + struct spu_dma_info info;
>> +
>> + ___spufs_dma_info_read(ctx, &info);
>>
>> return simple_read_from_buffer(buf, len, pos, &info,
>> sizeof info);
>> @@ -2114,6 +2140,7 @@ static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
>> size_t len, loff_t *pos)
>> {
>> struct spu_context *ctx = file->private_data;
>> + struct spu_dma_info info;
>> int ret;
>>
>> if (!access_ok(buf, len))
>> @@ -2123,11 +2150,12 @@ static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
>> if (ret)
>> return ret;
>> spin_lock(&ctx->csa.register_lock);
>> - ret = __spufs_dma_info_read(ctx, buf, len, pos);
>> + ___spufs_dma_info_read(ctx, &info);
>> spin_unlock(&ctx->csa.register_lock);
>> spu_release_saved(ctx);
>>
>> - return ret;
>> + return simple_read_from_buffer(buf, len, pos, &info,
>> + sizeof(info));
>> }
>>
>> static const struct file_operations spufs_dma_info_fops = {
>> @@ -2136,13 +2164,31 @@ static const struct file_operations spufs_dma_info_fops = {
>> .llseek = no_llseek,
>> };
>>
>> +static void ___spufs_proxydma_info_read(struct spu_context *ctx,
>> + struct spu_proxydma_info *info)
>> +{
>> + int i;
>> +
>> + info->proxydma_info_type = ctx->csa.prob.dma_querytype_RW;
>> + info->proxydma_info_mask = ctx->csa.prob.dma_querymask_RW;
>> + info->proxydma_info_status = ctx->csa.prob.dma_tagstatus_R;
>> +
>> + for (i = 0; i < 8; i++) {
>> + struct mfc_cq_sr *qp = &info->proxydma_info_command_data[i];
>> + struct mfc_cq_sr *puqp = &ctx->csa.priv2.puq[i];
>> +
>> + qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW;
>> + qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW;
>> + qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW;
>> + qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW;
>> + }
>> +}
>> +
>> static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx,
>> char __user *buf, size_t len, loff_t *pos)
>> {
>> struct spu_proxydma_info info;
>> - struct mfc_cq_sr *qp, *puqp;
>> int ret = sizeof info;
>> - int i;
>>
>> if (len < ret)
>> return -EINVAL;
>> @@ -2150,18 +2196,7 @@ static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx,
>> if (!access_ok(buf, len))
>> return -EFAULT;
>>
>> - info.proxydma_info_type = ctx->csa.prob.dma_querytype_RW;
>> - info.proxydma_info_mask = ctx->csa.prob.dma_querymask_RW;
>> - info.proxydma_info_status = ctx->csa.prob.dma_tagstatus_R;
>> - for (i = 0; i < 8; i++) {
>> - qp = &info.proxydma_info_command_data[i];
>> - puqp = &ctx->csa.priv2.puq[i];
>> -
>> - qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW;
>> - qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW;
>> - qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW;
>> - qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW;
>> - }
>> + ___spufs_proxydma_info_read(ctx, &info);
>>
>> return simple_read_from_buffer(buf, len, pos, &info,
>> sizeof info);
>> @@ -2171,17 +2206,19 @@ static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf,
>> size_t len, loff_t *pos)
>> {
>> struct spu_context *ctx = file->private_data;
>> + struct spu_proxydma_info info;
>> int ret;
>>
>> ret = spu_acquire_saved(ctx);
>> if (ret)
>> return ret;
>> spin_lock(&ctx->csa.register_lock);
>> - ret = __spufs_proxydma_info_read(ctx, buf, len, pos);
>> + ___spufs_proxydma_info_read(ctx, &info);
>> spin_unlock(&ctx->csa.register_lock);
>> spu_release_saved(ctx);
>>
>> - return ret;
>> + return simple_read_from_buffer(buf, len, pos, &info,
>> + sizeof(info));
>> }
>>
>> static const struct file_operations spufs_proxydma_info_fops = {
>> --
>> 2.17.1
> ---end quoted text---
^ permalink raw reply [flat|nested] 9+ messages in thread
* Re: [PATCH 1/2] powerpc/spufs: fix copy_to_user while atomic
2020-05-05 7:20 ` Michael Ellerman
@ 2020-05-05 7:30 ` Christoph Hellwig
0 siblings, 0 replies; 9+ messages in thread
From: Christoph Hellwig @ 2020-05-05 7:30 UTC (permalink / raw)
To: Michael Ellerman
Cc: linuxppc-dev, Christoph Hellwig, Arnd Bergmann, Jeremy Kerr
On Tue, May 05, 2020 at 05:20:54PM +1000, Michael Ellerman wrote:
> Christoph Hellwig <hch@lst.de> writes:
> > powerpc maintainers,
>
> There's only one of me.
>
> > are you going to pick this up for the next -rc1? I'm waiting for it to
> > hit upstream before resending the coredump series.
>
> I thought you were going to take it in your series.
Ok, I'll pick it up.
^ permalink raw reply [flat|nested] 9+ messages in thread
end of thread, other threads:[~2020-05-05 8:21 UTC | newest]
Thread overview: 9+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-04-29 7:03 [PATCH 1/2] powerpc/spufs: fix copy_to_user while atomic Jeremy Kerr
2020-04-29 7:03 ` [PATCH 2/2] powerpc/spufs: stop using access_ok Jeremy Kerr
2020-04-29 15:00 ` Christophe Leroy
2020-04-30 0:39 ` Jeremy Kerr
2020-04-30 5:39 ` Christoph Hellwig
2020-04-30 6:18 ` Christophe Leroy
2020-05-04 13:04 ` [PATCH 1/2] powerpc/spufs: fix copy_to_user while atomic Christoph Hellwig
2020-05-05 7:20 ` Michael Ellerman
2020-05-05 7:30 ` Christoph Hellwig
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).