All of lore.kernel.org
 help / color / mirror / Atom feed
From: "Eric W. Biederman" <ebiederm-aS9lmoZGLiVWk0Htik3J/w@public.gmane.org>
To: Linux Containers
	<containers-cunTk1MwBs9QetFLy7KEm3xJsTq8ys+cHZ5vskTnxNA@public.gmane.org>
Cc: esyr-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org,
	jannh-hpIqsD4AKlfQT0dZR+AlfA@public.gmane.org,
	khlebnikov-XoJtRXgx1JseBXzfvpsJ4g@public.gmane.org,
	linux-api-u79uwXL29TY76Z2rM5mHXA@public.gmane.org,
	serge.hallyn-GeWIH/nMZzLQT0dZR+AlfA@public.gmane.org,
	linux-kernel-u79uwXL29TY76Z2rM5mHXA@public.gmane.org,
	prakash.sangappa-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org,
	linux-security-module-u79uwXL29TY76Z2rM5mHXA@public.gmane.org,
	"Eric W. Biederman"
	<ebiederm-aS9lmoZGLiVWk0Htik3J/w@public.gmane.org>,
	luto-DgEjT+Ai2ygdnm+yROfE0A@public.gmane.org,
	oleg-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org,
	akpm-de/tnXTf+JLsfHDXvbKv3WD2FQJk+8+b@public.gmane.org,
	Nagarathnam Muthusamy
	<nagarathnam.muthusamy-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org>,
	Pavel Emelyanov <xemul-GEFAQzZX7r8dnm+yROfE0A@public.gmane.org>
Subject: [REVIEW][PATCH 11/11] ipc/sem: Fix semctl(..., GETPID, ...) between pid namespaces
Date: Fri, 23 Mar 2018 14:16:14 -0500	[thread overview]
Message-ID: <20180323191614.32489-11-ebiederm@xmission.com> (raw)
In-Reply-To: <87vadmobdw.fsf_-_-aS9lmoZGLiVWk0Htik3J/w@public.gmane.org>

Today the last process to update a semaphore is remembered and
reported in the pid namespace of that process.  If there are processes
in any other pid namespace querying that process id with GETPID the
result will be unusable nonsense as it does not make any
sense in your own pid namespace.

Due to ipc_update_pid I don't think you will be able to get System V
ipc semaphores into a troublesome cache line ping-pong.  Using struct
pids from separate process are not a problem because they do not share
a cache line.  Using struct pid from different threads of the same
process are unlikely to be a problem as the reference count update
can be avoided.

Further linux futexes are a much better tool for the job of mutual
exclusion between processes than System V semaphores.  So I expect
programs that  are performance limited by their interprocess mutual
exclusion primitive will be using futexes.

So while it is possible that enhancing the storage of the last
process of a System V semaphore from an integer to a struct pid
will cause a performance regression because of the effect
of frequently updating the pid reference count.  I don't expect
that to happen in practice.

This change updates semctl(..., GETPID, ...) to return the
process id of the last process to update a semaphore in the
pid namespace of the calling process.

Fixes: b488893a390e ("pid namespaces: changes to show virtual ids to user")
Signed-off-by: "Eric W. Biederman" <ebiederm-aS9lmoZGLiVWk0Htik3J/w@public.gmane.org>
---
 ipc/sem.c | 22 ++++++++++++----------
 1 file changed, 12 insertions(+), 10 deletions(-)

diff --git a/ipc/sem.c b/ipc/sem.c
index d661c491b0a5..47b263960524 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -98,7 +98,7 @@ struct sem {
 	 *  - semctl, via SETVAL and SETALL.
 	 *  - at task exit when performing undo adjustments (see exit_sem).
 	 */
-	int	sempid;
+	struct pid *sempid;
 	spinlock_t	lock;	/* spinlock for fine-grained semtimedop */
 	struct list_head pending_alter; /* pending single-sop operations */
 					/* that alter the semaphore */
@@ -128,7 +128,7 @@ struct sem_queue {
 	struct list_head	list;	 /* queue of pending operations */
 	struct task_struct	*sleeper; /* this process */
 	struct sem_undo		*undo;	 /* undo structure */
-	int			pid;	 /* process id of requesting process */
+	struct pid		*pid;	 /* process id of requesting process */
 	int			status;	 /* completion status of operation */
 	struct sembuf		*sops;	 /* array of pending operations */
 	struct sembuf		*blocking; /* the operation that blocked */
@@ -628,7 +628,8 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
  */
 static int perform_atomic_semop_slow(struct sem_array *sma, struct sem_queue *q)
 {
-	int result, sem_op, nsops, pid;
+	int result, sem_op, nsops;
+	struct pid *pid;
 	struct sembuf *sop;
 	struct sem *curr;
 	struct sembuf *sops;
@@ -666,7 +667,7 @@ static int perform_atomic_semop_slow(struct sem_array *sma, struct sem_queue *q)
 	sop--;
 	pid = q->pid;
 	while (sop >= sops) {
-		sma->sems[sop->sem_num].sempid = pid;
+		ipc_update_pid(&sma->sems[sop->sem_num].sempid, pid);
 		sop--;
 	}
 
@@ -753,7 +754,7 @@ static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q)
 			un->semadj[sop->sem_num] = undo;
 		}
 		curr->semval += sem_op;
-		curr->sempid = q->pid;
+		ipc_update_pid(&curr->sempid, q->pid);
 	}
 
 	return 0;
@@ -1160,6 +1161,7 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
 			unlink_queue(sma, q);
 			wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
 		}
+		ipc_update_pid(&sem->sempid, NULL);
 	}
 
 	/* Remove the semaphore set from the IDR */
@@ -1352,7 +1354,7 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
 		un->semadj[semnum] = 0;
 
 	curr->semval = val;
-	curr->sempid = task_tgid_vnr(current);
+	ipc_update_pid(&curr->sempid, task_tgid(current));
 	sma->sem_ctime = ktime_get_real_seconds();
 	/* maybe some queued-up processes were waiting for this */
 	do_smart_update(sma, NULL, 0, 0, &wake_q);
@@ -1473,7 +1475,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
 
 		for (i = 0; i < nsems; i++) {
 			sma->sems[i].semval = sem_io[i];
-			sma->sems[i].sempid = task_tgid_vnr(current);
+			ipc_update_pid(&sma->sems[i].sempid, task_tgid(current));
 		}
 
 		ipc_assert_locked_object(&sma->sem_perm);
@@ -1505,7 +1507,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
 		err = curr->semval;
 		goto out_unlock;
 	case GETPID:
-		err = curr->sempid;
+		err = pid_vnr(curr->sempid);
 		goto out_unlock;
 	case GETNCNT:
 		err = count_semcnt(sma, semnum, 0);
@@ -2024,7 +2026,7 @@ static long do_semtimedop(int semid, struct sembuf __user *tsops,
 	queue.sops = sops;
 	queue.nsops = nsops;
 	queue.undo = un;
-	queue.pid = task_tgid_vnr(current);
+	queue.pid = task_tgid(current);
 	queue.alter = alter;
 	queue.dupsop = dupsop;
 
@@ -2318,7 +2320,7 @@ void exit_sem(struct task_struct *tsk)
 					semaphore->semval = 0;
 				if (semaphore->semval > SEMVMX)
 					semaphore->semval = SEMVMX;
-				semaphore->sempid = task_tgid_vnr(current);
+				ipc_update_pid(&semaphore->sempid, task_tgid(current));
 			}
 		}
 		/* maybe some queued-up processes were waiting for this */
-- 
2.14.1

WARNING: multiple messages have this Message-ID (diff)
From: "Eric W. Biederman" <ebiederm@xmission.com>
To: Linux Containers <containers@lists.linux-foundation.org>
Cc: linux-kernel@vger.kernel.org, linux-api@vger.kernel.org,
	khlebnikov@yandex-team.ru, prakash.sangappa@oracle.com,
	luto@kernel.org, akpm@linux-foundation.org, oleg@redhat.com,
	serge.hallyn@ubuntu.com, esyr@redhat.com, jannh@google.com,
	linux-security-module@vger.kernel.org,
	Pavel Emelyanov <xemul@openvz.org>,
	Nagarathnam Muthusamy <nagarathnam.muthusamy@oracle.com>,
	"Eric W. Biederman" <ebiederm@xmission.com>
Subject: [REVIEW][PATCH 11/11] ipc/sem: Fix semctl(..., GETPID, ...) between pid namespaces
Date: Fri, 23 Mar 2018 14:16:14 -0500	[thread overview]
Message-ID: <20180323191614.32489-11-ebiederm@xmission.com> (raw)
In-Reply-To: <87vadmobdw.fsf_-_@xmission.com>

Today the last process to update a semaphore is remembered and
reported in the pid namespace of that process.  If there are processes
in any other pid namespace querying that process id with GETPID the
result will be unusable nonsense as it does not make any
sense in your own pid namespace.

Due to ipc_update_pid I don't think you will be able to get System V
ipc semaphores into a troublesome cache line ping-pong.  Using struct
pids from separate process are not a problem because they do not share
a cache line.  Using struct pid from different threads of the same
process are unlikely to be a problem as the reference count update
can be avoided.

Further linux futexes are a much better tool for the job of mutual
exclusion between processes than System V semaphores.  So I expect
programs that  are performance limited by their interprocess mutual
exclusion primitive will be using futexes.

So while it is possible that enhancing the storage of the last
process of a System V semaphore from an integer to a struct pid
will cause a performance regression because of the effect
of frequently updating the pid reference count.  I don't expect
that to happen in practice.

This change updates semctl(..., GETPID, ...) to return the
process id of the last process to update a semaphore in the
pid namespace of the calling process.

Fixes: b488893a390e ("pid namespaces: changes to show virtual ids to user")
Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
---
 ipc/sem.c | 22 ++++++++++++----------
 1 file changed, 12 insertions(+), 10 deletions(-)

diff --git a/ipc/sem.c b/ipc/sem.c
index d661c491b0a5..47b263960524 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -98,7 +98,7 @@ struct sem {
 	 *  - semctl, via SETVAL and SETALL.
 	 *  - at task exit when performing undo adjustments (see exit_sem).
 	 */
-	int	sempid;
+	struct pid *sempid;
 	spinlock_t	lock;	/* spinlock for fine-grained semtimedop */
 	struct list_head pending_alter; /* pending single-sop operations */
 					/* that alter the semaphore */
@@ -128,7 +128,7 @@ struct sem_queue {
 	struct list_head	list;	 /* queue of pending operations */
 	struct task_struct	*sleeper; /* this process */
 	struct sem_undo		*undo;	 /* undo structure */
-	int			pid;	 /* process id of requesting process */
+	struct pid		*pid;	 /* process id of requesting process */
 	int			status;	 /* completion status of operation */
 	struct sembuf		*sops;	 /* array of pending operations */
 	struct sembuf		*blocking; /* the operation that blocked */
@@ -628,7 +628,8 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
  */
 static int perform_atomic_semop_slow(struct sem_array *sma, struct sem_queue *q)
 {
-	int result, sem_op, nsops, pid;
+	int result, sem_op, nsops;
+	struct pid *pid;
 	struct sembuf *sop;
 	struct sem *curr;
 	struct sembuf *sops;
@@ -666,7 +667,7 @@ static int perform_atomic_semop_slow(struct sem_array *sma, struct sem_queue *q)
 	sop--;
 	pid = q->pid;
 	while (sop >= sops) {
-		sma->sems[sop->sem_num].sempid = pid;
+		ipc_update_pid(&sma->sems[sop->sem_num].sempid, pid);
 		sop--;
 	}
 
@@ -753,7 +754,7 @@ static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q)
 			un->semadj[sop->sem_num] = undo;
 		}
 		curr->semval += sem_op;
-		curr->sempid = q->pid;
+		ipc_update_pid(&curr->sempid, q->pid);
 	}
 
 	return 0;
@@ -1160,6 +1161,7 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
 			unlink_queue(sma, q);
 			wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
 		}
+		ipc_update_pid(&sem->sempid, NULL);
 	}
 
 	/* Remove the semaphore set from the IDR */
@@ -1352,7 +1354,7 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
 		un->semadj[semnum] = 0;
 
 	curr->semval = val;
-	curr->sempid = task_tgid_vnr(current);
+	ipc_update_pid(&curr->sempid, task_tgid(current));
 	sma->sem_ctime = ktime_get_real_seconds();
 	/* maybe some queued-up processes were waiting for this */
 	do_smart_update(sma, NULL, 0, 0, &wake_q);
@@ -1473,7 +1475,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
 
 		for (i = 0; i < nsems; i++) {
 			sma->sems[i].semval = sem_io[i];
-			sma->sems[i].sempid = task_tgid_vnr(current);
+			ipc_update_pid(&sma->sems[i].sempid, task_tgid(current));
 		}
 
 		ipc_assert_locked_object(&sma->sem_perm);
@@ -1505,7 +1507,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
 		err = curr->semval;
 		goto out_unlock;
 	case GETPID:
-		err = curr->sempid;
+		err = pid_vnr(curr->sempid);
 		goto out_unlock;
 	case GETNCNT:
 		err = count_semcnt(sma, semnum, 0);
@@ -2024,7 +2026,7 @@ static long do_semtimedop(int semid, struct sembuf __user *tsops,
 	queue.sops = sops;
 	queue.nsops = nsops;
 	queue.undo = un;
-	queue.pid = task_tgid_vnr(current);
+	queue.pid = task_tgid(current);
 	queue.alter = alter;
 	queue.dupsop = dupsop;
 
@@ -2318,7 +2320,7 @@ void exit_sem(struct task_struct *tsk)
 					semaphore->semval = 0;
 				if (semaphore->semval > SEMVMX)
 					semaphore->semval = SEMVMX;
-				semaphore->sempid = task_tgid_vnr(current);
+				ipc_update_pid(&semaphore->sempid, task_tgid(current));
 			}
 		}
 		/* maybe some queued-up processes were waiting for this */
-- 
2.14.1


WARNING: multiple messages have this Message-ID (diff)
From: ebiederm@xmission.com (Eric W. Biederman)
To: linux-security-module@vger.kernel.org
Subject: [REVIEW][PATCH 11/11] ipc/sem: Fix semctl(..., GETPID, ...) between pid namespaces
Date: Fri, 23 Mar 2018 14:16:14 -0500	[thread overview]
Message-ID: <20180323191614.32489-11-ebiederm@xmission.com> (raw)
In-Reply-To: <87vadmobdw.fsf_-_@xmission.com>

Today the last process to update a semaphore is remembered and
reported in the pid namespace of that process.  If there are processes
in any other pid namespace querying that process id with GETPID the
result will be unusable nonsense as it does not make any
sense in your own pid namespace.

Due to ipc_update_pid I don't think you will be able to get System V
ipc semaphores into a troublesome cache line ping-pong.  Using struct
pids from separate process are not a problem because they do not share
a cache line.  Using struct pid from different threads of the same
process are unlikely to be a problem as the reference count update
can be avoided.

Further linux futexes are a much better tool for the job of mutual
exclusion between processes than System V semaphores.  So I expect
programs that  are performance limited by their interprocess mutual
exclusion primitive will be using futexes.

So while it is possible that enhancing the storage of the last
process of a System V semaphore from an integer to a struct pid
will cause a performance regression because of the effect
of frequently updating the pid reference count.  I don't expect
that to happen in practice.

This change updates semctl(..., GETPID, ...) to return the
process id of the last process to update a semaphore in the
pid namespace of the calling process.

Fixes: b488893a390e ("pid namespaces: changes to show virtual ids to user")
Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
---
 ipc/sem.c | 22 ++++++++++++----------
 1 file changed, 12 insertions(+), 10 deletions(-)

diff --git a/ipc/sem.c b/ipc/sem.c
index d661c491b0a5..47b263960524 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -98,7 +98,7 @@ struct sem {
 	 *  - semctl, via SETVAL and SETALL.
 	 *  - at task exit when performing undo adjustments (see exit_sem).
 	 */
-	int	sempid;
+	struct pid *sempid;
 	spinlock_t	lock;	/* spinlock for fine-grained semtimedop */
 	struct list_head pending_alter; /* pending single-sop operations */
 					/* that alter the semaphore */
@@ -128,7 +128,7 @@ struct sem_queue {
 	struct list_head	list;	 /* queue of pending operations */
 	struct task_struct	*sleeper; /* this process */
 	struct sem_undo		*undo;	 /* undo structure */
-	int			pid;	 /* process id of requesting process */
+	struct pid		*pid;	 /* process id of requesting process */
 	int			status;	 /* completion status of operation */
 	struct sembuf		*sops;	 /* array of pending operations */
 	struct sembuf		*blocking; /* the operation that blocked */
@@ -628,7 +628,8 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
  */
 static int perform_atomic_semop_slow(struct sem_array *sma, struct sem_queue *q)
 {
-	int result, sem_op, nsops, pid;
+	int result, sem_op, nsops;
+	struct pid *pid;
 	struct sembuf *sop;
 	struct sem *curr;
 	struct sembuf *sops;
@@ -666,7 +667,7 @@ static int perform_atomic_semop_slow(struct sem_array *sma, struct sem_queue *q)
 	sop--;
 	pid = q->pid;
 	while (sop >= sops) {
-		sma->sems[sop->sem_num].sempid = pid;
+		ipc_update_pid(&sma->sems[sop->sem_num].sempid, pid);
 		sop--;
 	}
 
@@ -753,7 +754,7 @@ static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q)
 			un->semadj[sop->sem_num] = undo;
 		}
 		curr->semval += sem_op;
-		curr->sempid = q->pid;
+		ipc_update_pid(&curr->sempid, q->pid);
 	}
 
 	return 0;
@@ -1160,6 +1161,7 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
 			unlink_queue(sma, q);
 			wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
 		}
+		ipc_update_pid(&sem->sempid, NULL);
 	}
 
 	/* Remove the semaphore set from the IDR */
@@ -1352,7 +1354,7 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
 		un->semadj[semnum] = 0;
 
 	curr->semval = val;
-	curr->sempid = task_tgid_vnr(current);
+	ipc_update_pid(&curr->sempid, task_tgid(current));
 	sma->sem_ctime = ktime_get_real_seconds();
 	/* maybe some queued-up processes were waiting for this */
 	do_smart_update(sma, NULL, 0, 0, &wake_q);
@@ -1473,7 +1475,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
 
 		for (i = 0; i < nsems; i++) {
 			sma->sems[i].semval = sem_io[i];
-			sma->sems[i].sempid = task_tgid_vnr(current);
+			ipc_update_pid(&sma->sems[i].sempid, task_tgid(current));
 		}
 
 		ipc_assert_locked_object(&sma->sem_perm);
@@ -1505,7 +1507,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
 		err = curr->semval;
 		goto out_unlock;
 	case GETPID:
-		err = curr->sempid;
+		err = pid_vnr(curr->sempid);
 		goto out_unlock;
 	case GETNCNT:
 		err = count_semcnt(sma, semnum, 0);
@@ -2024,7 +2026,7 @@ static long do_semtimedop(int semid, struct sembuf __user *tsops,
 	queue.sops = sops;
 	queue.nsops = nsops;
 	queue.undo = un;
-	queue.pid = task_tgid_vnr(current);
+	queue.pid = task_tgid(current);
 	queue.alter = alter;
 	queue.dupsop = dupsop;
 
@@ -2318,7 +2320,7 @@ void exit_sem(struct task_struct *tsk)
 					semaphore->semval = 0;
 				if (semaphore->semval > SEMVMX)
 					semaphore->semval = SEMVMX;
-				semaphore->sempid = task_tgid_vnr(current);
+				ipc_update_pid(&semaphore->sempid, task_tgid(current));
 			}
 		}
 		/* maybe some queued-up processes were waiting for this */
-- 
2.14.1

--
To unsubscribe from this list: send the line "unsubscribe linux-security-module" in
the body of a message to majordomo at vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

  parent reply	other threads:[~2018-03-23 19:16 UTC|newest]

Thread overview: 125+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2018-03-12 17:18 [RESEND RFC] translate_pid API nagarathnam.muthusamy
2018-03-13 20:47 ` Jann Horn
2018-03-13 21:20   ` Nagarathnam Muthusamy
2018-03-13 21:28     ` Jann Horn
2018-03-13 21:44       ` Nagarathnam Muthusamy
2018-03-13 22:00         ` Jann Horn
2018-03-13 22:45           ` Nagarathnam Muthusamy
2018-03-13 23:10             ` Jann Horn
2018-03-13 23:52               ` Nagarathnam Muthusamy
2018-03-14  3:29 ` Eric W. Biederman
2018-03-14 21:22   ` Nagarathnam Muthusamy
2018-03-14 22:03     ` Eric W. Biederman
2018-03-20 20:14       ` Nagarathnam Muthusamy
2018-03-21  0:33         ` Eric W. Biederman
     [not found]           ` <87a7v2z2qa.fsf-aS9lmoZGLiVWk0Htik3J/w@public.gmane.org>
2018-03-23 19:11             ` [REVIEW][PATCH 00/11] ipc: Fixing the pid namespace support Eric W. Biederman
2018-03-23 19:11               ` Eric W. Biederman
2018-03-23 19:11               ` Eric W. Biederman
2018-03-23 19:16               ` [REVIEW][PATCH 03/11] msg/security: Pass kern_ipc_perm not msg_queue into the msg_queue security hooks Eric W. Biederman
2018-03-23 19:16                 ` Eric W. Biederman
2018-03-23 21:55                 ` Casey Schaufler
2018-03-23 21:55                   ` Casey Schaufler
     [not found]                   ` <bb73b0ea-bcda-a996-8f14-48d9dd1b0940-iSGtlc1asvQWG2LlvL+J4A@public.gmane.org>
2018-03-24  5:37                     ` Eric W. Biederman
2018-03-24  5:37                       ` Eric W. Biederman
2018-03-24  5:37                       ` Eric W. Biederman
     [not found]                 ` <20180323191614.32489-3-ebiederm-aS9lmoZGLiVWk0Htik3J/w@public.gmane.org>
2018-03-23 21:55                   ` Casey Schaufler
2018-03-23 19:16               ` [REVIEW][PATCH 05/11] shm: Move struct shmid_kernel into ipc/shm.c Eric W. Biederman
2018-03-23 19:16                 ` Eric W. Biederman
     [not found]               ` <87vadmobdw.fsf_-_-aS9lmoZGLiVWk0Htik3J/w@public.gmane.org>
2018-03-23 19:16                 ` [REVIEW][PATCH 01/11] sem/security: Pass kern_ipc_perm not sem_array into the sem security hooks Eric W. Biederman
2018-03-23 19:16                   ` Eric W. Biederman
2018-03-23 19:16                   ` Eric W. Biederman
2018-03-23 21:46                   ` Casey Schaufler
2018-03-23 21:46                     ` Casey Schaufler
2018-03-28 23:20                     ` Davidlohr Bueso
2018-03-28 23:20                       ` Davidlohr Bueso
     [not found]                     ` <bdf6ed62-b75c-1920-d5ce-ea08428d03d0-iSGtlc1asvQWG2LlvL+J4A@public.gmane.org>
2018-03-28 23:20                       ` Davidlohr Bueso
     [not found]                   ` <20180323191614.32489-1-ebiederm-aS9lmoZGLiVWk0Htik3J/w@public.gmane.org>
2018-03-23 21:46                     ` Casey Schaufler
2018-03-23 19:16                 ` [REVIEW][PATCH 02/11] shm/security: Pass kern_ipc_perm not shmid_kernel into the shm " Eric W. Biederman
2018-03-23 19:16                   ` Eric W. Biederman
2018-03-23 19:16                   ` Eric W. Biederman
2018-03-23 21:54                   ` Casey Schaufler
2018-03-23 21:54                     ` Casey Schaufler
     [not found]                   ` <20180323191614.32489-2-ebiederm-aS9lmoZGLiVWk0Htik3J/w@public.gmane.org>
2018-03-23 21:54                     ` Casey Schaufler
2018-03-23 19:16                 ` [REVIEW][PATCH 03/11] msg/security: Pass kern_ipc_perm not msg_queue into the msg_queue " Eric W. Biederman
2018-03-23 19:16                 ` [REVIEW][PATCH 04/11] sem: Move struct sem and struct sem_array into ipc/sem.c Eric W. Biederman
2018-03-23 19:16                   ` Eric W. Biederman
2018-03-23 19:16                   ` Eric W. Biederman
2018-03-23 19:16                 ` [REVIEW][PATCH 05/11] shm: Move struct shmid_kernel into ipc/shm.c Eric W. Biederman
2018-03-23 19:16                 ` [REVIEW][PATCH 06/11] msg: Move struct msg_queue into ipc/msg.c Eric W. Biederman
2018-03-23 19:16                   ` Eric W. Biederman
2018-03-23 19:16                   ` Eric W. Biederman
2018-03-23 19:16                 ` [REVIEW][PATCH 07/11] ipc: Move IPCMNI from include/ipc.h into ipc/util.h Eric W. Biederman
2018-03-23 19:16                 ` [REVIEW][PATCH 08/11] ipc/util: Helpers for making the sysvipc operations pid namespace aware Eric W. Biederman
2018-03-23 19:16                   ` Eric W. Biederman
2018-03-23 19:16                   ` Eric W. Biederman
2018-03-23 19:16                 ` [REVIEW][PATCH 09/11] ipc/shm: Fix shmctl(..., IPC_STAT, ...) between pid namespaces Eric W. Biederman
2018-03-23 19:16                 ` [REVIEW][PATCH 10/11] ipc/msg: Fix msgctl(..., " Eric W. Biederman
2018-03-23 19:16                 ` Eric W. Biederman [this message]
2018-03-23 19:16                   ` [REVIEW][PATCH 11/11] ipc/sem: Fix semctl(..., GETPID, " Eric W. Biederman
2018-03-23 19:16                   ` Eric W. Biederman
2018-03-29  0:52                   ` Davidlohr Bueso
2018-03-29  0:52                     ` Davidlohr Bueso
2018-03-30 19:09                     ` Davidlohr Bueso
2018-03-30 19:09                       ` Davidlohr Bueso
2018-03-30 20:12                       ` Eric W. Biederman
2018-03-30 20:12                         ` Eric W. Biederman
2018-03-30 20:45                         ` Davidlohr Bueso
2018-03-30 20:45                           ` Davidlohr Bueso
     [not found]                         ` <87y3i91fxh.fsf-aS9lmoZGLiVWk0Htik3J/w@public.gmane.org>
2018-03-30 20:45                           ` Davidlohr Bueso
2018-03-30 20:12                       ` Eric W. Biederman
2018-04-02 11:11                       ` Manfred Spraul
2018-04-02 11:11                         ` Manfred Spraul
2018-04-02 11:11                       ` Manfred Spraul
2018-03-30 19:09                     ` Davidlohr Bueso
     [not found]                   ` <20180323191614.32489-11-ebiederm-aS9lmoZGLiVWk0Htik3J/w@public.gmane.org>
2018-03-29  0:52                     ` Davidlohr Bueso
2018-03-24  5:40                 ` [REVIEW][PATCH 12/11] ipc: Directly call the security hook in ipc_ops.associate Eric W. Biederman
2018-03-24  5:40                   ` Eric W. Biederman
2018-03-24  5:40                   ` Eric W. Biederman
2018-03-28 23:40                   ` Davidlohr Bueso
2018-03-28 23:40                     ` Davidlohr Bueso
     [not found]                   ` <877eq2m3or.fsf_-_-aS9lmoZGLiVWk0Htik3J/w@public.gmane.org>
2018-03-28 23:40                     ` Davidlohr Bueso
2018-03-31  2:13                     ` James Morris
2018-03-31  2:13                       ` James Morris
2018-03-31  2:13                       ` James Morris
2018-03-24  5:42                 ` [REVIEW][PATCH 13/11] ipc/smack: Tidy up from the change in type of the ipc security hooks Eric W. Biederman
2018-03-29  1:12                 ` [REVIEW][PATCH 00/11] ipc: Fixing the pid namespace support Davidlohr Bueso
2018-03-29  1:12                   ` Davidlohr Bueso
2018-03-29  1:12                   ` Davidlohr Bueso
2018-03-29 18:42                   ` Eric W. Biederman
2018-03-29 18:42                     ` Eric W. Biederman
2018-03-29 18:42                   ` Eric W. Biederman
2018-03-23 19:16               ` [REVIEW][PATCH 07/11] ipc: Move IPCMNI from include/ipc.h into ipc/util.h Eric W. Biederman
2018-03-23 19:16                 ` Eric W. Biederman
2018-03-23 19:16               ` [REVIEW][PATCH 09/11] ipc/shm: Fix shmctl(..., IPC_STAT, ...) between pid namespaces Eric W. Biederman
2018-03-23 19:16                 ` Eric W. Biederman
     [not found]                 ` <20180323191614.32489-9-ebiederm-aS9lmoZGLiVWk0Htik3J/w@public.gmane.org>
2018-03-23 21:17                   ` NAGARATHNAM MUTHUSAMY
2018-03-23 21:17                     ` NAGARATHNAM MUTHUSAMY
2018-03-23 21:17                     ` NAGARATHNAM MUTHUSAMY
     [not found]                     ` <7df62190-2407-bfd4-d144-7304a8ea8ae3-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org>
2018-03-23 21:33                       ` Eric W. Biederman
2018-03-23 21:33                         ` Eric W. Biederman
2018-03-23 21:33                         ` Eric W. Biederman
     [not found]                         ` <87lgeio4tb.fsf-aS9lmoZGLiVWk0Htik3J/w@public.gmane.org>
2018-03-23 21:41                           ` NAGARATHNAM MUTHUSAMY
2018-03-23 21:41                             ` NAGARATHNAM MUTHUSAMY
2018-03-23 21:41                             ` NAGARATHNAM MUTHUSAMY
2018-03-28 23:04                             ` Eric W. Biederman
2018-03-28 23:04                               ` Eric W. Biederman
     [not found]                               ` <87woxvajk9.fsf-aS9lmoZGLiVWk0Htik3J/w@public.gmane.org>
2018-03-28 23:18                                 ` Nagarathnam Muthusamy
2018-03-28 23:18                                   ` Nagarathnam Muthusamy
2018-03-28 23:18                                   ` Nagarathnam Muthusamy
     [not found]                             ` <1091a91e-f8ee-b091-6d95-78b33520fb2d-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org>
2018-03-28 23:04                               ` Eric W. Biederman
2018-03-23 19:16               ` [REVIEW][PATCH 10/11] ipc/msg: Fix msgctl(..., " Eric W. Biederman
2018-03-23 19:16                 ` Eric W. Biederman
     [not found]                 ` <20180323191614.32489-10-ebiederm-aS9lmoZGLiVWk0Htik3J/w@public.gmane.org>
2018-03-23 21:21                   ` NAGARATHNAM MUTHUSAMY
2018-03-23 21:21                     ` NAGARATHNAM MUTHUSAMY
2018-03-23 21:21                     ` NAGARATHNAM MUTHUSAMY
2018-03-24  5:42               ` [REVIEW][PATCH 13/11] ipc/smack: Tidy up from the change in type of the ipc security hooks Eric W. Biederman
2018-03-24  5:42                 ` Eric W. Biederman
     [not found]                 ` <87y3iikp1y.fsf_-_-aS9lmoZGLiVWk0Htik3J/w@public.gmane.org>
2018-03-25  0:05                   ` Casey Schaufler
2018-03-25  0:05                     ` Casey Schaufler
2018-03-25  0:05                     ` Casey Schaufler
     [not found]                     ` <80cd2fea-c9a8-4f26-acbb-e0ecb34e4e40-iSGtlc1asvQWG2LlvL+J4A@public.gmane.org>
2018-03-28 23:38                       ` Davidlohr Bueso
2018-03-28 23:38                     ` Davidlohr Bueso
2018-03-28 23:38                       ` Davidlohr Bueso
2018-03-28 23:57                   ` Davidlohr Bueso
2018-03-28 23:57                 ` Davidlohr Bueso
2018-03-28 23:57                   ` Davidlohr Bueso

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20180323191614.32489-11-ebiederm@xmission.com \
    --to=ebiederm-as9lmozglivwk0htik3j/w@public.gmane.org \
    --cc=akpm-de/tnXTf+JLsfHDXvbKv3WD2FQJk+8+b@public.gmane.org \
    --cc=containers-cunTk1MwBs9QetFLy7KEm3xJsTq8ys+cHZ5vskTnxNA@public.gmane.org \
    --cc=esyr-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org \
    --cc=jannh-hpIqsD4AKlfQT0dZR+AlfA@public.gmane.org \
    --cc=khlebnikov-XoJtRXgx1JseBXzfvpsJ4g@public.gmane.org \
    --cc=linux-api-u79uwXL29TY76Z2rM5mHXA@public.gmane.org \
    --cc=linux-kernel-u79uwXL29TY76Z2rM5mHXA@public.gmane.org \
    --cc=linux-security-module-u79uwXL29TY76Z2rM5mHXA@public.gmane.org \
    --cc=luto-DgEjT+Ai2ygdnm+yROfE0A@public.gmane.org \
    --cc=nagarathnam.muthusamy-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org \
    --cc=oleg-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org \
    --cc=prakash.sangappa-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org \
    --cc=serge.hallyn-GeWIH/nMZzLQT0dZR+AlfA@public.gmane.org \
    --cc=xemul-GEFAQzZX7r8dnm+yROfE0A@public.gmane.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.