From mboxrd@z Thu Jan 1 00:00:00 1970
Return-Path:
Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand
	id S932678AbcJNXLf (ORCPT );
	Fri, 14 Oct 2016 19:11:35 -0400
Received: from mga07.intel.com ([134.134.136.100]:30080 "EHLO mga07.intel.com"
	rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP
	id S1754869AbcJNXKI (ORCPT );
	Fri, 14 Oct 2016 19:10:08 -0400
X-ExtLoop1: 1
X-IronPort-AV: E=Sophos;i="5.31,495,1473145200"; d="scan'208";a="19747849"
From: "Fenghua Yu"
To: "Thomas Gleixner"
Cc: "H. Peter Anvin", "Ingo Molnar", "Tony Luck", "Peter Zijlstra",
	"Stephane Eranian", "Borislav Petkov", "Dave Hansen", "Nilay Vaish",
	"Shaohua Li", "David Carrillo-Cisneros", "Ravi V Shankar",
	"Sai Prakhya", "Vikas Shivappa", "linux-kernel", "x86", "Fenghua Yu"
Subject: [PATCH v4 15/18] x86/intel_rdt: Add tasks files
Date: Fri, 14 Oct 2016 19:12:25 -0700
Message-Id: <1476497548-11169-16-git-send-email-fenghua.yu@intel.com>
X-Mailer: git-send-email 1.8.0.1
In-Reply-To: <1476497548-11169-1-git-send-email-fenghua.yu@intel.com>
References: <1476497548-11169-1-git-send-email-fenghua.yu@intel.com>
Sender: linux-kernel-owner@vger.kernel.org
List-ID:
X-Mailing-List: linux-kernel@vger.kernel.org

From: Fenghua Yu

The root directory and all subdirectories are automatically populated with
a read/write (mode 0644) file named "tasks". When read, it shows all the
task IDs assigned to the resource group. Tasks can be added (one at a time)
to a group by writing the task ID to the file; an example is given below.

Membership in a resource group is indicated by a new field in the
task_struct, "int closid", which holds the CLOSID for each task. The
default resource group uses CLOSID=0, which means that all tasks that
exist when the resctrl file system is mounted belong to the default group.

A resource group cannot be removed while there are tasks assigned to it.

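For illustration, a task can be moved into a resource group from user space
by writing its PID to that group's "tasks" file. The sketch below is not
part of this patch; the /sys/fs/resctrl mount point and the group name "p0"
are assumptions:

  #include <stdio.h>
  #include <sys/types.h>
  #include <unistd.h>

  /* Write one task ID to a resource group's "tasks" file. */
  static int assign_task_to_group(const char *tasks_path, pid_t pid)
  {
          FILE *f = fopen(tasks_path, "w");

          if (!f)
                  return -1;
          /* The interface accepts one task ID per write. */
          if (fprintf(f, "%d\n", (int)pid) < 0) {
                  fclose(f);
                  return -1;
          }
          return fclose(f);
  }

  int main(void)
  {
          /* Move the calling process into the (assumed) group "p0". */
          return assign_task_to_group("/sys/fs/resctrl/p0/tasks",
                                      getpid()) ? 1 : 0;
  }

Only one task ID is accepted per write; non-numeric or negative input is
rejected with -EINVAL by rdtgroup_tasks_write() in the patch below.
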
Signed-off-by: Fenghua Yu
Signed-off-by: Tony Luck
---
 arch/x86/kernel/cpu/intel_rdt_rdtgroup.c | 173 +++++++++++++++++++++++++++++++
 include/linux/sched.h                    |   3 +
 2 files changed, 176 insertions(+)

diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
index f2d7a3a..bdbe2d1 100644
--- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
+++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
@@ -28,6 +28,7 @@
 #include
 #include
 #include
+#include <linux/task_work.h>
 #include
@@ -278,6 +279,152 @@ unlock:
 	return ret ?: nbytes;
 }

+struct task_move_callback {
+	struct callback_head work;
+	struct rdtgroup *rdtgrp;
+};
+
+static void move_myself(struct callback_head *head)
+{
+	struct task_move_callback *callback;
+	struct rdtgroup *rdtgrp;
+
+	callback = container_of(head, struct task_move_callback, work);
+	rdtgrp = callback->rdtgrp;
+
+	/* Resource group might have been deleted before process runs */
+	if (atomic_dec_and_test(&rdtgrp->waitcount) &&
+	    (rdtgrp->flags & RDT_DELETED)) {
+		current->closid = 0;
+		kfree(rdtgrp);
+	}
+
+	kfree(callback);
+}
+
+static int __rdtgroup_move_task(struct task_struct *tsk,
+				struct rdtgroup *rdtgrp)
+{
+	struct task_move_callback *callback;
+	int ret;
+
+	callback = kzalloc(sizeof(*callback), GFP_KERNEL);
+	if (!callback)
+		return -ENOMEM;
+	callback->work.func = move_myself;
+	callback->rdtgrp = rdtgrp;
+	atomic_inc(&rdtgrp->waitcount);
+	ret = task_work_add(tsk, &callback->work, true);
+	if (ret) {
+		atomic_dec(&rdtgrp->waitcount);
+		kfree(callback);
+	} else {
+		tsk->closid = rdtgrp->closid;
+	}
+	return ret;
+}
+
+static int rdtgroup_task_write_permission(struct task_struct *task,
+					  struct kernfs_open_file *of)
+{
+	const struct cred *cred = current_cred();
+	const struct cred *tcred = get_task_cred(task);
+	int ret = 0;
+
+	/*
+	 * even if we're attaching all tasks in the thread group, we only
+	 * need to check permissions on one of them.
+	 */
+	if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
+	    !uid_eq(cred->euid, tcred->uid) &&
+	    !uid_eq(cred->euid, tcred->suid))
+		ret = -EPERM;
+
+	put_cred(tcred);
+	return ret;
+}
+
+static int rdtgroup_move_task(pid_t pid, struct rdtgroup *rdtgrp,
+			      struct kernfs_open_file *of)
+{
+	struct task_struct *tsk;
+	int ret;
+
+	rcu_read_lock();
+	if (pid) {
+		tsk = find_task_by_vpid(pid);
+		if (!tsk) {
+			ret = -ESRCH;
+			goto out_unlock_rcu;
+		}
+	} else {
+		tsk = current;
+	}
+
+	get_task_struct(tsk);
+	rcu_read_unlock();
+
+	ret = rdtgroup_task_write_permission(tsk, of);
+	if (!ret)
+		ret = __rdtgroup_move_task(tsk, rdtgrp);
+
+	put_task_struct(tsk);
+	return ret;
+
+out_unlock_rcu:
+	rcu_read_unlock();
+	return ret;
+}
+
+static ssize_t rdtgroup_tasks_write(struct kernfs_open_file *of,
+				    char *buf, size_t nbytes, loff_t off)
+{
+	struct rdtgroup *rdtgrp;
+	pid_t pid;
+	int ret = 0;
+
+	if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
+		return -EINVAL;
+	rdtgrp = rdtgroup_kn_lock_live(of->kn);
+
+	if (rdtgrp)
+		ret = rdtgroup_move_task(pid, rdtgrp, of);
+	else
+		ret = -ENOENT;
+
+	rdtgroup_kn_unlock(of->kn);
+
+	return ret ?: nbytes;
+}
+
+static void show_rdt_tasks(struct rdtgroup *r, struct seq_file *s)
+{
+	struct task_struct *p;
+
+	rcu_read_lock();
+	for_each_process(p) {
+		if (p->closid == r->closid)
+			seq_printf(s, "%d\n", p->pid);
+	}
+	rcu_read_unlock();
+}
+
+static int rdtgroup_tasks_show(struct kernfs_open_file *of,
+			       struct seq_file *s, void *v)
+{
+	struct rdtgroup *rdtgrp;
+	int ret = 0;
+
+	rdtgrp = rdtgroup_kn_lock_live(of->kn);
+	if (rdtgrp)
+		show_rdt_tasks(rdtgrp, s);
+	else
+		ret = -ENOENT;
+	rdtgroup_kn_unlock(of->kn);
+
+	return ret;
+}
+
 /* Files in each rdtgroup */
 static struct rftype rdtgroup_base_files[] = {
 	{
@@ -288,6 +435,13 @@ static struct rftype rdtgroup_base_files[] = {
 		.seq_show = rdtgroup_cpus_show,
 	},
 	{
+		.name = "tasks",
+		.mode = 0644,
+		.kf_ops = &rdtgroup_kf_single_ops,
+		.write = rdtgroup_tasks_write,
+		.seq_show = rdtgroup_tasks_show,
+	},
+	{
 		/* NULL terminated */
 	}
 };
@@ -559,6 +713,13 @@ static void rmdir_all_sub(void)
 {
 	struct rdtgroup *rdtgrp;
 	struct list_head *l, *next;
+	struct task_struct *p;
+
+	/* move all tasks to default resource group */
+	read_lock(&tasklist_lock);
+	for_each_process(p)
+		p->closid = 0;
+	read_unlock(&tasklist_lock);

 	get_cpu();
 	/* Reset PQR_ASSOC MSR on this cpu. */
@@ -681,6 +842,7 @@ static int rdtgroup_rmdir(struct kernfs_node *kn)
 {
 	struct rdtgroup *rdtgrp;
 	int ret = 0;
+	struct task_struct *p;

 	rdtgrp = rdtgroup_kn_lock_live(kn);
 	if (!rdtgrp) {
@@ -698,6 +860,17 @@ static int rdtgroup_rmdir(struct kernfs_node *kn)
 		return -EPERM;
 	}

+	/* Don't allow if there are processes in this group */
+	read_lock(&tasklist_lock);
+	for_each_process(p) {
+		if (p->closid == rdtgrp->closid) {
+			read_unlock(&tasklist_lock);
+			rdtgroup_kn_unlock(kn);
+			return -EBUSY;
+		}
+	}
+	read_unlock(&tasklist_lock);
+
 	/* Give any CPUs back to the default group */
 	cpumask_or(&rdtgroup_default.cpu_mask, &rdtgroup_default.cpu_mask,
 		   &rdtgrp->cpu_mask);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 62c68e5..8a05c46 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1766,6 +1766,9 @@ struct task_struct {
 	/* cg_list protected by css_set_lock and tsk->alloc_lock */
 	struct list_head cg_list;
 #endif
+#ifdef CONFIG_INTEL_RDT_A
+	int closid;
+#endif
 #ifdef CONFIG_FUTEX
 	struct robust_list_head __user *robust_list;
 #ifdef CONFIG_COMPAT
-- 
2.5.0
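
A usage note on the rmdir path above: a resource group directory cannot be
removed while tasks are still assigned to it; rdtgroup_rmdir() returns
-EBUSY in that case. The sketch below is illustrative only and not part of
this patch; the /sys/fs/resctrl mount point and the group name "p0" are
assumptions:

  #include <errno.h>
  #include <stdio.h>
  #include <unistd.h>

  int main(void)
  {
          /* Try to remove an (assumed) resource group directory. */
          if (rmdir("/sys/fs/resctrl/p0") == 0) {
                  printf("group removed\n");
          } else if (errno == EBUSY) {
                  /*
                   * Tasks are still assigned to the group. Move them out
                   * first, e.g. by writing their PIDs to the root group's
                   * "tasks" file, then retry the rmdir.
                   */
                  printf("group still has tasks assigned\n");
          } else {
                  perror("rmdir");
          }
          return 0;
  }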