From: haoxin <xhao@linux.alibaba.com>
To: David Hildenbrand <david@redhat.com>, willy@infradead.org
Cc: akpm@linux-foundation.org, adobriyan@gmail.com,
	keescook@chromium.org, linux-kernel@vger.kernel.org,
	linux-mm@kvack.org
Subject: Re: [RFC PATCH V4 1/1] mm: add last level page table numa info to /proc/pid/numa_pgtable
Date: Thu, 4 Aug 2022 16:04:17 +0800	[thread overview]
Message-ID: <bc0d16a6-e340-e261-82a0-e17bd236c2d9@linux.alibaba.com> (raw)
In-Reply-To: <0c1f9e76-9b1d-7069-bb09-c18e4f19f0c4@redhat.com>


On 2022/8/1 at 9:28 PM, David Hildenbrand wrote:
> On 01.08.22 14:17, Xin Hao wrote:
>> In many data center servers, the shared memory architectures is
>> Non-Uniform Memory Access (NUMA), remote numa node data access
>> often brings a high latency problem, but what we are easy to ignore
>> is that the page table remote numa access, It can also leads to a
>> performance degradation.
> Let me try rewriting:
>
> "
> Many data center servers employ Non-Uniform Memory Access (NUMA)
> architectures. Remote numa memory access results in high latency. While
> memory placement is one issue, sub-optimal page table placement can also
> result in surprise performance degradation.
> "
Thanks, that reads much more clearly.

>> So this patch adds a new interface in /proc. This will help developers
>> get more information about performance issues caused by cross-NUMA
>> page table placement.
>
> Why do we only care about "last level page table", why not about the others?
>
> IMHO, we could emit something like "0, 1, 3, 0" instead for a given user
> space address, showing the NUMA node the page table belongs to from
> highest to lowest page table level.

I plan to cover only the PTE-level page table in this version, and then
add support for the other page table levels in a follow-up patch. To make
your suggestion concrete, here is a rough sketch of the per-level
reporting I have in mind.
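
(Untested sketch only; it assumes the generic pgd/p4d/pud/pmd helpers
and the seq_file API, and it ignores locking and level folding, so on
architectures with a folded p4d the first two values will be the same.)

#include <linux/mm.h>
#include <linux/pgtable.h>
#include <linux/seq_file.h>

/*
 * Print the NUMA node of each page table level backing @addr, from
 * highest (PGD) to lowest present level, e.g. "0, 1, 3, 0" as you
 * suggested above.
 */
static void show_pgtable_nodes(struct seq_file *m, struct mm_struct *mm,
			       unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	/* The table page an entry pointer falls in is the table itself. */
	seq_printf(m, "%d", page_to_nid(virt_to_page(pgd)));
	if (pgd_none_or_clear_bad(pgd))
		goto out;

	p4d = p4d_offset(pgd, addr);
	seq_printf(m, ", %d", page_to_nid(virt_to_page(p4d)));
	if (p4d_none_or_clear_bad(p4d))
		goto out;

	pud = pud_offset(p4d, addr);
	seq_printf(m, ", %d", page_to_nid(virt_to_page(pud)));
	if (pud_none_or_clear_bad(pud))
		goto out;

	pmd = pmd_offset(pud, addr);
	seq_printf(m, ", %d", page_to_nid(virt_to_page(pmd)));
	if (pmd_none(*pmd) || pmd_leaf(*pmd))
		goto out;

	/* PTE table page itself, the "last level" this patch reports. */
	seq_printf(m, ", %d", page_to_nid(pmd_page(*pmd)));
out:
	seq_putc(m, '\n');
}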

>
>> Reported-by: kernel test robot <lkp@intel.com>
> The kernel test robot reported that we need "/proc/pid/numa_pgtable" ?! :)
>
> Just drop that unless it's a follow-up fix.
Got it, I will drop it.
>
>> Signed-off-by: Xin Hao <xhao@linux.alibaba.com>
>> ---
>>   fs/proc/base.c     |  2 ++
>>   fs/proc/internal.h |  1 +
>>   fs/proc/task_mmu.c | 87 ++++++++++++++++++++++++++++++++++++++++++++++
>>   3 files changed, 90 insertions(+)
>>
>> diff --git a/fs/proc/base.c b/fs/proc/base.c
>> index 8dfa36a99c74..487e82dd3275 100644
>> --- a/fs/proc/base.c
>> +++ b/fs/proc/base.c
>> @@ -3224,6 +3224,7 @@ static const struct pid_entry tgid_base_stuff[] = {
>>   	REG("maps",       S_IRUGO, proc_pid_maps_operations),
>>   #ifdef CONFIG_NUMA
>>   	REG("numa_maps",  S_IRUGO, proc_pid_numa_maps_operations),
>> +	REG("numa_pgtable", S_IRUGO, proc_pid_numa_pgtable_operations),
>>   #endif
>>   	REG("mem",        S_IRUSR|S_IWUSR, proc_mem_operations),
>>   	LNK("cwd",        proc_cwd_link),
>> @@ -3571,6 +3572,7 @@ static const struct pid_entry tid_base_stuff[] = {
>>   #endif
>>   #ifdef CONFIG_NUMA
>>   	REG("numa_maps", S_IRUGO, proc_pid_numa_maps_operations),
>> +	REG("numa_pgtable", S_IRUGO, proc_pid_numa_pgtable_operations),
>>   #endif
>>   	REG("mem",       S_IRUSR|S_IWUSR, proc_mem_operations),
>>   	LNK("cwd",       proc_cwd_link),
>> diff --git a/fs/proc/internal.h b/fs/proc/internal.h
>> index 06a80f78433d..e7ed9ef097b6 100644
>> --- a/fs/proc/internal.h
>> +++ b/fs/proc/internal.h
>> @@ -296,6 +296,7 @@ struct mm_struct *proc_mem_open(struct inode *inode, unsigned int mode);
>>
>>   extern const struct file_operations proc_pid_maps_operations;
>>   extern const struct file_operations proc_pid_numa_maps_operations;
>> +extern const struct file_operations proc_pid_numa_pgtable_operations;
>>   extern const struct file_operations proc_pid_smaps_operations;
>>   extern const struct file_operations proc_pid_smaps_rollup_operations;
>>   extern const struct file_operations proc_clear_refs_operations;
>> diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
>> index 2d04e3470d4c..77b7a49757f5 100644
>> --- a/fs/proc/task_mmu.c
>> +++ b/fs/proc/task_mmu.c
>> @@ -1999,4 +1999,91 @@ const struct file_operations proc_pid_numa_maps_operations = {
>>   	.release	= proc_map_release,
>>   };
>>
>> +struct pgtable_numa_private {
>> +	struct proc_maps_private proc_maps;
>> +	unsigned long node[MAX_NUMNODES];
>> +};
>> +
>> +static int gather_pgtable_numa_stats(pmd_t *pmd, unsigned long addr,
>> +				     unsigned long end, struct mm_walk *walk)
>> +{
>> +	struct pgtable_numa_private *priv = walk->private;
>> +	struct page *page;
>> +	int nid;
>> +
>> +	if (pmd_huge(*pmd)) {
>> +		page = virt_to_page(pmd);
>> +	} else {
>> +		page = pmd_page(*pmd);
>> +	}
>> +
>> +	nid = page_to_nid(page);
>> +	priv->node[nid]++;
>> +
>> +	return 0;
>> +}
>> +
>> +static const struct mm_walk_ops show_numa_pgtable_ops = {
>> +	.pmd_entry = gather_pgtable_numa_stats,
>> +};
>> +
>> +/*
>> + * Display the page talbe allocated per node via /proc.
> s/talbe/table/
>
> but the comment somehow doesn't make sense. We don't display a page table.
>
>> + */
>> +static int show_numa_pgtable(struct seq_file *m, void *v)
>> +{
>> +	struct pgtable_numa_private *numa_priv = m->private;
>> +	struct vm_area_struct *vma = v;
>> +	struct mm_struct *mm = vma->vm_mm;
>> +	struct file *file = vma->vm_file;
>> +	int nid;
>> +
>> +	if (!mm)
>> +		return 0;
>> +
>> +	memset(numa_priv->node, 0, sizeof(numa_priv->node));
>> +
>> +	seq_printf(m, "%08lx ", vma->vm_start);
>> +
>> +	if (file) {
>> +		seq_puts(m, " file=");
>> +		seq_file_path(m, file, "\n\t= ");
>> +	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
>> +		seq_puts(m, " heap");
>> +	} else if (is_stack(vma)) {
>> +		seq_puts(m, " stack");
>> +	}
>> +
>> +	/* mmap_lock is held by m_start */
>> +	walk_page_vma(vma, &show_numa_pgtable_ops, numa_priv);
>> +
>> +	for_each_node_state(nid, N_MEMORY) {
>> +		if (numa_priv->node[nid])
>> +			seq_printf(m, " N%d=%lu", nid, numa_priv->node[nid]);
>> +	}
>> +	seq_putc(m, '\n');
>> +
>> +	return 0;
>> +}
>> +
>> +static const struct seq_operations proc_pid_numa_pgtable_op = {
>> +	.start  = m_start,
>> +	.next   = m_next,
>> +	.stop   = m_stop,
>> +	.show   = show_numa_pgtable,
>> +};
>> +
>> +static int pid_numa_pgtable_open(struct inode *inode, struct file *file)
>> +{
>> +	return proc_maps_open(inode, file, &proc_pid_numa_pgtable_op,
>> +			sizeof(struct pgtable_numa_private));
>> +}
>> +
>> +const struct file_operations proc_pid_numa_pgtable_operations = {
>> +	.open		= pid_numa_pgtable_open,
>> +	.read		= seq_read,
>> +	.llseek		= seq_lseek,
>> +	.release	= proc_map_release,
>> +};
>> +
>>   #endif /* CONFIG_NUMA */
>> --
>> 2.31.0
>>
>
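
For context, with the current patch each VMA gets one line, and the
output looks roughly like this (addresses and counts are made up purely
for illustration):

	00400000  file=/usr/bin/cat N0=2
	7ffe9a000000  stack N0=1

where "Nx=y" means y of the VMA's last-level page table pages were
allocated on node x.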
