bpf.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
From: Song Liu <songliubraving@fb.com>
To: Daniel Borkmann <daniel@iogearbox.net>
Cc: bpf <bpf@vger.kernel.org>,
	"netdev@vger.kernel.org" <netdev@vger.kernel.org>,
	"ast@kernel.org" <ast@kernel.org>,
	"andrii@kernel.org" <andrii@kernel.org>,
	Kernel Team <Kernel-team@fb.com>,
	"kpsingh@kernel.org" <kpsingh@kernel.org>
Subject: Re: [PATCH bpf-next 1/2] bpf: introduce helper bpf_find_vma
Date: Wed, 3 Nov 2021 23:54:29 +0000	[thread overview]
Message-ID: <65E6A08E-1B20-4CFA-9756-4759728D21E2@fb.com> (raw)
In-Reply-To: <368db409-c932-030b-a5bc-89efa56de289@iogearbox.net>



> On Nov 1, 2021, at 3:23 PM, Daniel Borkmann <daniel@iogearbox.net> wrote:
> 
> On 10/28/21 12:00 AM, Song Liu wrote:
> [...]
>>  /* integer value in 'imm' field of BPF_CALL instruction selects which helper
>> diff --git a/kernel/bpf/task_iter.c b/kernel/bpf/task_iter.c
>> index b48750bfba5aa..ad30f2e885356 100644
>> --- a/kernel/bpf/task_iter.c
>> +++ b/kernel/bpf/task_iter.c
>> @@ -8,6 +8,7 @@
>>  #include <linux/fdtable.h>
>>  #include <linux/filter.h>
>>  #include <linux/btf_ids.h>
>> +#include <linux/irq_work.h>
>>    struct bpf_iter_seq_task_common {
>>  	struct pid_namespace *ns;
>> @@ -21,6 +22,25 @@ struct bpf_iter_seq_task_info {
>>  	u32 tid;
>>  };
>>  +/* irq_work to run mmap_read_unlock() */
>> +struct task_iter_irq_work {
>> +	struct irq_work irq_work;
>> +	struct mm_struct *mm;
>> +};
>> +
>> +static DEFINE_PER_CPU(struct task_iter_irq_work, mmap_unlock_work);
>> +
>> +static void do_mmap_read_unlock(struct irq_work *entry)
>> +{
>> +	struct task_iter_irq_work *work;
>> +
>> +	if (WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_RT)))
>> +		return;
>> +
>> +	work = container_of(entry, struct task_iter_irq_work, irq_work);
>> +	mmap_read_unlock_non_owner(work->mm);
>> +}
>> +
>>  static struct task_struct *task_seq_get_next(struct pid_namespace *ns,
>>  					     u32 *tid,
>>  					     bool skip_if_dup_files)
>> @@ -586,9 +606,89 @@ static struct bpf_iter_reg task_vma_reg_info = {
>>  	.seq_info		= &task_vma_seq_info,
>>  };
>>  +BPF_CALL_5(bpf_find_vma, struct task_struct *, task, u64, start,
>> +	   bpf_callback_t, callback_fn, void *, callback_ctx, u64, flags)
>> +{
>> +	struct task_iter_irq_work *work = NULL;
>> +	struct mm_struct *mm = task->mm;
> 
> Won't this NULL deref if called with task argument as NULL?

Will fix. 

> 
>> +	struct vm_area_struct *vma;
>> +	bool irq_work_busy = false;
>> +	int ret = -ENOENT;
>> +
>> +	if (flags)
>> +		return -EINVAL;
>> +
>> +	if (!mm)
>> +		return -ENOENT;
>> +
>> +	/*
>> +	 * Similar to stackmap with build_id support, we cannot simply do
>> +	 * mmap_read_unlock when the irq is disabled. Instead, we need do
>> +	 * the unlock in the irq_work.
>> +	 */
>> +	if (irqs_disabled()) {
>> +		if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
>> +			work = this_cpu_ptr(&mmap_unlock_work);
>> +			if (irq_work_is_busy(&work->irq_work)) {
>> +				/* cannot queue more mmap_unlock, abort. */
>> +				irq_work_busy = true;
>> +			}
>> +		} else {
>> +			/*
>> +			 * PREEMPT_RT does not allow to trylock mmap sem in
>> +			 * interrupt disabled context, abort.
>> +			 */
>> +			irq_work_busy = true;
>> +		}
>> +	}
>> +
>> +	if (irq_work_busy || !mmap_read_trylock(mm))
>> +		return -EBUSY;
>> +
>> +	vma = find_vma(mm, start);
>> +
>> +	if (vma && vma->vm_start <= start && vma->vm_end > start) {
>> +		callback_fn((u64)(long)task, (u64)(long)vma,
>> +			    (u64)(long)callback_ctx, 0, 0);
>> +		ret = 0;
>> +	}
>> +	if (!work) {
>> +		mmap_read_unlock(current->mm);
>> +	} else {
>> +		work->mm = current->mm;
>> +
>> +		/* The lock will be released once we're out of interrupt
>> +		 * context. Tell lockdep that we've released it now so
>> +		 * it doesn't complain that we forgot to release it.
>> +		 */
>> +		rwsem_release(&current->mm->mmap_lock.dep_map, _RET_IP_);
>> +		irq_work_queue(&work->irq_work);
>> +	}
> 
> Given this is pretty much the same logic around the vma retrieval, could this be
> refactored/consolidated with stack map build id retrieval into a common function?

I thought about sharing the irq_work code among the two. The problem was that we
would need to include irq_work.h in bpf.h. But on second thought, maybe we should
just move bpf_find_vma to stackmap.c? This would avoid including irq_work.h. I
guess it is not too weird to have bpf_find_vma in stackmap.c.

> 
>> +	return ret;
>> +}
>> +
>> +BTF_ID_LIST_SINGLE(btf_find_vma_ids, struct, task_struct)
>> +
>> +const struct bpf_func_proto bpf_find_vma_proto = {
>> +	.func		= bpf_find_vma,
>> +	.ret_type	= RET_INTEGER,
>> +	.arg1_type	= ARG_PTR_TO_BTF_ID,
>> +	.arg1_btf_id	= &btf_find_vma_ids[0],
>> +	.arg2_type	= ARG_ANYTHING,
>> +	.arg3_type	= ARG_PTR_TO_FUNC,
>> +	.arg4_type	= ARG_PTR_TO_STACK_OR_NULL,
>> +	.arg5_type	= ARG_ANYTHING,
>> +};
> [...]
>> diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
>> index c108200378834..056c00da1b5d6 100644
>> --- a/tools/include/uapi/linux/bpf.h
>> +++ b/tools/include/uapi/linux/bpf.h
>> @@ -4915,6 +4915,24 @@ union bpf_attr {
>>   *		Dynamically cast a *sk* pointer to a *unix_sock* pointer.
>>   *	Return
>>   *		*sk* if casting is valid, or **NULL** otherwise.
>> + * long bpf_find_vma(struct task_struct *task, u64 addr, void *callback_fn, void *callback_ctx, u64 flags)
> 
> nit: Wrongly copied uapi header over to tooling?

Right... You've got really good eyes. :-)

[...]

  reply	other threads:[~2021-11-03 23:54 UTC|newest]

Thread overview: 7+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2021-10-27 22:00 [PATCH bpf-next 0/2] introduce bpf_find_vma Song Liu
2021-10-27 22:00 ` [PATCH bpf-next 1/2] bpf: introduce helper bpf_find_vma Song Liu
2021-11-01 22:23   ` Daniel Borkmann
2021-11-03 23:54     ` Song Liu [this message]
2021-11-04  5:56       ` Song Liu
2021-10-27 22:00 ` [PATCH bpf-next 2/2] selftests/bpf: add tests for bpf_find_vma Song Liu
2021-11-01 22:26   ` Daniel Borkmann

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=65E6A08E-1B20-4CFA-9756-4759728D21E2@fb.com \
    --to=songliubraving@fb.com \
    --cc=Kernel-team@fb.com \
    --cc=andrii@kernel.org \
    --cc=ast@kernel.org \
    --cc=bpf@vger.kernel.org \
    --cc=daniel@iogearbox.net \
    --cc=kpsingh@kernel.org \
    --cc=netdev@vger.kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).