linux-modules.vger.kernel.org archive mirror
* [PATCH] module: Merge same-name module load requests
@ 2022-09-05  8:41 Petr Pavlu
  2022-09-05 14:29 ` Petr Mladek
  2022-09-05 14:47 ` kernel test robot
  0 siblings, 2 replies; 4+ messages in thread
From: Petr Pavlu @ 2022-09-05  8:41 UTC (permalink / raw)
  To: mcgrof; +Cc: linux-modules, linux-kernel, pmladek, mwilck, Petr Pavlu

During a system boot, it can happen that the kernel receives a burst of
requests to insert the same module but loading it eventually fails
during its init call. For instance, udev can make a request to insert
a frequency module for each individual CPU when another frequency module
is already loaded, which causes the init function of the new module to
return an error.

The module loader currently serializes all such requests, with the
barrier in add_unformed_module(). This creates a lot of unnecessary work
and delays the boot.

This patch improves the behavior as follows:
* A check whether a module load matches an already loaded module is
  moved right after a module name is determined.
* A new reference-counted shared_load_info structure is introduced to
  keep track of duplicate load requests. Two loads are considered
  equivalent if their module name matches. In case a load duplicates
  another running insert, the code waits for its completion and then
  returns -EEXIST or -ENODEV depending on whether it succeeded.

Note that prior to 6e6de3dee51a ("kernel/module.c: Only return -EEXIST
for modules that have finished loading"), the kernel already did merge
some such load requests, but it was more by accident and relied on
specific timing. The patch brings this behavior back in a more explicit
form.

Signed-off-by: Petr Pavlu <petr.pavlu@suse.com>
---
 kernel/module/main.c | 207 ++++++++++++++++++++++++++++++-------------
 1 file changed, 144 insertions(+), 63 deletions(-)

diff --git a/kernel/module/main.c b/kernel/module/main.c
index a4e4d84b6f4e..24d0777c48e3 100644
--- a/kernel/module/main.c
+++ b/kernel/module/main.c
@@ -61,14 +61,28 @@
 
 /*
  * Mutex protects:
- * 1) List of modules (also safely readable with preempt_disable),
+ * 1) list of modules (also safely readable with preempt_disable; delete and add
+ *    use RCU list operations),
  * 2) module_use links,
- * 3) mod_tree.addr_min/mod_tree.addr_max.
- * (delete and add uses RCU list operations).
+ * 3) mod_tree.addr_min/mod_tree.addr_max,
+ * 4) list of unloaded_tainted_modules,
+ * 5) list of running_loads.
  */
 DEFINE_MUTEX(module_mutex);
 LIST_HEAD(modules);
 
+/* Shared information to track duplicate module loads. */
+struct shared_load_info {
+	char name[MODULE_NAME_LEN];
+	refcount_t refcnt;
+	struct list_head list;
+	int err;
+};
+LIST_HEAD(running_loads);
+
+/* Waiting for a module to finish loading? */
+static DECLARE_WAIT_QUEUE_HEAD(module_wq);
+
 /* Work queue for freeing init sections in success case */
 static void do_free_init(struct work_struct *w);
 static DECLARE_WORK(init_free_wq, do_free_init);
@@ -122,9 +136,6 @@ static void mod_update_bounds(struct module *mod)
 int modules_disabled;
 core_param(nomodule, modules_disabled, bint, 0);
 
-/* Waiting for a module to finish initializing? */
-static DECLARE_WAIT_QUEUE_HEAD(module_wq);
-
 static BLOCKING_NOTIFIER_HEAD(module_notify_list);
 
 int register_module_notifier(struct notifier_block *nb)
@@ -762,8 +773,6 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
 	strscpy(last_unloaded_module.taints, module_flags(mod, buf, false), sizeof(last_unloaded_module.taints));
 
 	free_module(mod);
-	/* someone could wait for the module in add_unformed_module() */
-	wake_up_all(&module_wq);
 	return 0;
 out:
 	mutex_unlock(&module_mutex);
@@ -2374,26 +2383,6 @@ static int post_relocation(struct module *mod, const struct load_info *info)
 	return module_finalize(info->hdr, info->sechdrs, mod);
 }
 
-/* Is this module of this name done loading?  No locks held. */
-static bool finished_loading(const char *name)
-{
-	struct module *mod;
-	bool ret;
-
-	/*
-	 * The module_mutex should not be a heavily contended lock;
-	 * if we get the occasional sleep here, we'll go an extra iteration
-	 * in the wait_event_interruptible(), which is harmless.
-	 */
-	sched_annotate_sleep();
-	mutex_lock(&module_mutex);
-	mod = find_module_all(name, strlen(name), true);
-	ret = !mod || mod->state == MODULE_STATE_LIVE;
-	mutex_unlock(&module_mutex);
-
-	return ret;
-}
-
 /* Call module constructors. */
 static void do_mod_ctors(struct module *mod)
 {
@@ -2524,7 +2513,6 @@ static noinline int do_init_module(struct module *mod)
 		schedule_work(&init_free_wq);
 
 	mutex_unlock(&module_mutex);
-	wake_up_all(&module_wq);
 
 	return 0;
 
@@ -2540,7 +2528,6 @@ static noinline int do_init_module(struct module *mod)
 	klp_module_going(mod);
 	ftrace_release_mod(mod);
 	free_module(mod);
-	wake_up_all(&module_wq);
 	return ret;
 }
 
@@ -2552,43 +2539,129 @@ static int may_init_module(void)
 	return 0;
 }
 
+static struct shared_load_info *
+shared_load_info_alloc(const struct load_info *info)
+{
+	struct shared_load_info *shared_info =
+		kzalloc(sizeof(*shared_info), GFP_KERNEL);
+	if (shared_info == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	strscpy(shared_info->name, info->name, sizeof(shared_info->name));
+	refcount_set(&shared_info->refcnt, 1);
+	INIT_LIST_HEAD(&shared_info->list);
+	return shared_info;
+}
+
+static void shared_load_info_get(struct shared_load_info *shared_info)
+{
+	refcount_inc(&shared_info->refcnt);
+}
+
+static void shared_load_info_put(struct shared_load_info *shared_info)
+{
+	if (refcount_dec_and_test(&shared_info->refcnt))
+		kfree(shared_info);
+}
+
 /*
- * We try to place it in the list now to make sure it's unique before
- * we dedicate too many resources.  In particular, temporary percpu
+ * Check that the module load is unique and make it visible to others. The code
+ * looks for parallel running inserts and already loaded modules. Two inserts
+ * are considered equivalent if their module name matches. In case this load
+ * duplicates another running insert, the code waits for its completion and
+ * then returns -EEXIST or -ENODEV depending on whether it succeeded.
+ *
+ * Detecting early that a load is unique avoids dedicating too many cycles and
+ * resources to bring up the module. In particular, it prevents temporary percpu
  * memory exhaustion.
+ *
+ * Merging same-name load requests, in turn, primarily helps during the boot
+ * process. It can happen that the kernel receives a burst of requests to load
+ * the same module (for example, the same module for each individual CPU) and
+ * loading it eventually fails during its init call. Merging the requests
+ * ensures that only one full attempt to load the module is made.
+ *
+ * On a non-error return, it is guaranteed that this load is unique.
  */
-static int add_unformed_module(struct module *mod)
+static struct shared_load_info *add_running_load(const struct load_info *info)
 {
-	int err;
 	struct module *old;
+	struct shared_load_info *shared_info;
 
-	mod->state = MODULE_STATE_UNFORMED;
-
-again:
 	mutex_lock(&module_mutex);
-	old = find_module_all(mod->name, strlen(mod->name), true);
-	if (old != NULL) {
-		if (old->state != MODULE_STATE_LIVE) {
-			/* Wait in case it fails to load. */
+
+	/* Search if there is a running load of a module with the same name. */
+	list_for_each_entry(shared_info, &running_loads, list)
+		if (strcmp(shared_info->name, info->name) == 0) {
+			int err;
+
+			shared_load_info_get(shared_info);
 			mutex_unlock(&module_mutex);
+
 			err = wait_event_interruptible(module_wq,
-					       finished_loading(mod->name));
-			if (err)
-				goto out_unlocked;
-			goto again;
+						       shared_info->err != 0);
+			if (!err)
+				err = shared_info->err;
+			shared_load_info_put(shared_info);
+			shared_info = ERR_PTR(err);
+			goto out_unlocked;
 		}
-		err = -EEXIST;
+
+	/* Search if there is a live module with the given name already. */
+	old = find_module_all(info->name, strlen(info->name), true);
+	if (old != NULL) {
+		if (old->state == MODULE_STATE_LIVE) {
+			shared_info = ERR_PTR(-EEXIST);
+			goto out;
+		}
+
+		/*
+		 * Any active load always has its record in running_loads and so
+		 * would be found above. This applies independently of whether
+		 * such a module is currently in MODULE_STATE_UNFORMED,
+		 * MODULE_STATE_COMING, or even in MODULE_STATE_GOING if its
+		 * initialization failed. It therefore means this must be an
+		 * older going module and the caller should try later once it is
+		 * gone.
+		 */
+		WARN_ON(old->state != MODULE_STATE_GOING);
+		shared_info = ERR_PTR(-EAGAIN);
 		goto out;
 	}
-	mod_update_bounds(mod);
-	list_add_rcu(&mod->list, &modules);
-	mod_tree_insert(mod);
-	err = 0;
+
+	/* The load is unique, make it visible to others. */
+	shared_info = shared_load_info_alloc(info);
+	if (IS_ERR(shared_info))
+		goto out;
+	list_add(&shared_info->list, &running_loads);
 
 out:
 	mutex_unlock(&module_mutex);
 out_unlocked:
-	return err;
+	return shared_info;
+}
+
+/* Complete the running load and inform other duplicate inserts about it. */
+static void finalize_running_load(struct shared_load_info *shared_info, int err)
+{
+	mutex_lock(&module_mutex);
+	list_del(&shared_info->list);
+	shared_info->err = err == 0 ? -EEXIST : -ENODEV;
+	mutex_unlock(&module_mutex);
+
+	wake_up_all(&module_wq);
+	shared_load_info_put(shared_info);
+}
+
+static void add_unformed_module(struct module *mod)
+{
+	mod->state = MODULE_STATE_UNFORMED;
+
+	mutex_lock(&module_mutex);
+	mod_update_bounds(mod);
+	list_add_rcu(&mod->list, &modules);
+	mod_tree_insert(mod);
+	mutex_unlock(&module_mutex);
 }
 
 static int complete_formation(struct module *mod, struct load_info *info)
@@ -2674,6 +2747,7 @@ static void cfi_init(struct module *mod);
 static int load_module(struct load_info *info, const char __user *uargs,
 		       int flags)
 {
+	struct shared_load_info *shared_info;
 	struct module *mod;
 	long err = 0;
 	char *after_dashes;
@@ -2711,38 +2785,43 @@ static int load_module(struct load_info *info, const char __user *uargs,
 		goto free_copy;
 
 	/*
-	 * Now that we know we have the correct module name, check
-	 * if it's blacklisted.
+	 * Now that we know we have the correct module name, check if there is
+	 * another load of the same name in progress.
 	 */
+	shared_info = add_running_load(info);
+	if (IS_ERR(shared_info)) {
+		err = PTR_ERR(shared_info);
+		goto free_copy;
+	}
+
+	/* Check if the module is blacklisted. */
 	if (blacklisted(info->name)) {
 		err = -EPERM;
 		pr_err("Module %s is blacklisted\n", info->name);
-		goto free_copy;
+		goto free_shared;
 	}
 
 	err = rewrite_section_headers(info, flags);
 	if (err)
-		goto free_copy;
+		goto free_shared;
 
 	/* Check module struct version now, before we try to use module. */
 	if (!check_modstruct_version(info, info->mod)) {
 		err = -ENOEXEC;
-		goto free_copy;
+		goto free_shared;
 	}
 
 	/* Figure out module layout, and allocate all the memory. */
 	mod = layout_and_allocate(info, flags);
 	if (IS_ERR(mod)) {
 		err = PTR_ERR(mod);
-		goto free_copy;
+		goto free_shared;
 	}
 
 	audit_log_kern_module(mod->name);
 
 	/* Reserve our place in the list. */
-	err = add_unformed_module(mod);
-	if (err)
-		goto free_module;
+	add_unformed_module(mod);
 
 #ifdef CONFIG_MODULE_SIG
 	mod->sig_ok = info->sig_ok;
@@ -2852,7 +2931,9 @@ static int load_module(struct load_info *info, const char __user *uargs,
 	/* Done! */
 	trace_module_load(mod);
 
-	return do_init_module(mod);
+	err = do_init_module(mod);
+	finalize_running_load(shared_info, err);
+	return err;
 
  sysfs_cleanup:
 	mod_sysfs_teardown(mod);
@@ -2886,15 +2967,15 @@ static int load_module(struct load_info *info, const char __user *uargs,
 	/* Unlink carefully: kallsyms could be walking list. */
 	list_del_rcu(&mod->list);
 	mod_tree_remove(mod);
-	wake_up_all(&module_wq);
 	/* Wait for RCU-sched synchronizing before releasing mod->list. */
 	synchronize_rcu();
 	mutex_unlock(&module_mutex);
- free_module:
 	/* Free lock-classes; relies on the preceding sync_rcu() */
 	lockdep_free_key_range(mod->data_layout.base, mod->data_layout.size);
 
 	module_deallocate(mod, info);
+ free_shared:
+	finalize_running_load(shared_info, err);
  free_copy:
 	free_copy(info, flags);
 	return err;
-- 
2.35.3



* Re: [PATCH] module: Merge same-name module load requests
  2022-09-05  8:41 [PATCH] module: Merge same-name module load requests Petr Pavlu
@ 2022-09-05 14:29 ` Petr Mladek
  2022-09-12 14:58   ` Petr Pavlu
  2022-09-05 14:47 ` kernel test robot
  1 sibling, 1 reply; 4+ messages in thread
From: Petr Mladek @ 2022-09-05 14:29 UTC (permalink / raw)
  To: Petr Pavlu; +Cc: mcgrof, linux-modules, linux-kernel, mwilck

On Mon 2022-09-05 10:41:31, Petr Pavlu wrote:
> During a system boot, it can happen that the kernel receives a burst of
> requests to insert the same module but loading it eventually fails
> during its init call. For instance, udev can make a request to insert
> a frequency module for each individual CPU when another frequency module
> is already loaded, which causes the init function of the new module to
> return an error.
>
> The module loader currently serializes all such requests, with the
> barrier in add_unformed_module(). This creates a lot of unnecessary work
> and delays the boot.

Is it just an optimization or does it fix any real problem?
It would be nice to provide some more details here.
Otherwise, we do not know if the behavior change is worth it.


> This patch improves the behavior as follows:
> * A check whether a module load matches an already loaded module is
>   moved right after a module name is determined.
> * A new reference-counted shared_load_info structure is introduced to
>   keep track of duplicate load requests. Two loads are considered
>   equivalent if their module name matches. In case a load duplicates
>   another running insert, the code waits for its completion and then
>   returns -EEXIST or -ENODEV depending on whether it succeeded.

-ENODEV is strange, see https://www.gnu.org/software/libc/manual/html_node/Error-Codes.html

   Macro: int ENODEV

       “No such device.” The wrong type of device was given
       to a function that expects a particular sort of device.

IMHO, it does not fit here. What about -EBUSY?

   Macro: int EBUSY

       “Device or resource busy.” A system resource that can’t
       be shared is already in use. For example, if you try
       to delete a file that is the root of a currently mounted
       filesystem, you get this error.
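
To illustrate what a caller would observe from userspace (a hypothetical
sketch, not part of the patch):

	#include <stdio.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	/* A second loader of the same module; fd refers to the .ko file. */
	static int try_load(int fd)
	{
		if (syscall(SYS_finit_module, fd, "", 0) < 0) {
			/* With the patch as posted: errno is EEXIST if the
			 * parallel load succeeded, ENODEV if it failed. */
			perror("finit_module");
			return -1;
		}
		return 0;
	}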


> 
> Note that prior to 6e6de3dee51a ("kernel/module.c: Only return -EEXIST
> for modules that have finished loading"), the kernel already did merge
> some such load requests, but it was more by accident and relied on
> specific timing. The patch brings this behavior back in a more explicit
> form.
>
> ---
>  kernel/module/main.c | 207 ++++++++++++++++++++++++++++++-------------
>  1 file changed, 144 insertions(+), 63 deletions(-)
> 
> diff --git a/kernel/module/main.c b/kernel/module/main.c
> index a4e4d84b6f4e..24d0777c48e3 100644
> --- a/kernel/module/main.c
> +++ b/kernel/module/main.c
> @@ -2552,43 +2539,129 @@ static int may_init_module(void)
>  	return 0;
>  }
>  
> +static struct shared_load_info *
> +shared_load_info_alloc(const struct load_info *info)
> +{
> +	struct shared_load_info *shared_info =
> +		kzalloc(sizeof(*shared_info), GFP_KERNEL);
> +	if (shared_info == NULL)
> +		return ERR_PTR(-ENOMEM);
> +
> +	strscpy(shared_info->name, info->name, sizeof(shared_info->name));
> +	refcount_set(&shared_info->refcnt, 1);
> +	INIT_LIST_HEAD(&shared_info->list);
> +	return shared_info;
> +}
> +
> +static void shared_load_info_get(struct shared_load_info *shared_info)
> +{
> +	refcount_inc(&shared_info->refcnt);
> +}
> +
> +static void shared_load_info_put(struct shared_load_info *shared_info)
> +{
> +	if (refcount_dec_and_test(&shared_info->refcnt))
> +		kfree(shared_info);
> +}
> +
>  /*
> - * We try to place it in the list now to make sure it's unique before
> - * we dedicate too many resources.  In particular, temporary percpu
> + * Check that the module load is unique and make it visible to others. The code
> + * looks for parallel running inserts and already loaded modules. Two inserts
> + * are considered equivalent if their module name matches. In case this load
> + * duplicates another running insert, the code waits for its completion and
> + * then returns -EEXIST or -ENODEV depending on whether it succeeded.
> + *
> + * Detecting early that a load is unique avoids dedicating too many cycles and
> + * resources to bring up the module. In particular, it prevents temporary percpu
>   * memory exhaustion.
> + *
> + * Merging same-name load requests, in turn, primarily helps during the boot
> + * process. It can happen that the kernel receives a burst of requests to load
> + * the same module (for example, the same module for each individual CPU) and
> + * loading it eventually fails during its init call. Merging the requests
> + * ensures that only one full attempt to load the module is made.
> + *
> + * On a non-error return, it is guaranteed that this load is unique.
>   */
> -static int add_unformed_module(struct module *mod)
> +static struct shared_load_info *add_running_load(const struct load_info *info)
>  {
> -	int err;
>  	struct module *old;
> +	struct shared_load_info *shared_info;
>  
> -	mod->state = MODULE_STATE_UNFORMED;
> -
> -again:
>  	mutex_lock(&module_mutex);
> -	old = find_module_all(mod->name, strlen(mod->name), true);
> -	if (old != NULL) {
> -		if (old->state != MODULE_STATE_LIVE) {
> -			/* Wait in case it fails to load. */
> +
> +	/* Search if there is a running load of a module with the same name. */
> +	list_for_each_entry(shared_info, &running_loads, list)
> +		if (strcmp(shared_info->name, info->name) == 0) {
> +			int err;
> +
> +			shared_load_info_get(shared_info);
>  			mutex_unlock(&module_mutex);
> +
>  			err = wait_event_interruptible(module_wq,
> -					       finished_loading(mod->name));
> -			if (err)
> -				goto out_unlocked;
> -			goto again;
> +						       shared_info->err != 0);
> +			if (!err)
> +				err = shared_info->err;

The logic around shared_info->err is a bit tricky. The value 0
means that the parallel load is still in progress. Any error
value means that it has finished, where -EEXIST means that
the load actually succeeded.

Such optimizations might make sense when they save a lot
of memory. And even in these situations we should do our best
to keep the logic straightforward.

I suggest setting shared_info->err to the value that is actually returned.
And use different logic to check whether the load has finished. Either
add a boolean, or we might actually use shared_info->list.

struct shared_load_info is removed from the @running_loads list when
the load finishes. We could do this in finalize_running_load():

	list_del_init(&shared_info->list);

and here:

			err = wait_event_interruptible(module_wq,
						       list_empty(&shared_info->list));

			/*
			 * Do not retry the module load when the parallel one
			 * failed. But do not return the exact error code
			 * because the parallel load might have used different
			 * module parameters. Instead return -EBUSY.
			 */
			if (!err) {
				err = shared_info->err ? -EBUSY : -EEXIST;
[...]


> +			shared_load_info_put(shared_info);
> +			shared_info = ERR_PTR(err);
> +			goto out_unlocked;
>  		}
> -		err = -EEXIST;
> +
> +	/* Search if there is a live module with the given name already. */
> +	old = find_module_all(info->name, strlen(info->name), true);
> +	if (old != NULL) {
> +		if (old->state == MODULE_STATE_LIVE) {
> +			shared_info = ERR_PTR(-EEXIST);
> +			goto out;
> +		}
> +
> +		/*
> +		 * Any active load always has its record in running_loads and so
> +		 * would be found above. This applies independently of whether
> +		 * such a module is currently in MODULE_STATE_UNFORMED,
> +		 * MODULE_STATE_COMING, or even in MODULE_STATE_GOING if its
> +		 * initialization failed. It therefore means this must be an
> +		 * older going module and the caller should try later once it is
> +		 * gone.
> +		 */
> +		WARN_ON(old->state != MODULE_STATE_GOING);
> +		shared_info = ERR_PTR(-EAGAIN);

I would return -EBUSY here to avoid too many variants. The load failed because
the same module was being loaded or unloaded.

Anyway, it should be described in the commit message.

>  		goto out;
>  	}
> -	mod_update_bounds(mod);
> -	list_add_rcu(&mod->list, &modules);
> -	mod_tree_insert(mod);
> -	err = 0;
> +
> +	/* The load is unique, make it visible to others. */
> +	shared_info = shared_load_info_alloc(info);
> +	if (IS_ERR(shared_info))
> +		goto out;
> +	list_add(&shared_info->list, &running_loads);
>  
>  out:
>  	mutex_unlock(&module_mutex);
>  out_unlocked:
> -	return err;
> +	return shared_info;
> +}
> +
> +/* Complete the running load and inform other duplicate inserts about it. */
> +static void finalize_running_load(struct shared_load_info *shared_info, int err)
> +{
> +	mutex_lock(&module_mutex);
> +	list_del(&shared_info->list);
> +	shared_info->err = err == 0 ? -EEXIST : -ENODEV;

As explained above, I suggest using:

	list_del_init(&shared_info->list);
	shared_info->err = err;

> +	mutex_unlock(&module_mutex);
> +
> +	wake_up_all(&module_wq);

Heh, this should be wake_up_interruptible() to match
the wait_event_interruptible().

The _all() variant is used when there are exclusive waiters. I have
recently learned about it, see
https://lore.kernel.org/all/CAHk-=wgC47n_7E6UtFx_agkJtLmWOXGsjdFjybBFYNA1AheQLQ@mail.gmail.com/

But it should be fixed in a separate patch because the same mistake
was there even before.


Also it would make sense to add the wait queue head into struct
shared_load_info to reduce spurious wakeups. The head is small,
the struct is allocated anyway, and the lifecycle is the same.
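
Something like this (just a rough sketch, not even compile-tested):

	struct shared_load_info {
		char name[MODULE_NAME_LEN];
		refcount_t refcnt;
		struct list_head list;
		int err;
		wait_queue_head_t wq;
	};

	/* in shared_load_info_alloc(): */
	init_waitqueue_head(&shared_info->wq);

	/* in add_running_load(), instead of sleeping on the global module_wq: */
	err = wait_event_interruptible(shared_info->wq,
				       list_empty(&shared_info->list));

	/* in finalize_running_load(): */
	wake_up_interruptible(&shared_info->wq);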


> +	shared_load_info_put(shared_info);
> +}
> +
> +static void add_unformed_module(struct module *mod)
> +{
> +	mod->state = MODULE_STATE_UNFORMED;
> +
> +	mutex_lock(&module_mutex);
> +	mod_update_bounds(mod);
> +	list_add_rcu(&mod->list, &modules);
> +	mod_tree_insert(mod);
> +	mutex_unlock(&module_mutex);
>  }
>  
>  static int complete_formation(struct module *mod, struct load_info *info)

Otherwise, the patch looks good to me.

Best Regards,
Petr


* Re: [PATCH] module: Merge same-name module load requests
  2022-09-05  8:41 [PATCH] module: Merge same-name module load requests Petr Pavlu
  2022-09-05 14:29 ` Petr Mladek
@ 2022-09-05 14:47 ` kernel test robot
  1 sibling, 0 replies; 4+ messages in thread
From: kernel test robot @ 2022-09-05 14:47 UTC (permalink / raw)
  To: Petr Pavlu, mcgrof
  Cc: kbuild-all, linux-modules, linux-kernel, pmladek, mwilck, Petr Pavlu

Hi Petr,

Thank you for the patch! Perhaps something to improve:

[auto build test WARNING on v6.0-rc4]
[also build test WARNING on linus/master next-20220901]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/Petr-Pavlu/module-Merge-same-name-module-load-requests/20220905-164434
base:    7e18e42e4b280c85b76967a9106a13ca61c16179
config: i386-randconfig-s033-20220905 (https://download.01.org/0day-ci/archive/20220905/202209052214.GvWvd88T-lkp@intel.com/config)
compiler: gcc-11 (Debian 11.3.0-5) 11.3.0
reproduce:
        # apt-get install sparse
        # sparse version: v0.6.4-39-gce1a6720-dirty
        # https://github.com/intel-lab-lkp/linux/commit/581eb179f101ce9990a4435e198280b542fa68ed
        git remote add linux-review https://github.com/intel-lab-lkp/linux
        git fetch --no-tags linux-review Petr-Pavlu/module-Merge-same-name-module-load-requests/20220905-164434
        git checkout 581eb179f101ce9990a4435e198280b542fa68ed
        # save the config file
        mkdir build_dir && cp config build_dir/.config
        make W=1 C=1 CF='-fdiagnostic-prefix -D__CHECK_ENDIAN__' O=build_dir ARCH=i386 SHELL=/bin/bash kernel/module/

If you fix the issue, kindly add following tag where applicable
Reported-by: kernel test robot <lkp@intel.com>

sparse warnings: (new ones prefixed by >>)
>> kernel/module/main.c:81:1: sparse: sparse: symbol 'running_loads' was not declared. Should it be static?

-- 
0-DAY CI Kernel Test Service
https://01.org/lkp


* Re: [PATCH] module: Merge same-name module load requests
  2022-09-05 14:29 ` Petr Mladek
@ 2022-09-12 14:58   ` Petr Pavlu
  0 siblings, 0 replies; 4+ messages in thread
From: Petr Pavlu @ 2022-09-12 14:58 UTC (permalink / raw)
  To: Petr Mladek; +Cc: linux-modules, linux-kernel, mwilck, mcgrof

On 9/5/22 16:29, Petr Mladek wrote:
> On Mon 2022-09-05 10:41:31, Petr Pavlu wrote:
>> During a system boot, it can happen that the kernel receives a burst of
>> requests to insert the same module but loading it eventually fails
>> during its init call. For instance, udev can make a request to insert
>> a frequency module for each individual CPU when another frequency module
>> is already loaded, which causes the init function of the new module to
>> return an error.
>>
>> The module loader currently serializes all such requests, with the
>> barrier in add_unformed_module(). This creates a lot of unnecessary work
>> and delays the boot.
> 
> Is it just an optimization or does it fix any real problem?
> It would be nice to provide some more details here.
> Otherwise, we do not know if the behavior change is worth it.

The motivation for this patch is to fix an issue observed on larger machines
with many CPUs, where it can take a significant amount of time during boot to
run systemd-udev-trigger.service. An x86-64 system can already have
intel_pstate active, but since its CPUs also match acpi_cpufreq and
pcc_cpufreq, udev will attempt to load these modules too. The operation
eventually fails in the init function of the respective module, where it is
recognized that another cpufreq driver is already loaded and -EEXIST is
returned. However, one uevent is triggered for each CPU, so multiple loads of
these modules are queued. The current code then processes all such loads
individually and serializes them with the barrier in add_unformed_module().

This can create a significant delay of several minutes. It prevents loading
drivers for other devices and might cause timeouts of services waiting on
them.

The issue was also observed with EDAC drivers, which are similarly exclusive.

The problem was introduced with 6e6de3dee51a ("kernel/module.c: Only return
-EEXIST for modules that have finished loading") but arguably the previous
behavior was not well defined.

>> This patch improves the behavior as follows:
>> * A check whether a module load matches an already loaded module is
>>   moved right after a module name is determined.
>> * A new reference-counted shared_load_info structure is introduced to
>>   keep track of duplicate load requests. Two loads are considered
>>   equivalent if their module name matches. In case a load duplicates
>>   another running insert, the code waits for its completion and then
>>   returns -EEXIST or -ENODEV depending on whether it succeeded.
> 
> -ENODEV is strange, see https://www.gnu.org/software/libc/manual/html_node/Error-Codes.html
> 
>    Macro: int ENODEV
> 
>        “No such device.” The wrong type of device was given
>        to a function that expects a particular sort of device.
> 
> IMHO, it does not fit here. What about -EBUSY?
> 
>    Macro: int EBUSY
> 
>        “Device or resource busy.” A system resource that can’t
>        be shared is already in use. For example, if you try
>        to delete a file that is the root of a currently mounted
>        filesystem, you get this error.

Ok, makes sense.

>> Note that prior to 6e6de3dee51a ("kernel/module.c: Only return -EEXIST
>> for modules that have finished loading"), the kernel already did merge
>> some such load requests, but it was more by accident and relied on
>> specific timing. The patch brings this behavior back in a more explicit
>> form.
>>
>> ---
>>  kernel/module/main.c | 207 ++++++++++++++++++++++++++++++-------------
>>  1 file changed, 144 insertions(+), 63 deletions(-)
>>
>> diff --git a/kernel/module/main.c b/kernel/module/main.c
>> index a4e4d84b6f4e..24d0777c48e3 100644
>> --- a/kernel/module/main.c
>> +++ b/kernel/module/main.c
>> @@ -2552,43 +2539,129 @@ static int may_init_module(void)
>>  	return 0;
>>  }
>>  
>> +static struct shared_load_info *
>> +shared_load_info_alloc(const struct load_info *info)
>> +{
>> +	struct shared_load_info *shared_info =
>> +		kzalloc(sizeof(*shared_info), GFP_KERNEL);
>> +	if (shared_info == NULL)
>> +		return ERR_PTR(-ENOMEM);
>> +
>> +	strscpy(shared_info->name, info->name, sizeof(shared_info->name));
>> +	refcount_set(&shared_info->refcnt, 1);
>> +	INIT_LIST_HEAD(&shared_info->list);
>> +	return shared_info;
>> +}
>> +
>> +static void shared_load_info_get(struct shared_load_info *shared_info)
>> +{
>> +	refcount_inc(&shared_info->refcnt);
>> +}
>> +
>> +static void shared_load_info_put(struct shared_load_info *shared_info)
>> +{
>> +	if (refcount_dec_and_test(&shared_info->refcnt))
>> +		kfree(shared_info);
>> +}
>> +
>>  /*
>> - * We try to place it in the list now to make sure it's unique before
>> - * we dedicate too many resources.  In particular, temporary percpu
>> + * Check that the module load is unique and make it visible to others. The code
>> + * looks for parallel running inserts and already loaded modules. Two inserts
>> + * are considered equivalent if their module name matches. In case this load
>> + * duplicates another running insert, the code waits for its completion and
>> + * then returns -EEXIST or -ENODEV depending on whether it succeeded.
>> + *
>> + * Detecting early that a load is unique avoids dedicating too many cycles and
>> + * resources to bring up the module. In particular, it prevents temporary percpu
>>   * memory exhaustion.
>> + *
>> + * Merging same-name load requests, in turn, primarily helps during the boot
>> + * process. It can happen that the kernel receives a burst of requests to load
>> + * the same module (for example, the same module for each individual CPU) and
>> + * loading it eventually fails during its init call. Merging the requests
>> + * ensures that only one full attempt to load the module is made.
>> + *
>> + * On a non-error return, it is guaranteed that this load is unique.
>>   */
>> -static int add_unformed_module(struct module *mod)
>> +static struct shared_load_info *add_running_load(const struct load_info *info)
>>  {
>> -	int err;
>>  	struct module *old;
>> +	struct shared_load_info *shared_info;
>>  
>> -	mod->state = MODULE_STATE_UNFORMED;
>> -
>> -again:
>>  	mutex_lock(&module_mutex);
>> -	old = find_module_all(mod->name, strlen(mod->name), true);
>> -	if (old != NULL) {
>> -		if (old->state != MODULE_STATE_LIVE) {
>> -			/* Wait in case it fails to load. */
>> +
>> +	/* Search if there is a running load of a module with the same name. */
>> +	list_for_each_entry(shared_info, &running_loads, list)
>> +		if (strcmp(shared_info->name, info->name) == 0) {
>> +			int err;
>> +
>> +			shared_load_info_get(shared_info);
>>  			mutex_unlock(&module_mutex);
>> +
>>  			err = wait_event_interruptible(module_wq,
>> -					       finished_loading(mod->name));
>> -			if (err)
>> -				goto out_unlocked;
>> -			goto again;
>> +						       shared_info->err != 0);
>> +			if (!err)
>> +				err = shared_info->err;
> 
> The logic around shared_info->err is a bit tricky. The value 0
> means that the parallel load is still in progress. Any error
> value means that it has finished, where -EEXIST means that
> the load actually succeeded.
> 
> Such optimizations might make sense when they save a lot
> of memory. And even in these situations we should do our best
> to keep the logic straightforward.
> 
> I suggest setting shared_info->err to the value that is actually returned.

Ok.

> And use different logic to check whether the load has finished. Either
> add a boolean, or we might actually use shared_info->list.
> 
> struct shared_load_info is removed from the @running_loads list when
> the load finishes. We could do this in finalize_running_load():
> 
> 	list_del_init(&shared_info->list);
> 
> and here:
> 
> 			err = wait_event_interruptible(module_wq,
> 						       list_empty(&shared_info->list));
> 
> 			/*
> 			 * Do not retry the module load when the parallel one
> 			 * failed. But do not return the exact error code
> 			 * because the parallel load might have used different
> 			 * module parameters. Instead return -EBUSY.
> 			 */
> 			if (!err) {
> 				err = shared_info->err ? -EBUSY : -EEXIST;
> [...]

Noted, discussed below.

>> +			shared_load_info_put(shared_info);
>> +			shared_info = ERR_PTR(err);
>> +			goto out_unlocked;
>>  		}
>> -		err = -EEXIST;
>> +
>> +	/* Search if there is a live module with the given name already. */
>> +	old = find_module_all(info->name, strlen(info->name), true);
>> +	if (old != NULL) {
>> +		if (old->state == MODULE_STATE_LIVE) {
>> +			shared_info = ERR_PTR(-EEXIST);
>> +			goto out;
>> +		}
>> +
>> +		/*
>> +		 * Any active load always has its record in running_loads and so
>> +		 * would be found above. This applies independently of whether
>> +		 * such a module is currently in MODULE_STATE_UNFORMED,
>> +		 * MODULE_STATE_COMING, or even in MODULE_STATE_GOING if its
>> +		 * initialization failed. It therefore means this must be an
>> +		 * older going module and the caller should try later once it is
>> +		 * gone.
>> +		 */
>> +		WARN_ON(old->state != MODULE_STATE_GOING);
>> +		shared_info = ERR_PTR(-EAGAIN);
> 
> I would return -EBUSY here to avoid too many variants. The load failed because
> the same module was being loaded or unloaded.
> 
> Anyway, it should be described in the commit message.

Ack, I'll change this error to -EBUSY too.

>>  		goto out;
>>  	}
>> -	mod_update_bounds(mod);
>> -	list_add_rcu(&mod->list, &modules);
>> -	mod_tree_insert(mod);
>> -	err = 0;
>> +
>> +	/* The load is unique, make it visible to others. */
>> +	shared_info = shared_load_info_alloc(info);
>> +	if (IS_ERR(shared_info))
>> +		goto out;
>> +	list_add(&shared_info->list, &running_loads);
>>  
>>  out:
>>  	mutex_unlock(&module_mutex);
>>  out_unlocked:
>> -	return err;
>> +	return shared_info;
>> +}
>> +
>> +/* Complete the running load and inform other duplicate inserts about it. */
>> +static void finalize_running_load(struct shared_load_info *shared_info, int err)
>> +{
>> +	mutex_lock(&module_mutex);
>> +	list_del(&shared_info->list);
>> +	shared_info->err = err == 0 ? -EEXIST : -ENODEV;
> 
> As explained above, I suggest using:
> 
> 	list_del_init(&shared_info->list);
> 	shared_info->err = err;
> 
>> +	mutex_unlock(&module_mutex);
>> +
>> +	wake_up_all(&module_wq);
> 
> Heh, this should be wake_up_interruptible() to match
> the wait_event_interruptible().
> 
> The _all() variant is used when there are exclusive waiters. I have
> recently learned about it, see
> https://lore.kernel.org/all/CAHk-=wgC47n_7E6UtFx_agkJtLmWOXGsjdFjybBFYNA1AheQLQ@mail.gmail.com/
> 
> But it should be fixed in a separate patch because the same mistake
> was there even before.

I'll add a separate patch to the series to correct it.

> Also it would make sense to add the wait queue head into struct
> shared_load_info to reduce spurious wakeups. The head is small,
> the struct is allocated anyway, and the lifecycle is the same.

Considering this and your previous comment about waiting on shared_info->err,
it looks best to me to use a per-shared_load_info completion.
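
Roughly like this, as a sketch of what I plan for v2 (names and details
still tentative):

	struct shared_load_info {
		char name[MODULE_NAME_LEN];
		refcount_t refcnt;
		struct list_head list;
		int err;
		struct completion done;
	};

	/* in shared_load_info_alloc(): */
	init_completion(&shared_info->done);

	/* waiter side in add_running_load(): */
	err = wait_for_completion_interruptible(&shared_info->done);
	if (!err)
		err = shared_info->err ? -EBUSY : -EEXIST;

	/* in finalize_running_load(): */
	shared_info->err = err;
	complete_all(&shared_info->done);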

>> +	shared_load_info_put(shared_info);
>> +}
>> +
>> +static void add_unformed_module(struct module *mod)
>> +{
>> +	mod->state = MODULE_STATE_UNFORMED;
>> +
>> +	mutex_lock(&module_mutex);
>> +	mod_update_bounds(mod);
>> +	list_add_rcu(&mod->list, &modules);
>> +	mod_tree_insert(mod);
>> +	mutex_unlock(&module_mutex);
>>  }
>>  
>>  static int complete_formation(struct module *mod, struct load_info *info)
> 
> Otherwise, the patch looks good to me.

Thank you for the review. I will prepare v2 of the patch.

Petr


