bitbake-devel.lists.openembedded.org archive mirror
* [PATCH 1/5] lib/bb: Clean up use of len()
@ 2021-11-02 14:57 Richard Purdie
  2021-11-02 14:57 ` [PATCH 2/5] daemonize: Avoid unclosed file warning Richard Purdie
                   ` (3 more replies)
  0 siblings, 4 replies; 5+ messages in thread
From: Richard Purdie @ 2021-11-02 14:57 UTC (permalink / raw)
  To: bitbake-devel
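
Using a container's implicit truth value is the idiomatic (PEP 8) way to
test for emptiness and saves a redundant len() call. A minimal sketch of
the idiom (illustrative only, not part of the diff below):

    items = []

    # Instead of: if len(items) == 0:
    if not items:
        print("empty")

    # Instead of: if len(items) > 0:
    if items:
        print("not empty")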

Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
---
 bin/bitbake-diffsigs         |  4 +--
 lib/bb/cooker.py             |  6 ++--
 lib/bb/cookerdata.py         |  2 +-
 lib/bb/fetch2/__init__.py    |  4 +--
 lib/bb/fetch2/gitsm.py       |  2 +-
 lib/bb/runqueue.py           | 66 ++++++++++++++++++------------------
 lib/bb/taskdata.py           |  4 +--
 lib/bb/ui/buildinfohelper.py | 20 +++++------
 lib/bb/ui/uievent.py         |  4 +--
 9 files changed, 56 insertions(+), 56 deletions(-)

diff --git a/bin/bitbake-diffsigs b/bin/bitbake-diffsigs
index 6646dccdfa..cf4cc706a2 100755
--- a/bin/bitbake-diffsigs
+++ b/bin/bitbake-diffsigs
@@ -60,7 +60,7 @@ def find_siginfo_task(bbhandler, pn, taskname, sig1=None, sig2=None):
 
     if sig1 and sig2:
         sigfiles = find_siginfo(bbhandler, pn, taskname, [sig1, sig2])
-        if len(sigfiles) == 0:
+        if not sigfiles:
             logger.error('No sigdata files found matching %s %s matching either %s or %s' % (pn, taskname, sig1, sig2))
             sys.exit(1)
         elif not sig1 in sigfiles:
@@ -86,7 +86,7 @@ def recursecb(key, hash1, hash2):
     hashfiles = find_siginfo(tinfoil, key, None, hashes)
 
     recout = []
-    if len(hashfiles) == 0:
+    if not hashfiles:
         recout.append("Unable to find matching sigdata for %s with hashes %s or %s" % (key, hash1, hash2))
     elif not hash1 in hashfiles:
         recout.append("Unable to find matching sigdata for %s with hash %s" % (key, hash1))
diff --git a/lib/bb/cooker.py b/lib/bb/cooker.py
index af794b4c42..194595ce8b 100644
--- a/lib/bb/cooker.py
+++ b/lib/bb/cooker.py
@@ -1656,7 +1656,7 @@ class BBCooker:
         # Return a copy, don't modify the original
         pkgs_to_build = pkgs_to_build[:]
 
-        if len(pkgs_to_build) == 0:
+        if not pkgs_to_build:
             raise NothingToBuild
 
         ignore = (self.data.getVar("ASSUME_PROVIDED") or "").split()
@@ -1795,10 +1795,10 @@ class CookerCollectFiles(object):
         files.sort( key=lambda fileitem: self.calc_bbfile_priority(fileitem)[0] )
         config.setVar("BBFILES_PRIORITIZED", " ".join(files))
 
-        if not len(files):
+        if not files:
             files = self.get_bbfiles()
 
-        if not len(files):
+        if not files:
             collectlog.error("no recipe files to build, check your BBPATH and BBFILES?")
             bb.event.fire(CookerExit(), eventdata)
 
diff --git a/lib/bb/cookerdata.py b/lib/bb/cookerdata.py
index ba657c03b6..592bc2968e 100644
--- a/lib/bb/cookerdata.py
+++ b/lib/bb/cookerdata.py
@@ -86,7 +86,7 @@ class ConfigParameters(object):
                 action['msg'] = "Only one target can be used with the --environment option."
             elif self.options.buildfile and len(self.options.pkgs_to_build) > 0:
                 action['msg'] = "No target should be used with the --environment and --buildfile options."
-            elif len(self.options.pkgs_to_build) > 0:
+            elif self.options.pkgs_to_build:
                 action['action'] = ["showEnvironmentTarget", self.options.pkgs_to_build]
             else:
                 action['action'] = ["showEnvironment", self.options.buildfile]
diff --git a/lib/bb/fetch2/__init__.py b/lib/bb/fetch2/__init__.py
index 000b49a500..1d6e4e0964 100644
--- a/lib/bb/fetch2/__init__.py
+++ b/lib/bb/fetch2/__init__.py
@@ -770,7 +770,7 @@ def get_srcrev(d, method_name='sortable_revision'):
         if urldata[u].method.supports_srcrev():
             scms.append(u)
 
-    if len(scms) == 0:
+    if not scms:
         raise FetchError("SRCREV was used yet no valid SCM was found in SRC_URI")
 
     if len(scms) == 1 and len(urldata[scms[0]].names) == 1:
@@ -1636,7 +1636,7 @@ class Fetch(object):
         if localonly and cache:
             raise Exception("bb.fetch2.Fetch.__init__: cannot set cache and localonly at same time")
 
-        if len(urls) == 0:
+        if not urls:
             urls = d.getVar("SRC_URI").split()
         self.urls = urls
         self.d = d
diff --git a/lib/bb/fetch2/gitsm.py b/lib/bb/fetch2/gitsm.py
index a7110a988d..c5c23d5260 100644
--- a/lib/bb/fetch2/gitsm.py
+++ b/lib/bb/fetch2/gitsm.py
@@ -163,7 +163,7 @@ class GitSM(Git):
         else:
             self.process_submodules(ud, ud.clonedir, need_update_submodule, d)
 
-        if len(need_update_list) > 0:
+        if need_update_list:
             logger.debug('gitsm: Submodules requiring update: %s' % (' '.join(need_update_list)))
             return True
 
diff --git a/lib/bb/runqueue.py b/lib/bb/runqueue.py
index 10511a09dc..87c00462c1 100644
--- a/lib/bb/runqueue.py
+++ b/lib/bb/runqueue.py
@@ -547,7 +547,7 @@ class RunQueueData:
                         next_points.append(revdep)
                         task_done[revdep] = True
             endpoints = next_points
-            if len(next_points) == 0:
+            if not next_points:
                 break
 
         # Circular dependency sanity check
@@ -589,7 +589,7 @@ class RunQueueData:
 
         found = False
         for mc in self.taskData:
-            if len(taskData[mc].taskentries) > 0:
+            if taskData[mc].taskentries:
                 found = True
                 break
         if not found:
@@ -773,7 +773,7 @@ class RunQueueData:
         # Find the dependency chain endpoints
         endpoints = set()
         for tid in self.runtaskentries:
-            if len(deps[tid]) == 0:
+            if not deps[tid]:
                 endpoints.add(tid)
         # Iterate the chains collating dependencies
         while endpoints:
@@ -784,11 +784,11 @@ class RunQueueData:
                     cumulativedeps[dep].update(cumulativedeps[tid])
                     if tid in deps[dep]:
                         deps[dep].remove(tid)
-                    if len(deps[dep]) == 0:
+                    if not deps[dep]:
                         next.add(dep)
             endpoints = next
         #for tid in deps:
-        #    if len(deps[tid]) != 0:
+        #    if deps[tid]:
         #        bb.warn("Sanity test failure, dependencies left for %s (%s)" % (tid, deps[tid]))
 
         # Loop here since recrdeptasks can depend upon other recrdeptasks and we have to
@@ -958,7 +958,7 @@ class RunQueueData:
                     delcount[tid] = self.runtaskentries[tid]
                     del self.runtaskentries[tid]
 
-            if len(self.runtaskentries) == 0:
+            if not self.runtaskentries:
                 bb.msg.fatal("RunQueue", "Could not find any tasks with the tasknames %s to run within the recipes of the taskgraphs of the targets %s" % (str(self.cooker.configuration.runall), str(self.targets)))
 
         self.init_progress_reporter.next_stage()
@@ -983,7 +983,7 @@ class RunQueueData:
                     delcount[tid] = self.runtaskentries[tid]
                     del self.runtaskentries[tid]
 
-            if len(self.runtaskentries) == 0:
+            if not self.runtaskentries:
                 bb.msg.fatal("RunQueue", "Could not find any tasks with the tasknames %s to run within the taskgraphs of the targets %s" % (str(self.cooker.configuration.runonly), str(self.targets)))
 
         #
@@ -991,7 +991,7 @@ class RunQueueData:
         #
 
         # Check to make sure we still have tasks to run
-        if len(self.runtaskentries) == 0:
+        if not self.runtaskentries:
             if not taskData[''].abort:
                 bb.msg.fatal("RunQueue", "All buildable tasks have been run but the build is incomplete (--continue mode). Errors for the tasks that failed will have been printed above.")
             else:
@@ -1015,7 +1015,7 @@ class RunQueueData:
         endpoints = []
         for tid in self.runtaskentries:
             revdeps = self.runtaskentries[tid].revdeps
-            if len(revdeps) == 0:
+            if not revdeps:
                 endpoints.append(tid)
             for dep in revdeps:
                 if dep in self.runtaskentries[tid].depends:
@@ -1188,9 +1188,9 @@ class RunQueueData:
         # Iterate over the task list and call into the siggen code
         dealtwith = set()
         todeal = set(self.runtaskentries)
-        while len(todeal) > 0:
+        while todeal:
             for tid in todeal.copy():
-                if len(self.runtaskentries[tid].depends - dealtwith) == 0:
+                if not (self.runtaskentries[tid].depends - dealtwith):
                     dealtwith.add(tid)
                     todeal.remove(tid)
                     self.prepare_task_hash(tid)
@@ -1500,10 +1500,10 @@ class RunQueue:
             self.rqexe = RunQueueExecute(self)
 
             # If we don't have any setscene functions, skip execution
-            if len(self.rqdata.runq_setscene_tids) == 0:
+            if not self.rqdata.runq_setscene_tids:
                 logger.info('No setscene tasks')
                 for tid in self.rqdata.runtaskentries:
-                    if len(self.rqdata.runtaskentries[tid].depends) == 0:
+                    if not self.rqdata.runtaskentries[tid].depends:
                         self.rqexe.setbuildable(tid)
                     self.rqexe.tasks_notcovered.add(tid)
                 self.rqexe.sqdone = True
@@ -1780,7 +1780,7 @@ class RunQueueExecute:
             bb.fatal("Invalid scheduler '%s'.  Available schedulers: %s" %
                      (self.scheduler, ", ".join(obj.name for obj in schedulers)))
 
-        #if len(self.rqdata.runq_setscene_tids) > 0:
+        #if self.rqdata.runq_setscene_tids:
         self.sqdata = SQData()
         build_scenequeue_data(self.sqdata, self.rqdata, self.rq, self.cooker, self.stampcache, self)
 
@@ -1821,7 +1821,7 @@ class RunQueueExecute:
                 # worker must have died?
                 pass
 
-        if len(self.failed_tids) != 0:
+        if self.failed_tids:
             self.rq.state = runQueueFailed
             return
 
@@ -1837,7 +1837,7 @@ class RunQueueExecute:
             self.rq.read_workers()
             return self.rq.active_fds()
 
-        if len(self.failed_tids) != 0:
+        if self.failed_tids:
             self.rq.state = runQueueFailed
             return True
 
@@ -2001,7 +2001,7 @@ class RunQueueExecute:
             if x not in self.tasks_scenequeue_done:
                 logger.error("Task %s was never processed by the setscene code" % x)
                 err = True
-            if len(self.rqdata.runtaskentries[x].depends) == 0 and x not in self.runq_buildable:
+            if not self.rqdata.runtaskentries[x].depends and x not in self.runq_buildable:
                 logger.error("Task %s was never marked as buildable by the setscene code" % x)
                 err = True
         return err
@@ -2025,7 +2025,7 @@ class RunQueueExecute:
             # Find the next setscene to run
             for nexttask in self.sorted_setscene_tids:
                 if nexttask in self.sq_buildable and nexttask not in self.sq_running and self.sqdata.stamps[nexttask] not in self.build_stamps.values():
-                    if nexttask not in self.sqdata.unskippable and len(self.sqdata.sq_revdeps[nexttask]) > 0 and self.sqdata.sq_revdeps[nexttask].issubset(self.scenequeue_covered) and self.check_dependencies(nexttask, self.sqdata.sq_revdeps[nexttask]):
+                    if nexttask not in self.sqdata.unskippable and self.sqdata.sq_revdeps[nexttask] and self.sqdata.sq_revdeps[nexttask].issubset(self.scenequeue_covered) and self.check_dependencies(nexttask, self.sqdata.sq_revdeps[nexttask]):
                         if nexttask not in self.rqdata.target_tids:
                             logger.debug2("Skipping setscene for task %s" % nexttask)
                             self.sq_task_skip(nexttask)
@@ -2189,7 +2189,7 @@ class RunQueueExecute:
             if self.can_start_task():
                 return True
 
-        if self.stats.active > 0 or len(self.sq_live) > 0:
+        if self.stats.active > 0 or self.sq_live:
             self.rq.read_workers()
             return self.rq.active_fds()
 
@@ -2201,7 +2201,7 @@ class RunQueueExecute:
                 self.sq_task_failoutright(tid)
             return True
 
-        if len(self.failed_tids) != 0:
+        if self.failed_tids:
             self.rq.state = runQueueFailed
             return True
 
@@ -2280,7 +2280,7 @@ class RunQueueExecute:
         covered.intersection_update(self.tasks_scenequeue_done)
 
         for tid in notcovered | covered:
-            if len(self.rqdata.runtaskentries[tid].depends) == 0:
+            if not self.rqdata.runtaskentries[tid].depends:
                 self.setbuildable(tid)
             elif self.rqdata.runtaskentries[tid].depends.issubset(self.runq_complete):
                  self.setbuildable(tid)
@@ -2339,7 +2339,7 @@ class RunQueueExecute:
         # Now iterate those tasks in dependency order to regenerate their taskhash/unihash
         next = set()
         for p in total:
-            if len(self.rqdata.runtaskentries[p].depends) == 0:
+            if not self.rqdata.runtaskentries[p].depends:
                 next.add(p)
             elif self.rqdata.runtaskentries[p].depends.isdisjoint(total):
                 next.add(p)
@@ -2349,7 +2349,7 @@ class RunQueueExecute:
             current = next.copy()
             next = set()
             for tid in current:
-                if len(self.rqdata.runtaskentries[p].depends) and not self.rqdata.runtaskentries[tid].depends.isdisjoint(total):
+                if self.rqdata.runtaskentries[p].depends and not self.rqdata.runtaskentries[tid].depends.isdisjoint(total):
                     continue
                 orighash = self.rqdata.runtaskentries[tid].hash
                 dc = bb.parse.siggen.get_data_caches(self.rqdata.dataCaches, mc_from_tid(tid))
@@ -2436,7 +2436,7 @@ class RunQueueExecute:
             if not harddepfail and self.sqdata.sq_revdeps[tid].issubset(self.scenequeue_covered | self.scenequeue_notcovered):
                 if tid not in self.sq_buildable:
                     self.sq_buildable.add(tid)
-            if len(self.sqdata.sq_revdeps[tid]) == 0:
+            if not self.sqdata.sq_revdeps[tid]:
                 self.sq_buildable.add(tid)
 
             if tid in self.sqdata.outrightfail:
@@ -2652,7 +2652,7 @@ def build_scenequeue_data(sqdata, rqdata, rq, cooker, stampcache, sqrq):
     for tid in rqdata.runtaskentries:
         sq_revdeps[tid] = copy.copy(rqdata.runtaskentries[tid].revdeps)
         sq_revdeps_squash[tid] = set()
-        if (len(sq_revdeps[tid]) == 0) and tid not in rqdata.runq_setscene_tids:
+        if not sq_revdeps[tid] and tid not in rqdata.runq_setscene_tids:
             #bb.warn("Added endpoint %s" % (tid))
             endpoints[tid] = set()
 
@@ -2693,9 +2693,9 @@ def build_scenequeue_data(sqdata, rqdata, rq, cooker, stampcache, sqrq):
                     sq_revdeps[dep].remove(point)
                 if tasks:
                     sq_revdeps_squash[dep] |= tasks
-                if len(sq_revdeps[dep]) == 0 and dep not in rqdata.runq_setscene_tids:
+                if not sq_revdeps[dep] and dep not in rqdata.runq_setscene_tids:
                     newendpoints[dep] = task
-        if len(newendpoints) != 0:
+        if newendpoints:
             process_endpoints(newendpoints)
 
     process_endpoints(endpoints)
@@ -2707,7 +2707,7 @@ def build_scenequeue_data(sqdata, rqdata, rq, cooker, stampcache, sqrq):
     # Take the build endpoints (no revdeps) and find the sstate tasks they depend upon
     new = True
     for tid in rqdata.runtaskentries:
-        if len(rqdata.runtaskentries[tid].revdeps) == 0:
+        if not rqdata.runtaskentries[tid].revdeps:
             sqdata.unskippable.add(tid)
     sqdata.unskippable |= sqrq.cantskip
     while new:
@@ -2716,7 +2716,7 @@ def build_scenequeue_data(sqdata, rqdata, rq, cooker, stampcache, sqrq):
         for tid in sorted(orig, reverse=True):
             if tid in rqdata.runq_setscene_tids:
                 continue
-            if len(rqdata.runtaskentries[tid].depends) == 0:
+            if not rqdata.runtaskentries[tid].depends:
                 # These are tasks which have no setscene tasks in their chain, need to mark as directly buildable
                 sqrq.setbuildable(tid)
             sqdata.unskippable |= rqdata.runtaskentries[tid].depends
@@ -2731,7 +2731,7 @@ def build_scenequeue_data(sqdata, rqdata, rq, cooker, stampcache, sqrq):
     for taskcounter, tid in enumerate(rqdata.runtaskentries):
         if tid in rqdata.runq_setscene_tids:
             pass
-        elif len(sq_revdeps_squash[tid]) != 0:
+        elif sq_revdeps_squash[tid]:
             bb.msg.fatal("RunQueue", "Something went badly wrong during scenequeue generation, aborting. Please report this problem.")
         else:
             del sq_revdeps_squash[tid]
@@ -2796,7 +2796,7 @@ def build_scenequeue_data(sqdata, rqdata, rq, cooker, stampcache, sqrq):
     sqdata.multiconfigs = set()
     for tid in sqdata.sq_revdeps:
         sqdata.multiconfigs.add(mc_from_tid(tid))
-        if len(sqdata.sq_revdeps[tid]) == 0:
+        if not sqdata.sq_revdeps[tid]:
             sqrq.sq_buildable.add(tid)
 
     rqdata.init_progress_reporter.finish()
@@ -3050,7 +3050,7 @@ class runQueuePipe():
                 raise
         end = len(self.queue)
         found = True
-        while found and len(self.queue):
+        while found and self.queue:
             found = False
             index = self.queue.find(b"</event>")
             while index != -1 and self.queue.startswith(b"<event>"):
@@ -3088,7 +3088,7 @@ class runQueuePipe():
     def close(self):
         while self.read():
             continue
-        if len(self.queue) > 0:
+        if self.queue:
             print("Warning, worker left partial message: %s" % self.queue)
         self.input.close()
 
diff --git a/lib/bb/taskdata.py b/lib/bb/taskdata.py
index 47bad6d1fa..7bfcdb8414 100644
--- a/lib/bb/taskdata.py
+++ b/lib/bb/taskdata.py
@@ -451,12 +451,12 @@ class TaskData:
         for target in self.build_targets:
             if fn in self.build_targets[target]:
                 self.build_targets[target].remove(fn)
-                if len(self.build_targets[target]) == 0:
+                if not self.build_targets[target]:
                     self.remove_buildtarget(target, missing_list)
         for target in self.run_targets:
             if fn in self.run_targets[target]:
                 self.run_targets[target].remove(fn)
-                if len(self.run_targets[target]) == 0:
+                if not self.run_targets[target]:
                     self.remove_runtarget(target, missing_list)
 
     def remove_buildtarget(self, target, missing_list=None):
diff --git a/lib/bb/ui/buildinfohelper.py b/lib/bb/ui/buildinfohelper.py
index 43aa592842..8588849dd4 100644
--- a/lib/bb/ui/buildinfohelper.py
+++ b/lib/bb/ui/buildinfohelper.py
@@ -483,11 +483,11 @@ class ORMWrapper(object):
 
             # we already created the root directory, so ignore any
             # entry for it
-            if len(path) == 0:
+            if not path:
                 continue
 
             parent_path = "/".join(path.split("/")[:len(path.split("/")) - 1])
-            if len(parent_path) == 0:
+            if not parent_path:
                 parent_path = "/"
             parent_obj = self._cached_get(Target_File, target = target_obj, path = parent_path, inodetype = Target_File.ITYPE_DIRECTORY)
             tf_obj = Target_File.objects.create(
@@ -633,7 +633,7 @@ class ORMWrapper(object):
                         packagefile_objects.append(Package_File( package = packagedict[p]['object'],
                             path = targetpath,
                             size = targetfilesize))
-                    if len(packagefile_objects):
+                    if packagefile_objects:
                         Package_File.objects.bulk_create(packagefile_objects)
                 except KeyError as e:
                     errormsg += "  stpi: Key error, package %s key %s \n" % ( p, e )
@@ -673,7 +673,7 @@ class ORMWrapper(object):
                     logger.warning("Could not add dependency to the package %s "
                                    "because %s is an unknown package", p, px)
 
-        if len(packagedeps_objs) > 0:
+        if packagedeps_objs:
             Package_Dependency.objects.bulk_create(packagedeps_objs)
         else:
             logger.info("No package dependencies created")
@@ -767,7 +767,7 @@ class ORMWrapper(object):
             packagefile_objects.append(Package_File( package = bp_object,
                                         path = path,
                                         size = package_info['FILES_INFO'][path] ))
-        if len(packagefile_objects):
+        if packagefile_objects:
             Package_File.objects.bulk_create(packagefile_objects)
 
         def _po_byname(p):
@@ -809,7 +809,7 @@ class ORMWrapper(object):
                 packagedeps_objs.append(Package_Dependency(  package = bp_object,
                     depends_on = _po_byname(p), dep_type = Package_Dependency.TYPE_RCONFLICTS))
 
-        if len(packagedeps_objs) > 0:
+        if packagedeps_objs:
             Package_Dependency.objects.bulk_create(packagedeps_objs)
 
         return bp_object
@@ -826,7 +826,7 @@ class ORMWrapper(object):
                     desc = vardump[root_var]['doc']
             if desc is None:
                 desc = ''
-            if len(desc):
+            if desc:
                 HelpText.objects.get_or_create(build=build_obj,
                                                area=HelpText.VARIABLE,
                                                key=k, text=desc)
@@ -846,7 +846,7 @@ class ORMWrapper(object):
                                 file_name = vh['file'],
                                 line_number = vh['line'],
                                 operation = vh['op']))
-                if len(varhist_objects):
+                if varhist_objects:
                     VariableHistory.objects.bulk_create(varhist_objects)
 
 
@@ -1069,7 +1069,7 @@ class BuildInfoHelper(object):
         for t in self.internal_state['targets']:
             buildname = self.internal_state['build'].build_name
             pe, pv = task_object.recipe.version.split(":",1)
-            if len(pe) > 0:
+            if pe:
                 package = task_object.recipe.name + "-" + pe + "_" + pv
             else:
                 package = task_object.recipe.name + "-" + pv
@@ -1618,7 +1618,7 @@ class BuildInfoHelper(object):
 
         if 'backlog' in self.internal_state:
             # if we have a backlog of events, do our best to save them here
-            if len(self.internal_state['backlog']):
+            if self.internal_state['backlog']:
                 tempevent = self.internal_state['backlog'].pop()
                 logger.debug("buildinfohelper: Saving stored event %s "
                              % tempevent)
diff --git a/lib/bb/ui/uievent.py b/lib/bb/ui/uievent.py
index 8607d0523b..e19c770bc9 100644
--- a/lib/bb/ui/uievent.py
+++ b/lib/bb/ui/uievent.py
@@ -73,13 +73,13 @@ class BBUIEventQueue:
 
         self.eventQueueLock.acquire()
 
-        if len(self.eventQueue) == 0:
+        if not self.eventQueue:
             self.eventQueueLock.release()
             return None
 
         item = self.eventQueue.pop(0)
 
-        if len(self.eventQueue) == 0:
+        if not self.eventQueue:
             self.eventQueueNotify.clear()
 
         self.eventQueueLock.release()
-- 
2.32.0




* [PATCH 2/5] daemonize: Avoid unclosed file warning
  2021-11-02 14:57 [PATCH 1/5] lib/bb: Clean up use of len() Richard Purdie
@ 2021-11-02 14:57 ` Richard Purdie
  2021-11-02 14:57 ` [PATCH 3/5] lib/bb: Fix string concatenation potential performance issues Richard Purdie
                   ` (2 subsequent siblings)
  3 siblings, 0 replies; 5+ messages in thread
From: Richard Purdie @ 2021-11-02 14:57 UTC (permalink / raw)
  To: bitbake-devel

In theory we can leak the 'so' file descriptor, so refactor the code
to avoid that.
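
A with-statement makes the close unconditional: the descriptor is
released even if the daemonized function raises. A minimal sketch of the
pattern (the log filename and run() payload are hypothetical, not
bitbake code):

    import os
    import sys

    def run():
        print("daemon work")  # hypothetical payload

    with open("daemon.log", "a+") as so:
        # Point stdout/stderr at the log for the daemon's lifetime; the
        # file object is closed automatically when the block exits.
        os.dup2(so.fileno(), sys.stdout.fileno())
        os.dup2(so.fileno(), sys.stderr.fileno())
        run()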

Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
---
 lib/bb/daemonize.py | 42 +++++++++++++++++++++---------------------
 1 file changed, 21 insertions(+), 21 deletions(-)

diff --git a/lib/bb/daemonize.py b/lib/bb/daemonize.py
index c187fcfc6c..40fabd0c0a 100644
--- a/lib/bb/daemonize.py
+++ b/lib/bb/daemonize.py
@@ -74,26 +74,26 @@ def createDaemon(function, logfile):
     with open('/dev/null', 'r') as si:
         os.dup2(si.fileno(), sys.stdin.fileno())
 
-    try:
-        so = open(logfile, 'a+')
-        os.dup2(so.fileno(), sys.stdout.fileno())
-        os.dup2(so.fileno(), sys.stderr.fileno())
-    except io.UnsupportedOperation:
-        sys.stdout = open(logfile, 'a+')
+    with open(logfile, 'a+') as so:
+        try:
+            os.dup2(so.fileno(), sys.stdout.fileno())
+            os.dup2(so.fileno(), sys.stderr.fileno())
+        except io.UnsupportedOperation:
+            sys.stdout = so
 
-    # Have stdout and stderr be the same so log output matches chronologically
-    # and there aren't two seperate buffers
-    sys.stderr = sys.stdout
+        # Have stdout and stderr be the same so log output matches chronologically
+        # and there aren't two separate buffers
+        sys.stderr = sys.stdout
 
-    try:
-        function()
-    except Exception as e:
-        traceback.print_exc()
-    finally:
-        bb.event.print_ui_queue()
-        # os._exit() doesn't flush open files like os.exit() does. Manually flush
-        # stdout and stderr so that any logging output will be seen, particularly
-        # exception tracebacks.
-        sys.stdout.flush()
-        sys.stderr.flush()
-        os._exit(0)
+        try:
+            function()
+        except Exception as e:
+            traceback.print_exc()
+        finally:
+            bb.event.print_ui_queue()
+            # os._exit() doesn't flush open files like os.exit() does. Manually flush
+            # stdout and stderr so that any logging output will be seen, particularly
+            # exception tracebacks.
+            sys.stdout.flush()
+            sys.stderr.flush()
+            os._exit(0)
-- 
2.32.0




* [PATCH 3/5] lib/bb: Fix string concatenation potential performance issues
  2021-11-02 14:57 [PATCH 1/5] lib/bb: Clean up use of len() Richard Purdie
  2021-11-02 14:57 ` [PATCH 2/5] daemonize: Avoid unclosed file warning Richard Purdie
@ 2021-11-02 14:57 ` Richard Purdie
  2021-11-02 14:57 ` [PATCH 4/5] fetch: Handle mirror user/password replacements correctly Richard Purdie
  2021-11-02 14:57 ` [PATCH 5/5] tests/fetch: Add test for url parameter rewriting Richard Purdie
  3 siblings, 0 replies; 5+ messages in thread
From: Richard Purdie @ 2021-11-02 14:57 UTC (permalink / raw)
  To: bitbake-devel

Python scales badly when concatenating strings in loops. Most of these
references aren't problematic, but at least one (in data.py) is probably
a performance issue, since the cost compounds as the strings grow large.

The idiomatic way to handle this in Python is to collect the pieces in
lists, which don't reconstruct all the objects on append, and join them
once at the end. We may as well fix all the references, since that stops
them being copy/pasted into something problematic in the future.
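
A rough sketch of the difference (illustrative only, not code from this
patch):

    chunks = ["x"] * 10000

    # Quadratic: each += copies the entire accumulated string
    s = ""
    for chunk in chunks:
        s += chunk

    # Linear: collect the pieces in a list and join once at the end
    parts = []
    for chunk in chunks:
        parts.append(chunk)
    s = "".join(parts)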

This patch was based on issues highlighted by a report from AWS CodeGuru.

Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
---
 lib/bb/data.py               | 24 +++++++++++------------
 lib/bb/data_smart.py         |  6 +++---
 lib/bb/fetch2/__init__.py    | 16 ++++++++--------
 lib/bb/runqueue.py           | 37 ++++++++++++++++++------------------
 lib/bb/server/process.py     |  6 +++---
 lib/bb/ui/buildinfohelper.py | 16 ++++++++--------
 6 files changed, 53 insertions(+), 52 deletions(-)

diff --git a/lib/bb/data.py b/lib/bb/data.py
index 9d18b1e2bf..008c5593cf 100644
--- a/lib/bb/data.py
+++ b/lib/bb/data.py
@@ -285,21 +285,19 @@ def build_dependencies(key, keys, shelldeps, varflagsexcl, d):
         vardeps = varflags.get("vardeps")
 
         def handle_contains(value, contains, d):
-            newvalue = ""
+            newvalue = []
+            if value:
+                newvalue.append(value)
             for k in sorted(contains):
                 l = (d.getVar(k) or "").split()
                 for item in sorted(contains[k]):
                     for word in item.split():
                         if not word in l:
-                            newvalue += "\n%s{%s} = Unset" % (k, item)
+                            newvalue.append("\n%s{%s} = Unset" % (k, item))
                             break
                     else:
-                        newvalue += "\n%s{%s} = Set" % (k, item)
-            if not newvalue:
-                return value
-            if not value:
-                return newvalue
-            return value + newvalue
+                        newvalue.append("\n%s{%s} = Set" % (k, item))
+            return "".join(newvalue)
 
         def handle_remove(value, deps, removes, d):
             for r in sorted(removes):
@@ -406,7 +404,9 @@ def generate_dependency_hash(tasklist, gendeps, lookupcache, whitelist, fn):
 
         if data is None:
             bb.error("Task %s from %s seems to be empty?!" % (task, fn))
-            data = ''
+            data = []
+        else:
+            data = [data]
 
         gendeps[task] -= whitelist
         newdeps = gendeps[task]
@@ -424,12 +424,12 @@ def generate_dependency_hash(tasklist, gendeps, lookupcache, whitelist, fn):
 
         alldeps = sorted(seen)
         for dep in alldeps:
-            data = data + dep
+            data.append(dep)
             var = lookupcache[dep]
             if var is not None:
-                data = data + str(var)
+                data.append(str(var))
         k = fn + ":" + task
-        basehash[k] = hashlib.sha256(data.encode("utf-8")).hexdigest()
+        basehash[k] = hashlib.sha256("".join(data).encode("utf-8")).hexdigest()
         taskdeps[task] = alldeps
 
     return taskdeps, basehash
diff --git a/lib/bb/data_smart.py b/lib/bb/data_smart.py
index 8d235da121..7ed7112bdc 100644
--- a/lib/bb/data_smart.py
+++ b/lib/bb/data_smart.py
@@ -810,7 +810,7 @@ class DataSmart(MutableMapping):
                     expanded_removes[r] = self.expand(r).split()
 
                 parser.removes = set()
-                val = ""
+                val = []
                 for v in __whitespace_split__.split(parser.value):
                     skip = False
                     for r in removes:
@@ -819,8 +819,8 @@ class DataSmart(MutableMapping):
                             skip = True
                     if skip:
                         continue
-                    val = val + v
-                parser.value = val
+                    val.append(v)
+                parser.value = "".join(val)
                 if expand:
                     value = parser.value
 
diff --git a/lib/bb/fetch2/__init__.py b/lib/bb/fetch2/__init__.py
index 1d6e4e0964..43b312ce7e 100644
--- a/lib/bb/fetch2/__init__.py
+++ b/lib/bb/fetch2/__init__.py
@@ -402,24 +402,24 @@ def encodeurl(decoded):
 
     if not type:
         raise MissingParameterError('type', "encoded from the data %s" % str(decoded))
-    url = '%s://' % type
+    url = ['%s://' % type]
     if user and type != "file":
-        url += "%s" % user
+        url.append("%s" % user)
         if pswd:
-            url += ":%s" % pswd
-        url += "@"
+            url.append(":%s" % pswd)
+        url.append("@")
     if host and type != "file":
-        url += "%s" % host
+        url.append("%s" % host)
     if path:
         # Standardise path to ensure comparisons work
         while '//' in path:
             path = path.replace("//", "/")
-        url += "%s" % urllib.parse.quote(path)
+        url.append("%s" % urllib.parse.quote(path))
     if p:
         for parm in p:
-            url += ";%s=%s" % (parm, p[parm])
+            url.append(";%s=%s" % (parm, p[parm]))
 
-    return url
+    return "".join(url)
 
 def uri_replace(ud, uri_find, uri_replace, replacements, d, mirrortarball=None):
     if not ud.url or not uri_find or not uri_replace:
diff --git a/lib/bb/runqueue.py b/lib/bb/runqueue.py
index 87c00462c1..67f8dfbd9e 100644
--- a/lib/bb/runqueue.py
+++ b/lib/bb/runqueue.py
@@ -1061,12 +1061,12 @@ class RunQueueData:
                         seen_pn.append(pn)
                     else:
                         bb.fatal("Multiple versions of %s are due to be built (%s). Only one version of a given PN should be built in any given build. You likely need to set PREFERRED_VERSION_%s to select the correct version or don't depend on multiple versions." % (pn, " ".join(prov_list[prov]), pn))
-                msg = "Multiple .bb files are due to be built which each provide %s:\n  %s" % (prov, "\n  ".join(prov_list[prov]))
+                msg = ["Multiple .bb files are due to be built which each provide %s:\n  %s" % (prov, "\n  ".join(prov_list[prov]))]
                 #
                 # Construct a list of things which uniquely depend on each provider
                 # since this may help the user figure out which dependency is triggering this warning
                 #
-                msg += "\nA list of tasks depending on these providers is shown and may help explain where the dependency comes from."
+                msg.append("\nA list of tasks depending on these providers is shown and may help explain where the dependency comes from.")
                 deplist = {}
                 commondeps = None
                 for provfn in prov_list[prov]:
@@ -1086,12 +1086,12 @@ class RunQueueData:
                         commondeps &= deps
                     deplist[provfn] = deps
                 for provfn in deplist:
-                    msg += "\n%s has unique dependees:\n  %s" % (provfn, "\n  ".join(deplist[provfn] - commondeps))
+                    msg.append("\n%s has unique dependees:\n  %s" % (provfn, "\n  ".join(deplist[provfn] - commondeps)))
                 #
                 # Construct a list of provides and runtime providers for each recipe
                 # (rprovides has to cover RPROVIDES, PACKAGES, PACKAGES_DYNAMIC)
                 #
-                msg += "\nIt could be that one recipe provides something the other doesn't and should. The following provider and runtime provider differences may be helpful."
+                msg.append("\nIt could be that one recipe provides something the other doesn't and should. The following provider and runtime provider differences may be helpful.")
                 provide_results = {}
                 rprovide_results = {}
                 commonprovs = None
@@ -1118,16 +1118,16 @@ class RunQueueData:
                     else:
                         commonrprovs &= rprovides
                     rprovide_results[provfn] = rprovides
-                #msg += "\nCommon provides:\n  %s" % ("\n  ".join(commonprovs))
-                #msg += "\nCommon rprovides:\n  %s" % ("\n  ".join(commonrprovs))
+                #msg.append("\nCommon provides:\n  %s" % ("\n  ".join(commonprovs)))
+                #msg.append("\nCommon rprovides:\n  %s" % ("\n  ".join(commonrprovs)))
                 for provfn in prov_list[prov]:
-                    msg += "\n%s has unique provides:\n  %s" % (provfn, "\n  ".join(provide_results[provfn] - commonprovs))
-                    msg += "\n%s has unique rprovides:\n  %s" % (provfn, "\n  ".join(rprovide_results[provfn] - commonrprovs))
+                    msg.append("\n%s has unique provides:\n  %s" % (provfn, "\n  ".join(provide_results[provfn] - commonprovs)))
+                    msg.append("\n%s has unique rprovides:\n  %s" % (provfn, "\n  ".join(rprovide_results[provfn] - commonrprovs)))
 
                 if self.warn_multi_bb:
-                    logger.verbnote(msg)
+                    logger.verbnote("".join(msg))
                 else:
-                    logger.error(msg)
+                    logger.error("".join(msg))
 
         self.init_progress_reporter.next_stage()
 
@@ -1935,7 +1935,7 @@ class RunQueueExecute:
         self.stats.taskFailed()
         self.failed_tids.append(task)
 
-        fakeroot_log = ""
+        fakeroot_log = []
         if fakerootlog and os.path.exists(fakerootlog):
             with open(fakerootlog) as fakeroot_log_file:
                 fakeroot_failed = False
@@ -1945,12 +1945,12 @@ class RunQueueExecute:
                             fakeroot_failed = True
                     if 'doing new pid setup and server start' in line:
                         break
-                    fakeroot_log = line + fakeroot_log
+                    fakeroot_log.append(line)
 
             if not fakeroot_failed:
-                fakeroot_log = None
+                fakeroot_log = []
 
-        bb.event.fire(runQueueTaskFailed(task, self.stats, exitcode, self.rq, fakeroot_log=fakeroot_log), self.cfgData)
+        bb.event.fire(runQueueTaskFailed(task, self.stats, exitcode, self.rq, fakeroot_log=("".join(fakeroot_log) or None)), self.cfgData)
 
         if self.rqdata.taskData[''].abort:
             self.rq.state = runQueueCleanUp
@@ -2608,12 +2608,13 @@ class RunQueueExecute:
         pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
         if not check_setscene_enforce_whitelist(pn, taskname, self.rqdata.setscenewhitelist):
             if tid in self.rqdata.runq_setscene_tids:
-                msg = 'Task %s.%s attempted to execute unexpectedly and should have been setscened' % (pn, taskname)
+                msg = ['Task %s.%s attempted to execute unexpectedly and should have been setscened' % (pn, taskname)]
             else:
-                msg = 'Task %s.%s attempted to execute unexpectedly' % (pn, taskname)
+                msg = ['Task %s.%s attempted to execute unexpectedly' % (pn, taskname)]
             for t in self.scenequeue_notcovered:
-                msg = msg + "\nTask %s, unihash %s, taskhash %s" % (t, self.rqdata.runtaskentries[t].unihash, self.rqdata.runtaskentries[t].hash)
-            logger.error(msg + '\nThis is usually due to missing setscene tasks. Those missing in this build were: %s' % pprint.pformat(self.scenequeue_notcovered))
+                msg.append("\nTask %s, unihash %s, taskhash %s" % (t, self.rqdata.runtaskentries[t].unihash, self.rqdata.runtaskentries[t].hash))
+            msg.append('\nThis is usually due to missing setscene tasks. Those missing in this build were: %s' % pprint.pformat(self.scenequeue_notcovered))
+            logger.error("".join(msg))
             return True
         return False
 
diff --git a/lib/bb/server/process.py b/lib/bb/server/process.py
index 8fdcc66dc7..1636616660 100644
--- a/lib/bb/server/process.py
+++ b/lib/bb/server/process.py
@@ -326,10 +326,10 @@ class ProcessServer():
                         if e.errno != errno.ENOENT:
                             raise
 
-                msg = "Delaying shutdown due to active processes which appear to be holding bitbake.lock"
+                msg = ["Delaying shutdown due to active processes which appear to be holding bitbake.lock"]
                 if procs:
-                    msg += ":\n%s" % str(procs.decode("utf-8"))
-                serverlog(msg)
+                    msg.append(":\n%s" % str(procs.decode("utf-8")))
+                serverlog("".join(msg))
 
     def idle_commands(self, delay, fds=None):
         nextsleep = delay
diff --git a/lib/bb/ui/buildinfohelper.py b/lib/bb/ui/buildinfohelper.py
index 8588849dd4..835e92c299 100644
--- a/lib/bb/ui/buildinfohelper.py
+++ b/lib/bb/ui/buildinfohelper.py
@@ -571,7 +571,7 @@ class ORMWrapper(object):
         assert isinstance(build_obj, Build)
         assert isinstance(target_obj, Target)
 
-        errormsg = ""
+        errormsg = []
         for p in packagedict:
             # Search name swtiches round the installed name vs package name
             # by default installed name == package name
@@ -636,7 +636,7 @@ class ORMWrapper(object):
                     if packagefile_objects:
                         Package_File.objects.bulk_create(packagefile_objects)
                 except KeyError as e:
-                    errormsg += "  stpi: Key error, package %s key %s \n" % ( p, e )
+                    errormsg.append("  stpi: Key error, package %s key %s \n" % (p, e))
 
             # save disk installed size
             packagedict[p]['object'].installed_size = packagedict[p]['size']
@@ -678,8 +678,8 @@ class ORMWrapper(object):
         else:
             logger.info("No package dependencies created")
 
-        if len(errormsg) > 0:
-            logger.warning("buildinfohelper: target_package_info could not identify recipes: \n%s", errormsg)
+        if errormsg:
+            logger.warning("buildinfohelper: target_package_info could not identify recipes: \n%s", "".join(errormsg))
 
     def save_target_image_file_information(self, target_obj, file_name, file_size):
         Target_Image_File.objects.create(target=target_obj,
@@ -1404,7 +1404,7 @@ class BuildInfoHelper(object):
         assert 'pn' in event._depgraph
         assert 'tdepends' in event._depgraph
 
-        errormsg = ""
+        errormsg = []
 
         # save layer version priorities
         if 'layer-priorities' in event._depgraph.keys():
@@ -1496,7 +1496,7 @@ class BuildInfoHelper(object):
                 elif dep in self.internal_state['recipes']:
                     dependency = self.internal_state['recipes'][dep]
                 else:
-                    errormsg += "  stpd: KeyError saving recipe dependency for %s, %s \n" % (recipe, dep)
+                    errormsg.append("  stpd: KeyError saving recipe dependency for %s, %s \n" % (recipe, dep))
                     continue
                 recipe_dep = Recipe_Dependency(recipe=target,
                                                depends_on=dependency,
@@ -1537,8 +1537,8 @@ class BuildInfoHelper(object):
                 taskdeps_objects.append(Task_Dependency( task = target, depends_on = dep ))
         Task_Dependency.objects.bulk_create(taskdeps_objects)
 
-        if len(errormsg) > 0:
-            logger.warning("buildinfohelper: dependency info not identify recipes: \n%s", errormsg)
+        if errormsg:
+            logger.warning("buildinfohelper: dependency info not identify recipes: \n%s", "".join(errormsg))
 
 
     def store_build_package_information(self, event):
-- 
2.32.0




* [PATCH 4/5] fetch: Handle mirror user/password replacements correctly
  2021-11-02 14:57 [PATCH 1/5] lib/bb: Clean up use of len() Richard Purdie
  2021-11-02 14:57 ` [PATCH 2/5] daemonize: Avoid unclosed file warning Richard Purdie
  2021-11-02 14:57 ` [PATCH 3/5] lib/bb: Fix string concatenation potential performance issues Richard Purdie
@ 2021-11-02 14:57 ` Richard Purdie
  2021-11-02 14:57 ` [PATCH 5/5] tests/fetch: Add test for url parameter rewriting Richard Purdie
  3 siblings, 0 replies; 5+ messages in thread
From: Richard Purdie @ 2021-11-02 14:57 UTC (permalink / raw)
  To: bitbake-devel

Username or password replacements in URIs were being appended rather than
replaced during mirror URL remapping. Fix this and add a test case.

[YOCTO #13823]
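
For context, bb.fetch2.decodeurl() splits a URI into (type, host, path,
user, password, params), and the fix overwrites the user/password fields
outright when the replacement URI carries credentials. A standalone
sketch of the intended behaviour (simplified, not the real
uri_replace()):

    # decoded URI layout: 0 - type, 1 - host, 2 - path, 3 - user,
    # 4 - password, 5 - params
    uri    = ["git", "someserver.org", "/bitbake", "user1", "", {}]
    mirror = ["git", "git.openembedded.org", "/bitbake", "user2", "", {}]

    result = list(uri)
    for loc in (3, 4):
        if mirror[loc]:
            # replaced outright, not appended to the original value
            result[loc] = mirror[loc]

    assert result[3] == "user2"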

Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
---
 lib/bb/fetch2/__init__.py | 4 ++++
 lib/bb/tests/fetch.py     | 4 ++++
 2 files changed, 8 insertions(+)

diff --git a/lib/bb/fetch2/__init__.py b/lib/bb/fetch2/__init__.py
index 43b312ce7e..6a38cb0955 100644
--- a/lib/bb/fetch2/__init__.py
+++ b/lib/bb/fetch2/__init__.py
@@ -430,6 +430,7 @@ def uri_replace(ud, uri_find, uri_replace, replacements, d, mirrortarball=None):
     uri_replace_decoded = list(decodeurl(uri_replace))
     logger.debug2("For url %s comparing %s to %s" % (uri_decoded, uri_find_decoded, uri_replace_decoded))
     result_decoded = ['', '', '', '', '', {}]
+    # 0 - type, 1 - host, 2 - path, 3 - user, 4 - pswd, 5 - params
     for loc, i in enumerate(uri_find_decoded):
         result_decoded[loc] = uri_decoded[loc]
         regexp = i
@@ -449,6 +450,9 @@ def uri_replace(ud, uri_find, uri_replace, replacements, d, mirrortarball=None):
                 for l in replacements:
                     uri_replace_decoded[loc][k] = uri_replace_decoded[loc][k].replace(l, replacements[l])
                 result_decoded[loc][k] = uri_replace_decoded[loc][k]
+        elif (loc == 3 or loc == 4) and uri_replace_decoded[loc]:
+            # User/password in the replacement is just a straight replacement
+            result_decoded[loc] = uri_replace_decoded[loc]
         elif (re.match(regexp, uri_decoded[loc])):
             if not uri_replace_decoded[loc]:
                 result_decoded[loc] = ""
diff --git a/lib/bb/tests/fetch.py b/lib/bb/tests/fetch.py
index 29a952f286..918fec9134 100644
--- a/lib/bb/tests/fetch.py
+++ b/lib/bb/tests/fetch.py
@@ -431,6 +431,10 @@ class MirrorUriTest(FetcherTest):
         ("git://someserver.org/bitbake;tag=1234567890123456789012345678901234567890;branch=master", "git://someserver.org/bitbake;branch=master", "git://git.openembedded.org/bitbake;protocol=http")
             : "git://git.openembedded.org/bitbake;tag=1234567890123456789012345678901234567890;branch=master;protocol=http",
 
+        ("git://user1@someserver.org/bitbake;tag=1234567890123456789012345678901234567890;branch=master", "git://someserver.org/bitbake;branch=master", "git://user2@git.openembedded.org/bitbake;protocol=http")
+            : "git://user2@git.openembedded.org/bitbake;tag=1234567890123456789012345678901234567890;branch=master;protocol=http",
+
+
         #Renaming files doesn't work
         #("http://somewhere.org/somedir1/somefile_1.2.3.tar.gz", "http://somewhere.org/somedir1/somefile_1.2.3.tar.gz", "http://somewhere2.org/somedir3/somefile_2.3.4.tar.gz") : "http://somewhere2.org/somedir3/somefile_2.3.4.tar.gz"
         #("file://sstate-xyz.tgz", "file://.*/.*", "file:///somewhere/1234/sstate-cache") : "file:///somewhere/1234/sstate-cache/sstate-xyz.tgz",
-- 
2.32.0




* [PATCH 5/5] tests/fetch: Add test for url parameter rewriting
  2021-11-02 14:57 [PATCH 1/5] lib/bb: Clean up use of len() Richard Purdie
                   ` (2 preceding siblings ...)
  2021-11-02 14:57 ` [PATCH 4/5] fetch: Handle mirror user/password replacements correctly Richard Purdie
@ 2021-11-02 14:57 ` Richard Purdie
  3 siblings, 0 replies; 5+ messages in thread
From: Richard Purdie @ 2021-11-02 14:57 UTC (permalink / raw)
  To: bitbake-devel

Add a test to ensure that a parameter like protocol=git can be rewritten
to a different URL and protocol.
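
Conceptually, parameters from the original URI carry over, but any
parameter the replacement URI specifies wins. A simplified sketch, with
plain dicts standing in for the fetcher's parsed URI parameters:

    orig_params = {"tag": "1234567890123456789012345678901234567890",
                   "protocol": "git", "branch": "master"}
    repl_params = {"protocol": "https"}

    merged = dict(orig_params)
    merged.update(repl_params)   # replacement wins: protocol is now https

    assert merged["protocol"] == "https"
    assert merged["branch"] == "master"   # untouched parameters survive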

Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
---
 lib/bb/tests/fetch.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/lib/bb/tests/fetch.py b/lib/bb/tests/fetch.py
index 918fec9134..b83ec06450 100644
--- a/lib/bb/tests/fetch.py
+++ b/lib/bb/tests/fetch.py
@@ -430,9 +430,10 @@ class MirrorUriTest(FetcherTest):
             : "http://somewhere2.org/somefile_1.2.3.tar.gz",
         ("git://someserver.org/bitbake;tag=1234567890123456789012345678901234567890;branch=master", "git://someserver.org/bitbake;branch=master", "git://git.openembedded.org/bitbake;protocol=http")
             : "git://git.openembedded.org/bitbake;tag=1234567890123456789012345678901234567890;branch=master;protocol=http",
-
         ("git://user1@someserver.org/bitbake;tag=1234567890123456789012345678901234567890;branch=master", "git://someserver.org/bitbake;branch=master", "git://user2@git.openembedded.org/bitbake;protocol=http")
             : "git://user2@git.openembedded.org/bitbake;tag=1234567890123456789012345678901234567890;branch=master;protocol=http",
+        ("git://someserver.org/bitbake;tag=1234567890123456789012345678901234567890;protocol=git;branch=master", "git://someserver.org/bitbake", "git://someotherserver.org/bitbake;protocol=https")
+            : "git://someotherserver.org/bitbake;tag=1234567890123456789012345678901234567890;protocol=https;branch=master",
 
 
         #Renaming files doesn't work
-- 
2.32.0



Thread overview: 5+ messages
2021-11-02 14:57 [PATCH 1/5] lib/bb: Clean up use of len() Richard Purdie
2021-11-02 14:57 ` [PATCH 2/5] daemonize: Avoid unclosed file warning Richard Purdie
2021-11-02 14:57 ` [PATCH 3/5] lib/bb: Fix string concatenation potential performance issues Richard Purdie
2021-11-02 14:57 ` [PATCH 4/5] fetch: Handle mirror user/password replacements correctly Richard Purdie
2021-11-02 14:57 ` [PATCH 5/5] tests/fetch: Add test for url parameter rewriting Richard Purdie
