File 6006-core-make-sure-we-don-t-throttle-change-signal-gener.patch of Package systemd.39177

From 706dbbe91d05d2ef24eb02a757f96300fd920b65 Mon Sep 17 00:00:00 2001
From: Lennart Poettering <lennart@poettering.net>
Date: Tue, 13 Nov 2018 12:48:49 +0100
Subject: [PATCH 6006/6006] core: make sure we don't throttle change signal
 generator when a reload is pending

Fixes: #10627
(cherry picked from commit b8d381c47776ea0440af175cbe0c02cb743bde08)

[fbui: adjust context]
[fbui: fixes bsc#1241079]
[fbui: fixes bsc#1241586]
---
 src/core/manager.c | 64 ++++++++++++++++++++++++++++------------------
 1 file changed, 39 insertions(+), 25 deletions(-)

diff --git a/src/core/manager.c b/src/core/manager.c
index cacf00d1bc..ade98adde9 100644
--- a/src/core/manager.c
+++ b/src/core/manager.c
@@ -1560,56 +1560,70 @@ static unsigned manager_dispatch_dbus_queue(Manager *m) {
 
         assert(m);
 
+        /* Avoid recursion */
         if (m->dispatching_dbus_queue)
                 return 0;
 
-        /* Anything to do at all? */
-        if (!m->dbus_unit_queue && !m->dbus_job_queue && !m->send_reloading_done && !m->pending_reload_message)
-                return 0;
+        /* When we are reloading, let's not wait with generating signals, since we need to exit the manager as quickly
+         * as we can. There's no point in throttling generation of signals in that case. */
+        if (m->n_reloading > 0 || m->send_reloading_done || m->pending_reload_message)
+                budget = (unsigned) -1; /* infinite budget in this case */
+        else {
+                /* Anything to do at all? */
+                if (!m->dbus_unit_queue && !m->dbus_job_queue)
+                        return 0;
 
-        /* Do we have overly many messages queued at the moment? If so, let's not enqueue more on top, let's sit this
-         * cycle out, and process things in a later cycle when the queues got a bit emptier. */
-        if (manager_bus_n_queued_write(m) > MANAGER_BUS_BUSY_THRESHOLD)
-                return 0;
+                /* Do we have overly many messages queued at the moment? If so, let's not enqueue more on top, let's
+                 * sit this cycle out, and process things in a later cycle when the queues got a bit emptier. */
+                if (manager_bus_n_queued_write(m) > MANAGER_BUS_BUSY_THRESHOLD)
+                        return 0;
 
-        /* Only process a certain number of units/jobs per event loop iteration. Even if the bus queue wasn't overly
-         * full before this call we shouldn't increase it in size too wildly in one step, and we shouldn't monopolize
-         * CPU time with generating these messages. Note the difference in counting of this "budget" and the
-         * "threshold" above: the "budget" is decreased only once per generated message, regardless how many
-         * busses/direct connections it is enqueued on, while the "threshold" is applied to each queued instance of bus
-         * message, i.e. if the same message is enqueued to five busses/direct connections it will be counted five
-         * times. This difference in counting ("references" vs. "instances") is primarily a result of the fact that
-         * it's easier to implement it this way, however it also reflects the thinking that the "threshold" should put
-         * a limit on used queue memory, i.e. space, while the "budget" should put a limit on time. Also note that
-         * the "threshold" is currently chosen much higher than the "budget". */
-        budget = MANAGER_BUS_MESSAGE_BUDGET;
+                /* Only process a certain number of units/jobs per event loop iteration. Even if the bus queue wasn't
+                 * overly full before this call we shouldn't increase it in size too wildly in one step, and we
+                 * shouldn't monopolize CPU time with generating these messages. Note the difference in counting of
+                 * this "budget" and the "threshold" above: the "budget" is decreased only once per generated message,
+                 * regardless how many busses/direct connections it is enqueued on, while the "threshold" is applied to
+                 * each queued instance of bus message, i.e. if the same message is enqueued to five busses/direct
+                 * connections it will be counted five times. This difference in counting ("references"
+                 * vs. "instances") is primarily a result of the fact that it's easier to implement it this way,
+                 * however it also reflects the thinking that the "threshold" should put a limit on used queue memory,
+                 * i.e. space, while the "budget" should put a limit on time. Also note that the "threshold" is
+                 * currently chosen much higher than the "budget". */
+                budget = MANAGER_BUS_MESSAGE_BUDGET;
+        }
 
         m->dispatching_dbus_queue = true;
 
-        while (budget > 0 && (u = m->dbus_unit_queue)) {
+        while (budget != 0 && (u = m->dbus_unit_queue)) {
 
                 assert(u->in_dbus_queue);
 
                 bus_unit_send_change_signal(u);
-                n++, budget--;
+                n++;
+
+                if (budget != (unsigned) -1)
+                        budget--;
         }
 
-        while (budget > 0 && (j = m->dbus_job_queue)) {
+        while (budget != 0 && (j = m->dbus_job_queue)) {
                 assert(j->in_dbus_queue);
 
                 bus_job_send_change_signal(j);
-                n++, budget--;
+                n++;
+
+                if (budget != (unsigned) -1)
+                        budget--;
         }
 
         m->dispatching_dbus_queue = false;
 
-        if (budget > 0 && m->send_reloading_done) {
+        if (m->send_reloading_done) {
                 m->send_reloading_done = false;
                 bus_manager_send_reloading(m, false);
-                n++, budget--;
+                n++;
         }
 
-        if (budget > 0 && m->pending_reload_message) {
+        if (m->pending_reload_message) {
                 bus_send_pending_reload_message(m);
                 n++;
         }
-- 
2.43.0
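
Note (not part of the patch): a minimal standalone C sketch of the budget pattern the hunk above introduces, where (unsigned) -1 (i.e. UINT_MAX) serves as an "infinite" budget that is never decremented, so signal generation is not throttled while a reload is pending. All names below (drain_queue, MESSAGE_BUDGET, the queued counter) are hypothetical stand-ins for m->dbus_unit_queue, MANAGER_BUS_MESSAGE_BUDGET and the manager state, chosen only to keep the example self-contained.

#include <stdbool.h>
#include <stdio.h>

#define MESSAGE_BUDGET 4U   /* hypothetical stand-in for MANAGER_BUS_MESSAGE_BUDGET */

static unsigned drain_queue(unsigned queued, bool reloading) {
        unsigned budget, n = 0;

        /* When reloading, never throttle: use the sentinel value as an infinite budget. */
        budget = reloading ? (unsigned) -1 : MESSAGE_BUDGET;

        while (budget != 0 && queued > 0) {
                queued--;            /* "send" one change signal */
                n++;

                if (budget != (unsigned) -1)
                        budget--;    /* only a finite budget is consumed */
        }

        return n;
}

int main(void) {
        printf("throttled: %u of 10 sent\n", drain_queue(10, false)); /* prints 4 */
        printf("reloading: %u of 10 sent\n", drain_queue(10, true));  /* prints 10 */
        return 0;
}

This mirrors why the patch changes the loop conditions from "budget > 0" to "budget != 0" and guards the decrement: with the sentinel in place, the loops run until the queues are empty rather than stopping after a fixed number of messages.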
