File apache2-mod_http2-1.15.14.patch of Package apache2.21779

diff -up httpd-2.4.33/modules/http2/h2_bucket_beam.c httpd-2.4.46/modules/http2/h2_bucket_beam.c
--- httpd-2.4.33/modules/http2/h2_bucket_beam.c	2020-08-11 15:45:10.397093467 +0200
+++ httpd-2.4.46/modules/http2/h2_bucket_beam.c	2020-03-06 17:15:17.000000000 +0100
@@ -196,7 +196,7 @@ static apr_bucket *h2_beam_bucket(h2_buc
  * bucket beam that can transport buckets across threads
  ******************************************************************************/
 
-static void mutex_leave(void *ctx, apr_thread_mutex_t *lock)
+static void mutex_leave(apr_thread_mutex_t *lock)
 {
     apr_thread_mutex_unlock(lock);
 }
@@ -217,7 +217,7 @@ static apr_status_t enter_yellow(h2_buck
 static void leave_yellow(h2_bucket_beam *beam, h2_beam_lock *pbl)
 {
     if (pbl->leave) {
-        pbl->leave(pbl->leave_ctx, pbl->mutex);
+        pbl->leave(pbl->mutex);
     }
 }
 
diff -up httpd-2.4.33/modules/http2/h2_bucket_beam.h httpd-2.4.46/modules/http2/h2_bucket_beam.h
--- httpd-2.4.33/modules/http2/h2_bucket_beam.h	2018-02-10 16:46:12.000000000 +0100
+++ httpd-2.4.46/modules/http2/h2_bucket_beam.h	2020-03-06 17:15:17.000000000 +0100
@@ -126,12 +126,11 @@ typedef struct {
  * buffers until the transmission is complete. Star gates use a similar trick.
  */
 
-typedef void h2_beam_mutex_leave(void *ctx,  struct apr_thread_mutex_t *lock);
+typedef void h2_beam_mutex_leave(struct apr_thread_mutex_t *lock);
 
 typedef struct {
     apr_thread_mutex_t *mutex;
     h2_beam_mutex_leave *leave;
-    void *leave_ctx;
 } h2_beam_lock;
 
 typedef struct h2_bucket_beam h2_bucket_beam;
diff -up httpd-2.4.33/modules/http2/h2_config.c httpd-2.4.46/modules/http2/h2_config.c
--- httpd-2.4.33/modules/http2/h2_config.c	2020-08-11 15:45:10.513094213 +0200
+++ httpd-2.4.46/modules/http2/h2_config.c	2020-01-02 00:26:43.000000000 +0100
@@ -269,8 +269,7 @@ static apr_int64_t h2_srv_config_geti64(
         case H2_CONF_UPGRADE:
             return H2_CONFIG_GET(conf, &defconf, h2_upgrade);
         case H2_CONF_DIRECT:
-            return 1;
-            /*return H2_CONFIG_GET(conf, &defconf, h2_direct);*/
+            return H2_CONFIG_GET(conf, &defconf, h2_direct);
         case H2_CONF_TLS_WARMUP_SIZE:
             return H2_CONFIG_GET(conf, &defconf, tls_warmup_size);
         case H2_CONF_TLS_COOLDOWN_SECS:
diff -up httpd-2.4.33/modules/http2/h2_conn.c httpd-2.4.46/modules/http2/h2_conn.c
--- httpd-2.4.33/modules/http2/h2_conn.c	2020-08-11 15:45:10.513094213 +0200
+++ httpd-2.4.46/modules/http2/h2_conn.c	2020-07-08 13:53:48.000000000 +0200
@@ -80,7 +80,7 @@ static void check_modules(int force)
                 mpm_type = H2_MPM_PREFORK;
                 mpm_module = m;
                 /* While http2 can work really well on prefork, it collides
-                 * today's use case for prefork: runnning single-thread app engines
+                 * today's use case for prefork: running single-thread app engines
                  * like php. If we restrict h2_workers to 1 per process, php will
                  * work fine, but browser will be limited to 1 active request at a
                  * time. */
@@ -138,7 +138,7 @@ apr_status_t h2_conn_child_init(apr_pool
     ap_register_input_filter("H2_IN", h2_filter_core_input,
                              NULL, AP_FTYPE_CONNECTION);
    
-    status = h2_mplx_child_init(pool, s);
+    status = h2_mplx_m_child_init(pool, s);
 
     if (status == APR_SUCCESS) {
         status = apr_socket_create(&dummy_socket, APR_INET, SOCK_STREAM,
@@ -187,6 +187,12 @@ apr_status_t h2_conn_setup(conn_rec *c,
     if (APR_SUCCESS == (status = h2_session_create(&session, c, r, s, workers))) {
         ctx = h2_ctx_get(c, 1);
         h2_ctx_session_set(ctx, session);
+
+        /* remove the input filter of mod_reqtimeout, now that the connection
+         * is established and we have switched to h2. reqtimeout has supervised
+         * possibly configured handshake timeouts and needs to get out of the way
+         * now since the rest of its state handling assumes http/1.x to take place. */
+        ap_remove_input_filter_byhandle(c->input_filters, "reqtimeout");
     }
     
     return status;
@@ -260,7 +266,7 @@ apr_status_t h2_conn_pre_close(struct h2
     return DONE;
 }
 
-conn_rec *h2_slave_create(conn_rec *master, int slave_id, apr_pool_t *parent)
+conn_rec *h2_secondary_create(conn_rec *master, int sec_id, apr_pool_t *parent)
 {
     apr_allocator_t *allocator;
     apr_status_t status;
@@ -271,11 +277,11 @@ conn_rec *h2_slave_create(conn_rec *mast
     
     ap_assert(master);
     ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, master,
-                  "h2_stream(%ld-%d): create slave", master->id, slave_id);
+                  "h2_stream(%ld-%d): create secondary", master->id, sec_id);
     
     /* We create a pool with its own allocator to be used for
      * processing a request. This is the only way to have the processing
-     * independant of its parent pool in the sense that it can work in
+     * independent of its parent pool in the sense that it can work in
      * another thread. Also, the new allocator needs its own mutex to
      * synchronize sub-pools.
      */
@@ -284,18 +290,18 @@ conn_rec *h2_slave_create(conn_rec *mast
     status = apr_pool_create_ex(&pool, parent, NULL, allocator);
     if (status != APR_SUCCESS) {
         ap_log_cerror(APLOG_MARK, APLOG_ERR, status, master, 
-                      APLOGNO(10004) "h2_session(%ld-%d): create slave pool",
-                      master->id, slave_id);
+                      APLOGNO(10004) "h2_session(%ld-%d): create secondary pool",
+                      master->id, sec_id);
         return NULL;
     }
     apr_allocator_owner_set(allocator, pool);
-    apr_pool_tag(pool, "h2_slave_conn");
+    apr_pool_tag(pool, "h2_secondary_conn");
  
     c = (conn_rec *) apr_palloc(pool, sizeof(conn_rec));
     if (c == NULL) {
         ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_ENOMEM, master, 
-                      APLOGNO(02913) "h2_session(%ld-%d): create slave",
-                      master->id, slave_id);
+                      APLOGNO(02913) "h2_session(%ld-%d): create secondary",
+                      master->id, sec_id);
         apr_pool_destroy(pool);
         return NULL;
     }
@@ -314,27 +320,27 @@ conn_rec *h2_slave_create(conn_rec *mast
 #endif
     c->bucket_alloc           = apr_bucket_alloc_create(pool);
 #if !AP_MODULE_MAGIC_AT_LEAST(20180720, 1)
-     c->data_in_input_filters  = 0;
-     c->data_in_output_filters = 0;
+    c->data_in_input_filters  = 0;
+    c->data_in_output_filters = 0;
 #endif
     /* prevent mpm_event from making wrong assumptions about this connection,
      * like e.g. using its socket for an async read check. */
     c->clogging_input_filters = 1;
     c->log                    = NULL;
     c->log_id                 = apr_psprintf(pool, "%ld-%d", 
-                                             master->id, slave_id);
+                                             master->id, sec_id);
     c->aborted                = 0;
-    /* We cannot install the master connection socket on the slaves, as
+    /* We cannot install the master connection socket on the secondary, as
      * modules mess with timeouts/blocking of the socket, with
      * unwanted side effects to the master connection processing.
-     * Fortunately, since we never use the slave socket, we can just install
+     * Fortunately, since we never use the secondary socket, we can just install
      * a single, process-wide dummy and everyone is happy.
      */
     ap_set_module_config(c->conn_config, &core_module, dummy_socket);
     /* TODO: these should be unique to this thread */
     c->sbh                    = master->sbh;
-    /* TODO: not all mpm modules have learned about slave connections yet.
-     * copy their config from master to slave.
+    /* TODO: not all mpm modules have learned about secondary connections yet.
+     * copy their config from master to secondary.
      */
     if ((mpm = h2_conn_mpm_module()) != NULL) {
         cfg = ap_get_module_config(master->conn_config, mpm);
@@ -342,38 +348,38 @@ conn_rec *h2_slave_create(conn_rec *mast
     }
 
     ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, c, 
-                  "h2_slave(%s): created", c->log_id);
+                  "h2_secondary(%s): created", c->log_id);
     return c;
 }
 
-void h2_slave_destroy(conn_rec *slave)
+void h2_secondary_destroy(conn_rec *secondary)
 {
-    ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, slave,
-                  "h2_slave(%s): destroy", slave->log_id);
-    slave->sbh = NULL;
-    apr_pool_destroy(slave->pool);
+    ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, secondary,
+                  "h2_secondary(%s): destroy", secondary->log_id);
+    secondary->sbh = NULL;
+    apr_pool_destroy(secondary->pool);
 }
 
-apr_status_t h2_slave_run_pre_connection(conn_rec *slave, apr_socket_t *csd)
+apr_status_t h2_secondary_run_pre_connection(conn_rec *secondary, apr_socket_t *csd)
 {
-    if (slave->keepalives == 0) {
+    if (secondary->keepalives == 0) {
         /* Simulate that we had already a request on this connection. Some
          * hooks trigger special behaviour when keepalives is 0. 
          * (Not necessarily in pre_connection, but later. Set it here, so it
          * is in place.) */
-        slave->keepalives = 1;
+        secondary->keepalives = 1;
         /* We signal that this connection will be closed after the request.
          * Which is true in that sense that we throw away all traffic data
-         * on this slave connection after each requests. Although we might
+         * on this secondary connection after each request. Although we might
          * reuse internal structures like memory pools.
          * The wanted effect of this is that httpd does not try to clean up
          * any dangling data on this connection when a request is done. Which
-         * is unneccessary on a h2 stream.
+         * is unnecessary on a h2 stream.
          */
-        slave->keepalive = AP_CONN_CLOSE;
-        return ap_run_pre_connection(slave, csd);
+        secondary->keepalive = AP_CONN_CLOSE;
+        return ap_run_pre_connection(secondary, csd);
     }
-    ap_assert(slave->output_filters);
+    ap_assert(secondary->output_filters);
     return APR_SUCCESS;
 }
 
diff -up httpd-2.4.33/modules/http2/h2_conn.h httpd-2.4.46/modules/http2/h2_conn.h
--- httpd-2.4.33/modules/http2/h2_conn.h	2020-08-11 15:45:10.429093673 +0200
+++ httpd-2.4.46/modules/http2/h2_conn.h	2020-07-08 13:53:48.000000000 +0200
@@ -68,10 +68,10 @@ h2_mpm_type_t h2_conn_mpm_type(void);
 const char *h2_conn_mpm_name(void);
 int h2_mpm_supported(void);
 
-conn_rec *h2_slave_create(conn_rec *master, int slave_id, apr_pool_t *parent);
-void h2_slave_destroy(conn_rec *slave);
+conn_rec *h2_secondary_create(conn_rec *master, int sec_id, apr_pool_t *parent);
+void h2_secondary_destroy(conn_rec *secondary);
 
-apr_status_t h2_slave_run_pre_connection(conn_rec *slave, apr_socket_t *csd);
-void h2_slave_run_connection(conn_rec *slave);
+apr_status_t h2_secondary_run_pre_connection(conn_rec *secondary, apr_socket_t *csd);
+void h2_secondary_run_connection(conn_rec *secondary);
 
 #endif /* defined(__mod_h2__h2_conn__) */
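
The pool setup described in the h2_secondary_create() comments above ("a pool with its own allocator ... the new allocator needs its own mutex") follows a general APR pattern for pools that must be usable from a different thread than their parent. Below is a minimal sketch of that pattern using only public APR calls; thread_pool_create is an illustrative name, not part of this patch:

#include <apr_pools.h>
#include <apr_allocator.h>
#include <apr_thread_mutex.h>

/* Create a child pool that owns its own allocator, so it can be used
 * (and create sub-pools) from another thread than its parent. */
static apr_status_t thread_pool_create(apr_pool_t **out, apr_pool_t *parent)
{
    apr_allocator_t *allocator;
    apr_thread_mutex_t *mutex;
    apr_pool_t *pool;
    apr_status_t rv;

    rv = apr_allocator_create(&allocator);
    if (rv != APR_SUCCESS) return rv;

    rv = apr_pool_create_ex(&pool, parent, NULL, allocator);
    if (rv != APR_SUCCESS) {
        apr_allocator_destroy(allocator);
        return rv;
    }
    /* the pool now owns the allocator; both go away together */
    apr_allocator_owner_set(allocator, pool);

    /* sub-pool creation from other threads needs a mutex on the allocator */
    rv = apr_thread_mutex_create(&mutex, APR_THREAD_MUTEX_DEFAULT, pool);
    if (rv != APR_SUCCESS) {
        apr_pool_destroy(pool);
        return rv;
    }
    apr_allocator_mutex_set(allocator, mutex);

    *out = pool;
    return APR_SUCCESS;
}
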
diff -up httpd-2.4.33/modules/http2/h2_filter.c httpd-2.4.46/modules/http2/h2_filter.c
--- httpd-2.4.33/modules/http2/h2_filter.c	2020-08-11 15:45:10.513094213 +0200
+++ httpd-2.4.46/modules/http2/h2_filter.c	2020-07-08 13:53:48.000000000 +0200
@@ -370,7 +370,7 @@ static void add_streams(apr_bucket_briga
     x.s = s;
     x.idx = 0;
     bbout(bb, "  \"streams\": {");
-    h2_mplx_stream_do(s->mplx, add_stream, &x);
+    h2_mplx_m_stream_do(s->mplx, add_stream, &x);
     bbout(bb, "\n  }%s\n", last? "" : ",");
 }
 
@@ -433,7 +433,7 @@ static void add_stats(apr_bucket_brigade
 static apr_status_t h2_status_insert(h2_task *task, apr_bucket *b)
 {
     h2_mplx *m = task->mplx;
-    h2_stream *stream = h2_mplx_stream_get(m, task->stream_id);
+    h2_stream *stream = h2_mplx_t_stream_get(m, task);
     h2_session *s;
     conn_rec *c;
     
diff -up httpd-2.4.33/modules/http2/h2_from_h1.c httpd-2.4.46/modules/http2/h2_from_h1.c
--- httpd-2.4.33/modules/http2/h2_from_h1.c	2020-08-11 15:45:10.429093673 +0200
+++ httpd-2.4.46/modules/http2/h2_from_h1.c	2020-07-15 16:17:17.000000000 +0200
@@ -315,6 +315,7 @@ typedef struct h2_response_parser {
     int http_status;
     apr_array_header_t *hlines;
     apr_bucket_brigade *tmp;
+    apr_bucket_brigade *saveto;
 } h2_response_parser;
 
 static apr_status_t parse_header(h2_response_parser *parser, char *line) {
@@ -351,13 +352,17 @@ static apr_status_t get_line(h2_response
         parser->tmp = apr_brigade_create(task->pool, task->c->bucket_alloc);
     }
     status = apr_brigade_split_line(parser->tmp, bb, APR_BLOCK_READ, 
-                                    HUGE_STRING_LEN);
+                                    len);
     if (status == APR_SUCCESS) {
         --len;
         status = apr_brigade_flatten(parser->tmp, line, &len);
         if (status == APR_SUCCESS) {
             /* we assume a non-0 containing line and remove trailing crlf. */
             line[len] = '\0';
+            /*
+             * XXX: What to do if there is an LF but no CRLF?
+             *      Should we error out?
+             */
             if (len >= 2 && !strcmp(H2_CRLF, line + len - 2)) {
                 len -= 2;
                 line[len] = '\0';
@@ -367,10 +372,47 @@ static apr_status_t get_line(h2_response
                               task->id, line);
             }
             else {
+                apr_off_t brigade_length;
+
+                /*
+                 * If the brigade parser->tmp becomes longer than our buffer
+                 * for flattening we never have a chance to get a complete
+                 * line. This can happen if we are called multiple times after
+                 * previous calls did not find a H2_CRLF and we returned
+                 * APR_EAGAIN. In this case parser->tmp (correctly) grows
+                 * with each call to apr_brigade_split_line.
+                 *
+                 * XXX: Currently a stack based buffer of HUGE_STRING_LEN is
+                 * used. This means we cannot cope with lines larger than
+                 * HUGE_STRING_LEN which might be an issue.
+                 */
+                status = apr_brigade_length(parser->tmp, 0, &brigade_length);
+                if ((status != APR_SUCCESS) || (brigade_length > len)) {
+                    ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, task->c, APLOGNO(10257)
+                                  "h2_task(%s): read response, line too long",
+                                  task->id);
+                    return APR_ENOSPC;
+                }
                 /* this does not look like a complete line yet */
                 ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, task->c,
                               "h2_task(%s): read response, incomplete line: %s", 
                               task->id, line);
+                if (!parser->saveto) {
+                    parser->saveto = apr_brigade_create(task->pool,
+                                                        task->c->bucket_alloc);
+                }
+                /*
+                 * Be on the safe side and save the parser->tmp brigade
+                 * as it could contain transient buckets which could be
+                 * invalid next time we are here.
+                 *
+                 * NULL for the filter parameter is ok since we
+                 * provide our own brigade as second parameter
+                 * and ap_save_brigade does not need to create one.
+                 */
+                ap_save_brigade(NULL, &(parser->saveto), &(parser->tmp),
+                                parser->tmp->p);
+                APR_BRIGADE_CONCAT(parser->tmp, parser->saveto);
                 return APR_EAGAIN;
             }
         }
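
The get_line() changes above do two things: they cap apr_brigade_split_line() at the size of the flatten buffer and fail with APR_ENOSPC once the accumulated brigade can no longer yield a complete line, and they set transient buckets aside with ap_save_brigade() so the data survives until the next call. Below is a compact sketch of that handling with the public APR/httpd brigade API; line_parser, read_line and buflen are illustrative stand-ins for h2_response_parser, get_line and HUGE_STRING_LEN:

#include <string.h>
#include "httpd.h"
#include "util_filter.h"
#include "apr_buckets.h"

typedef struct {
    apr_pool_t *pool;
    apr_bucket_alloc_t *balloc;
    apr_bucket_brigade *tmp;     /* grows while no CRLF has been seen yet */
    apr_bucket_brigade *saveto;  /* stable copy of transient buckets */
} line_parser;

static apr_status_t read_line(line_parser *p, apr_bucket_brigade *bb,
                              char *line, apr_size_t buflen)
{
    apr_size_t len = buflen;
    apr_off_t tmp_len;
    apr_status_t rv;

    if (!p->tmp) {
        p->tmp = apr_brigade_create(p->pool, p->balloc);
    }
    /* never read more than the flatten buffer can hold */
    rv = apr_brigade_split_line(p->tmp, bb, APR_BLOCK_READ, len);
    if (rv != APR_SUCCESS) return rv;

    --len;
    rv = apr_brigade_flatten(p->tmp, line, &len);
    if (rv != APR_SUCCESS) return rv;
    line[len] = '\0';

    if (len >= 2 && !strcmp("\r\n", line + len - 2)) {
        line[len - 2] = '\0';                 /* complete line, strip CRLF */
        apr_brigade_cleanup(p->tmp);
        return APR_SUCCESS;
    }

    /* incomplete: once the accumulated data exceeds the buffer, flattening
     * a complete line has become impossible -> give up */
    rv = apr_brigade_length(p->tmp, 0, &tmp_len);
    if (rv != APR_SUCCESS || tmp_len > (apr_off_t)len) {
        return APR_ENOSPC;
    }
    /* transient buckets may be invalid by the next call, so set the data
     * aside into a stable brigade and put it back into tmp */
    if (!p->saveto) {
        p->saveto = apr_brigade_create(p->pool, p->balloc);
    }
    ap_save_brigade(NULL, &p->saveto, &p->tmp, p->tmp->p);
    APR_BRIGADE_CONCAT(p->tmp, p->saveto);
    return APR_EAGAIN;
}
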
diff -up httpd-2.4.33/modules/http2/h2.h httpd-2.4.46/modules/http2/h2.h
--- httpd-2.4.33/modules/http2/h2.h	2020-08-11 15:45:10.513094213 +0200
+++ httpd-2.4.46/modules/http2/h2.h	2020-02-21 01:33:40.000000000 +0100
@@ -138,7 +138,7 @@ struct h2_request {
     apr_table_t *headers;
 
     apr_time_t request_time;
-    unsigned int chunked : 1;   /* iff requst body needs to be forwarded as chunked */
+    unsigned int chunked : 1;   /* iff request body needs to be forwarded as chunked */
     unsigned int serialize : 1; /* iff this request is written in HTTP/1.1 serialization */
     apr_off_t raw_bytes;        /* RAW network bytes that generated this request - if known. */
 };
diff -up httpd-2.4.33/modules/http2/h2_h2.c httpd-2.4.46/modules/http2/h2_h2.c
--- httpd-2.4.33/modules/http2/h2_h2.c	2020-08-11 15:45:10.513094213 +0200
+++ httpd-2.4.46/modules/http2/h2_h2.c	2020-07-08 13:53:48.000000000 +0200
@@ -542,7 +542,7 @@ int h2_allows_h2_upgrade(request_rec *r)
  * Register various hooks
  */
 static const char* const mod_ssl[]        = { "mod_ssl.c", NULL};
-static const char* const mod_reqtimeout[] = { "mod_reqtimeout.c", NULL};
+static const char* const mod_reqtimeout[] = { "mod_ssl.c", "mod_reqtimeout.c", NULL};
 
 void h2_h2_register_hooks(void)
 {
@@ -553,7 +553,7 @@ void h2_h2_register_hooks(void)
      * a chance to take over before it.
      */
     ap_hook_process_connection(h2_h2_process_conn, 
-                               mod_ssl, mod_reqtimeout, APR_HOOK_LAST);
+                               mod_reqtimeout, NULL, APR_HOOK_LAST);
     
     /* One last chance to properly say goodbye if we have not done so
      * already. */
@@ -666,7 +666,7 @@ static int h2_h2_pre_close_conn(conn_rec
 {
     h2_ctx *ctx;
 
-    /* slave connection? */
+    /* secondary connection? */
     if (c->master) {
         return DECLINED;
     }
@@ -710,7 +710,7 @@ static void check_push(request_rec *r, c
 
 static int h2_h2_post_read_req(request_rec *r)
 {
-    /* slave connection? */
+    /* secondary connection? */
     if (r->connection->master) {
         struct h2_task *task = h2_ctx_get_task(r->connection);
         /* This hook will get called twice on internal redirects. Take care
@@ -729,7 +729,7 @@ static int h2_h2_post_read_req(request_r
             ap_add_output_filter("H2_RESPONSE", task, r, r->connection);
             
             for (f = r->input_filters; f; f = f->next) {
-                if (!strcmp("H2_SLAVE_IN", f->frec->name)) {
+                if (!strcmp("H2_SECONDARY_IN", f->frec->name)) {
                     f->r = r;
                     break;
                 }
@@ -743,7 +743,7 @@ static int h2_h2_post_read_req(request_r
 
 static int h2_h2_late_fixups(request_rec *r)
 {
-    /* slave connection? */
+    /* secondary connection? */
     if (r->connection->master) {
         struct h2_task *task = h2_ctx_get_task(r->connection);
         if (task) {
@@ -751,7 +751,7 @@ static int h2_h2_late_fixups(request_rec
             task->output.copy_files = h2_config_rgeti(r, H2_CONF_COPY_FILES);
             if (task->output.copy_files) {
                 ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c,
-                              "h2_slave_out(%s): copy_files on", task->id);
+                              "h2_secondary_out(%s): copy_files on", task->id);
                 h2_beam_on_file_beam(task->output.beam, h2_beam_no_files, NULL);
             }
             check_push(r, "late_fixup");
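
The hook registration change above relies on httpd's relative hook ordering: the predecessor array now names both mod_ssl.c and mod_reqtimeout.c and is passed as the aszPre argument (with aszSucc NULL), so h2_h2_process_conn is sorted after both modules' process_connection hooks. Below is a minimal sketch of that mechanism in a hypothetical module (my_process_conn and my_example_module are illustrative names, not part of this patch):

#include "httpd.h"
#include "http_config.h"
#include "http_connection.h"

/* run after the process_connection hooks of these modules */
static const char *const run_after[] = { "mod_ssl.c", "mod_reqtimeout.c", NULL };

static int my_process_conn(conn_rec *c)
{
    (void)c;
    return DECLINED;  /* let the next hook handle the connection */
}

static void my_register_hooks(apr_pool_t *p)
{
    (void)p;
    /* aszPre = run_after, aszSucc = NULL, late in the overall ordering */
    ap_hook_process_connection(my_process_conn, run_after, NULL, APR_HOOK_LAST);
}

module AP_MODULE_DECLARE_DATA my_example_module = {
    STANDARD20_MODULE_STUFF,
    NULL,                  /* create per-dir config */
    NULL,                  /* merge per-dir config */
    NULL,                  /* create per-server config */
    NULL,                  /* merge per-server config */
    NULL,                  /* command table */
    my_register_hooks
};
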
diff -up httpd-2.4.33/modules/http2/h2_mplx.c httpd-2.4.46/modules/http2/h2_mplx.c
--- httpd-2.4.33/modules/http2/h2_mplx.c	2020-08-11 15:45:10.513094213 +0200
+++ httpd-2.4.46/modules/http2/h2_mplx.c	2020-07-08 13:53:48.000000000 +0200
@@ -56,10 +56,18 @@ typedef struct {
     apr_size_t count;
 } stream_iter_ctx;
 
-static apr_status_t mplx_be_happy(h2_mplx *m);
-static apr_status_t mplx_be_annoyed(h2_mplx *m);
+/**
+ * Naming convention for static functions:
+ * - m_*: function only called from the master connection
+ * - s_*: function only called from a secondary connection
+ * - t_*: function only called from a h2_task holder
+ * - mst_*: function called from everyone
+ */
+
+static apr_status_t s_mplx_be_happy(h2_mplx *m, h2_task *task);
+static apr_status_t m_be_annoyed(h2_mplx *m);
 
-apr_status_t h2_mplx_child_init(apr_pool_t *pool, server_rec *s)
+apr_status_t h2_mplx_m_child_init(apr_pool_t *pool, server_rec *s)
 {
     return APR_SUCCESS;
 }
@@ -75,40 +83,40 @@ apr_status_t h2_mplx_child_init(apr_pool
 #define H2_MPLX_ENTER_ALWAYS(m)    \
     apr_thread_mutex_lock(m->lock)
 
-#define H2_MPLX_ENTER_MAYBE(m, lock)    \
-    if (lock) apr_thread_mutex_lock(m->lock)
+#define H2_MPLX_ENTER_MAYBE(m, dolock)    \
+    if (dolock) apr_thread_mutex_lock(m->lock)
 
-#define H2_MPLX_LEAVE_MAYBE(m, lock)    \
-    if (lock) apr_thread_mutex_unlock(m->lock)
+#define H2_MPLX_LEAVE_MAYBE(m, dolock)    \
+    if (dolock) apr_thread_mutex_unlock(m->lock)
 
-static void check_data_for(h2_mplx *m, h2_stream *stream, int lock);
+static void mst_check_data_for(h2_mplx *m, h2_stream *stream, int mplx_is_locked);
 
-static void stream_output_consumed(void *ctx, 
-                                   h2_bucket_beam *beam, apr_off_t length)
+static void mst_stream_output_consumed(void *ctx, h2_bucket_beam *beam, apr_off_t length)
 {
 }
 
-static void stream_input_ev(void *ctx, h2_bucket_beam *beam)
+static void mst_stream_input_ev(void *ctx, h2_bucket_beam *beam)
 {
     h2_stream *stream = ctx;
     h2_mplx *m = stream->session->mplx;
     apr_atomic_set32(&m->event_pending, 1); 
 }
 
-static void stream_input_consumed(void *ctx, h2_bucket_beam *beam, apr_off_t length)
+static void m_stream_input_consumed(void *ctx, h2_bucket_beam *beam, apr_off_t length)
 {
     h2_stream_in_consumed(ctx, length);
 }
 
-static void stream_joined(h2_mplx *m, h2_stream *stream)
+static void ms_stream_joined(h2_mplx *m, h2_stream *stream)
 {
     ap_assert(!h2_task_has_started(stream->task) || stream->task->worker_done);
     
+    h2_ififo_remove(m->readyq, stream->id);
     h2_ihash_remove(m->shold, stream->id);
     h2_ihash_add(m->spurge, stream);
 }
 
-static void stream_cleanup(h2_mplx *m, h2_stream *stream)
+static void m_stream_cleanup(h2_mplx *m, h2_stream *stream)
 {
     ap_assert(stream->state == H2_SS_CLEANUP);
 
@@ -125,14 +133,16 @@ static void stream_cleanup(h2_mplx *m, h
 
     h2_ihash_remove(m->streams, stream->id);
     h2_iq_remove(m->q, stream->id);
-    h2_ififo_remove(m->readyq, stream->id);
-    h2_ihash_add(m->shold, stream);
     
     if (!h2_task_has_started(stream->task) || stream->task->done_done) {
-        stream_joined(m, stream);
+        ms_stream_joined(m, stream);
     }
-    else if (stream->task) {
-        stream->task->c->aborted = 1;
+    else {
+        h2_ififo_remove(m->readyq, stream->id);
+        h2_ihash_add(m->shold, stream);
+        if (stream->task) {
+            stream->task->c->aborted = 1;
+        }
     }
 }
 
@@ -147,8 +157,8 @@ static void stream_cleanup(h2_mplx *m, h
  *   their HTTP/1 cousins, the separate allocator seems to work better
  *   than protecting a shared h2_session one with an own lock.
  */
-h2_mplx *h2_mplx_create(conn_rec *c, server_rec *s, apr_pool_t *parent, 
-                        h2_workers *workers)
+h2_mplx *h2_mplx_m_create(conn_rec *c, server_rec *s, apr_pool_t *parent, 
+                          h2_workers *workers)
 {
     apr_status_t status = APR_SUCCESS;
     apr_allocator_t *allocator;
@@ -162,8 +172,8 @@ h2_mplx *h2_mplx_create(conn_rec *c, ser
         m->s = s;
         
         /* We create a pool with its own allocator to be used for
-         * processing slave connections. This is the only way to have the
-         * processing independant of its parent pool in the sense that it
+         * processing secondary connections. This is the only way to have the
+         * processing independent of its parent pool in the sense that it
          * can work in another thread. Also, the new allocator needs its own
          * mutex to synchronize sub-pools.
          */
@@ -214,12 +224,12 @@ h2_mplx *h2_mplx_create(conn_rec *c, ser
         m->last_mood_change = apr_time_now();
         m->mood_update_interval = apr_time_from_msec(100);
         
-        m->spare_slaves = apr_array_make(m->pool, 10, sizeof(conn_rec*));
+        m->spare_secondary = apr_array_make(m->pool, 10, sizeof(conn_rec*));
     }
     return m;
 }
 
-int h2_mplx_shutdown(h2_mplx *m)
+int h2_mplx_m_shutdown(h2_mplx *m)
 {
     int max_stream_started = 0;
     
@@ -233,7 +243,7 @@ int h2_mplx_shutdown(h2_mplx *m)
     return max_stream_started;
 }
 
-static int input_consumed_signal(h2_mplx *m, h2_stream *stream)
+static int m_input_consumed_signal(h2_mplx *m, h2_stream *stream)
 {
     if (stream->input) {
         return h2_beam_report_consumption(stream->input);
@@ -241,12 +251,12 @@ static int input_consumed_signal(h2_mplx
     return 0;
 }
 
-static int report_consumption_iter(void *ctx, void *val)
+static int m_report_consumption_iter(void *ctx, void *val)
 {
     h2_stream *stream = val;
     h2_mplx *m = ctx;
     
-    input_consumed_signal(m, stream);
+    m_input_consumed_signal(m, stream);
     if (stream->state == H2_SS_CLOSED_L
         && (!stream->task || stream->task->worker_done)) {
         ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c, 
@@ -257,7 +267,7 @@ static int report_consumption_iter(void
     return 1;
 }
 
-static int output_consumed_signal(h2_mplx *m, h2_task *task)
+static int s_output_consumed_signal(h2_mplx *m, h2_task *task)
 {
     if (task->output.beam) {
         return h2_beam_report_consumption(task->output.beam);
@@ -265,7 +275,7 @@ static int output_consumed_signal(h2_mpl
     return 0;
 }
 
-static int stream_destroy_iter(void *ctx, void *val) 
+static int m_stream_destroy_iter(void *ctx, void *val) 
 {   
     h2_mplx *m = ctx;
     h2_stream *stream = val;
@@ -275,7 +285,7 @@ static int stream_destroy_iter(void *ctx
     
     if (stream->input) {
         /* Process outstanding events before destruction */
-        input_consumed_signal(m, stream);    
+        m_input_consumed_signal(m, stream);    
         h2_beam_log(stream->input, m->c, APLOG_TRACE2, "stream_destroy");
         h2_beam_destroy(stream->input);
         stream->input = NULL;
@@ -283,12 +293,12 @@ static int stream_destroy_iter(void *ctx
 
     if (stream->task) {
         h2_task *task = stream->task;
-        conn_rec *slave;
-        int reuse_slave = 0;
+        conn_rec *secondary;
+        int reuse_secondary = 0;
         
         stream->task = NULL;
-        slave = task->c;
-        if (slave) {
+        secondary = task->c;
+        if (secondary) {
             /* On non-serialized requests, the IO logging has not accounted for any
              * meta data send over the network: response headers and h2 frame headers. we
              * counted this on the stream and need to add this now.
@@ -297,26 +307,25 @@ static int stream_destroy_iter(void *ctx
             if (task->request && !task->request->serialize && h2_task_logio_add_bytes_out) {
                 apr_off_t unaccounted = stream->out_frame_octets - stream->out_data_octets;
                 if (unaccounted > 0) {
-                    h2_task_logio_add_bytes_out(slave, unaccounted);
+                    h2_task_logio_add_bytes_out(secondary, unaccounted);
                 }
             }
         
-            if (m->s->keep_alive_max == 0 || slave->keepalives < m->s->keep_alive_max) {
-                reuse_slave = ((m->spare_slaves->nelts < (m->limit_active * 3 / 2))
-                               && !task->rst_error);
+            if (m->s->keep_alive_max == 0 || secondary->keepalives < m->s->keep_alive_max) {
+                reuse_secondary = ((m->spare_secondary->nelts < (m->limit_active * 3 / 2))
+                                   && !task->rst_error);
             }
             
-            task->c = NULL;
-            if (reuse_slave) {
+            if (reuse_secondary) {
                 h2_beam_log(task->output.beam, m->c, APLOG_DEBUG, 
-                            APLOGNO(03385) "h2_task_destroy, reuse slave");    
+                            APLOGNO(03385) "h2_task_destroy, reuse secondary");    
                 h2_task_destroy(task);
-                APR_ARRAY_PUSH(m->spare_slaves, conn_rec*) = slave;
+                APR_ARRAY_PUSH(m->spare_secondary, conn_rec*) = secondary;
             }
             else {
                 h2_beam_log(task->output.beam, m->c, APLOG_TRACE1, 
-                            "h2_task_destroy, destroy slave");    
-                h2_slave_destroy(slave);
+                            "h2_task_destroy, destroy secondary");    
+                h2_secondary_destroy(secondary);
             }
         }
     }
@@ -324,11 +333,11 @@ static int stream_destroy_iter(void *ctx
     return 0;
 }
 
-static void purge_streams(h2_mplx *m, int lock)
+static void m_purge_streams(h2_mplx *m, int lock)
 {
     if (!h2_ihash_empty(m->spurge)) {
         H2_MPLX_ENTER_MAYBE(m, lock);
-        while (!h2_ihash_iter(m->spurge, stream_destroy_iter, m)) {
+        while (!h2_ihash_iter(m->spurge, m_stream_destroy_iter, m)) {
             /* repeat until empty */
         }
         H2_MPLX_LEAVE_MAYBE(m, lock);
@@ -340,13 +349,13 @@ typedef struct {
     void *ctx;
 } stream_iter_ctx_t;
 
-static int stream_iter_wrap(void *ctx, void *stream)
+static int m_stream_iter_wrap(void *ctx, void *stream)
 {
     stream_iter_ctx_t *x = ctx;
     return x->cb(stream, x->ctx);
 }
 
-apr_status_t h2_mplx_stream_do(h2_mplx *m, h2_mplx_stream_cb *cb, void *ctx)
+apr_status_t h2_mplx_m_stream_do(h2_mplx *m, h2_mplx_stream_cb *cb, void *ctx)
 {
     stream_iter_ctx_t x;
     
@@ -354,13 +363,13 @@ apr_status_t h2_mplx_stream_do(h2_mplx *
 
     x.cb = cb;
     x.ctx = ctx;
-    h2_ihash_iter(m->streams, stream_iter_wrap, &x);
+    h2_ihash_iter(m->streams, m_stream_iter_wrap, &x);
         
     H2_MPLX_LEAVE(m);
     return APR_SUCCESS;
 }
 
-static int report_stream_iter(void *ctx, void *val) {
+static int m_report_stream_iter(void *ctx, void *val) {
     h2_mplx *m = ctx;
     h2_stream *stream = val;
     h2_task *task = stream->task;
@@ -385,7 +394,7 @@ static int report_stream_iter(void *ctx,
     return 1;
 }
 
-static int unexpected_stream_iter(void *ctx, void *val) {
+static int m_unexpected_stream_iter(void *ctx, void *val) {
     h2_mplx *m = ctx;
     h2_stream *stream = val;
     ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c, /* NO APLOGNO */
@@ -394,7 +403,7 @@ static int unexpected_stream_iter(void *
     return 1;
 }
 
-static int stream_cancel_iter(void *ctx, void *val) {
+static int m_stream_cancel_iter(void *ctx, void *val) {
     h2_mplx *m = ctx;
     h2_stream *stream = val;
 
@@ -408,11 +417,11 @@ static int stream_cancel_iter(void *ctx,
     h2_stream_rst(stream, H2_ERR_NO_ERROR);
     /* All connection data has been sent, simulate cleanup */
     h2_stream_dispatch(stream, H2_SEV_EOS_SENT);
-    stream_cleanup(m, stream);  
+    m_stream_cleanup(m, stream);  
     return 0;
 }
 
-void h2_mplx_release_and_join(h2_mplx *m, apr_thread_cond_t *wait)
+void h2_mplx_m_release_and_join(h2_mplx *m, apr_thread_cond_t *wait)
 {
     apr_status_t status;
     int i, wait_secs = 60, old_aborted;
@@ -426,7 +435,7 @@ void h2_mplx_release_and_join(h2_mplx *m
     
     H2_MPLX_ENTER_ALWAYS(m);
 
-    /* While really terminating any slave connections, treat the master
+    /* While really terminating any secondary connections, treat the master
      * connection as aborted. It's not as if we could send any more data
      * at this point. */
     old_aborted = m->c->aborted;
@@ -438,7 +447,7 @@ void h2_mplx_release_and_join(h2_mplx *m
                   "h2_mplx(%ld): release, %d/%d/%d streams (total/hold/purge), %d active tasks", 
                   m->id, (int)h2_ihash_count(m->streams),
                   (int)h2_ihash_count(m->shold), (int)h2_ihash_count(m->spurge), m->tasks_active);
-    while (!h2_ihash_iter(m->streams, stream_cancel_iter, m)) {
+    while (!h2_ihash_iter(m->streams, m_stream_cancel_iter, m)) {
         /* until empty */
     }
     
@@ -460,7 +469,7 @@ void h2_mplx_release_and_join(h2_mplx *m
             ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c, APLOGNO(03198)
                           "h2_mplx(%ld): waited %d sec for %d tasks", 
                           m->id, i*wait_secs, (int)h2_ihash_count(m->shold));
-            h2_ihash_iter(m->shold, report_stream_iter, m);
+            h2_ihash_iter(m->shold, m_report_stream_iter, m);
         }
     }
     m->join_wait = NULL;
@@ -471,7 +480,7 @@ void h2_mplx_release_and_join(h2_mplx *m
         ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c, APLOGNO(03516)
                       "h2_mplx(%ld): unexpected %d streams in hold", 
                       m->id, (int)h2_ihash_count(m->shold));
-        h2_ihash_iter(m->shold, unexpected_stream_iter, m);
+        h2_ihash_iter(m->shold, m_unexpected_stream_iter, m);
     }
     
     m->c->aborted = old_aborted;
@@ -480,41 +489,40 @@ void h2_mplx_release_and_join(h2_mplx *m
     ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c, "h2_mplx(%ld): released", m->id);
 }
 
-apr_status_t h2_mplx_stream_cleanup(h2_mplx *m, h2_stream *stream)
+apr_status_t h2_mplx_m_stream_cleanup(h2_mplx *m, h2_stream *stream)
 {
     H2_MPLX_ENTER(m);
     
     ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c, 
                   H2_STRM_MSG(stream, "cleanup"));
-    stream_cleanup(m, stream);        
+    m_stream_cleanup(m, stream);        
     
     H2_MPLX_LEAVE(m);
     return APR_SUCCESS;
 }
 
-h2_stream *h2_mplx_stream_get(h2_mplx *m, int id)
+h2_stream *h2_mplx_t_stream_get(h2_mplx *m, h2_task *task)
 {
     h2_stream *s = NULL;
     
     H2_MPLX_ENTER_ALWAYS(m);
 
-    s = h2_ihash_get(m->streams, id);
+    s = h2_ihash_get(m->streams, task->stream_id);
 
     H2_MPLX_LEAVE(m);
     return s;
 }
 
-static void output_produced(void *ctx, h2_bucket_beam *beam, apr_off_t bytes)
+static void mst_output_produced(void *ctx, h2_bucket_beam *beam, apr_off_t bytes)
 {
     h2_stream *stream = ctx;
     h2_mplx *m = stream->session->mplx;
     
-    check_data_for(m, stream, 1);
+    mst_check_data_for(m, stream, 0);
 }
 
-static apr_status_t out_open(h2_mplx *m, int stream_id, h2_bucket_beam *beam)
+static apr_status_t t_out_open(h2_mplx *m, int stream_id, h2_bucket_beam *beam)
 {
-    apr_status_t status = APR_SUCCESS;
     h2_stream *stream = h2_ihash_get(m->streams, stream_id);
     
     if (!stream || !stream->task || m->aborted) {
@@ -525,26 +533,26 @@ static apr_status_t out_open(h2_mplx *m,
     stream->output = beam;
     
     if (APLOGctrace2(m->c)) {
-        h2_beam_log(beam, m->c, APLOG_TRACE2, "out_open");
+        h2_beam_log(beam, stream->task->c, APLOG_TRACE2, "out_open");
     }
     else {
-        ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, m->c,
+        ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->task->c,
                       "h2_mplx(%s): out open", stream->task->id);
     }
     
-    h2_beam_on_consumed(stream->output, NULL, stream_output_consumed, stream);
-    h2_beam_on_produced(stream->output, output_produced, stream);
+    h2_beam_on_consumed(stream->output, NULL, mst_stream_output_consumed, stream);
+    h2_beam_on_produced(stream->output, mst_output_produced, stream);
     if (stream->task->output.copy_files) {
         h2_beam_on_file_beam(stream->output, h2_beam_no_files, NULL);
     }
     
     /* we might see some file buckets in the output, see
      * if we have enough handles reserved. */
-    check_data_for(m, stream, 0);
-    return status;
+    mst_check_data_for(m, stream, 1);
+    return APR_SUCCESS;
 }
 
-apr_status_t h2_mplx_out_open(h2_mplx *m, int stream_id, h2_bucket_beam *beam)
+apr_status_t h2_mplx_t_out_open(h2_mplx *m, int stream_id, h2_bucket_beam *beam)
 {
     apr_status_t status;
     
@@ -554,14 +562,14 @@ apr_status_t h2_mplx_out_open(h2_mplx *m
         status = APR_ECONNABORTED;
     }
     else {
-        status = out_open(m, stream_id, beam);
+        status = t_out_open(m, stream_id, beam);
     }
 
     H2_MPLX_LEAVE(m);
     return status;
 }
 
-static apr_status_t out_close(h2_mplx *m, h2_task *task)
+static apr_status_t s_out_close(h2_mplx *m, h2_task *task)
 {
     apr_status_t status = APR_SUCCESS;
     h2_stream *stream;
@@ -578,17 +586,17 @@ static apr_status_t out_close(h2_mplx *m
         return APR_ECONNABORTED;
     }
 
-    ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, m->c,
+    ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, task->c,
                   "h2_mplx(%s): close", task->id);
     status = h2_beam_close(task->output.beam);
-    h2_beam_log(task->output.beam, m->c, APLOG_TRACE2, "out_close");
-    output_consumed_signal(m, task);
-    check_data_for(m, stream, 0);
+    h2_beam_log(task->output.beam, task->c, APLOG_TRACE2, "out_close");
+    s_output_consumed_signal(m, task);
+    mst_check_data_for(m, stream, 1);
     return status;
 }
 
-apr_status_t h2_mplx_out_trywait(h2_mplx *m, apr_interval_time_t timeout,
-                                 apr_thread_cond_t *iowait)
+apr_status_t h2_mplx_m_out_trywait(h2_mplx *m, apr_interval_time_t timeout,
+                                   apr_thread_cond_t *iowait)
 {
     apr_status_t status;
     
@@ -597,12 +605,12 @@ apr_status_t h2_mplx_out_trywait(h2_mplx
     if (m->aborted) {
         status = APR_ECONNABORTED;
     }
-    else if (h2_mplx_has_master_events(m)) {
+    else if (h2_mplx_m_has_master_events(m)) {
         status = APR_SUCCESS;
     }
     else {
-        purge_streams(m, 0);
-        h2_ihash_iter(m->streams, report_consumption_iter, m);
+        m_purge_streams(m, 0);
+        h2_ihash_iter(m->streams, m_report_consumption_iter, m);
         m->added_output = iowait;
         status = apr_thread_cond_timedwait(m->added_output, m->lock, timeout);
         if (APLOGctrace2(m->c)) {
@@ -617,19 +625,27 @@ apr_status_t h2_mplx_out_trywait(h2_mplx
     return status;
 }
 
-static void check_data_for(h2_mplx *m, h2_stream *stream, int lock)
+static void mst_check_data_for(h2_mplx *m, h2_stream *stream, int mplx_is_locked)
 {
+    /* If m->lock is already held, we must release it during h2_ififo_push()
+     * which can wait on its not_full condition, causing a deadlock because
+     * no one would then be able to acquire m->lock to empty the fifo.
+     */
+    H2_MPLX_LEAVE_MAYBE(m, mplx_is_locked);
     if (h2_ififo_push(m->readyq, stream->id) == APR_SUCCESS) {
+        H2_MPLX_ENTER_ALWAYS(m);
         apr_atomic_set32(&m->event_pending, 1);
-        H2_MPLX_ENTER_MAYBE(m, lock);
         if (m->added_output) {
             apr_thread_cond_signal(m->added_output);
         }
-        H2_MPLX_LEAVE_MAYBE(m, lock);
+        H2_MPLX_LEAVE_MAYBE(m, !mplx_is_locked);
+    }
+    else {
+        H2_MPLX_ENTER_MAYBE(m, mplx_is_locked);
     }
 }
 
-apr_status_t h2_mplx_reprioritize(h2_mplx *m, h2_stream_pri_cmp *cmp, void *ctx)
+apr_status_t h2_mplx_m_reprioritize(h2_mplx *m, h2_stream_pri_cmp *cmp, void *ctx)
 {
     apr_status_t status;
     
@@ -649,22 +665,22 @@ apr_status_t h2_mplx_reprioritize(h2_mpl
     return status;
 }
 
-static void register_if_needed(h2_mplx *m) 
+static void ms_register_if_needed(h2_mplx *m, int from_master) 
 {
     if (!m->aborted && !m->is_registered && !h2_iq_empty(m->q)) {
         apr_status_t status = h2_workers_register(m->workers, m); 
         if (status == APR_SUCCESS) {
             m->is_registered = 1;
         }
-        else {
+        else if (from_master) {
             ap_log_cerror(APLOG_MARK, APLOG_ERR, status, m->c, APLOGNO(10021)
                           "h2_mplx(%ld): register at workers", m->id);
         }
     }
 }
 
-apr_status_t h2_mplx_process(h2_mplx *m, struct h2_stream *stream, 
-                             h2_stream_pri_cmp *cmp, void *ctx)
+apr_status_t h2_mplx_m_process(h2_mplx *m, struct h2_stream *stream, 
+                               h2_stream_pri_cmp *cmp, void *ctx)
 {
     apr_status_t status;
     
@@ -678,13 +694,13 @@ apr_status_t h2_mplx_process(h2_mplx *m,
         h2_ihash_add(m->streams, stream);
         if (h2_stream_is_ready(stream)) {
             /* already have a response */
-            check_data_for(m, stream, 0);
+            mst_check_data_for(m, stream, 1);
             ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
                           H2_STRM_MSG(stream, "process, add to readyq")); 
         }
         else {
             h2_iq_add(m->q, stream->id, cmp, ctx);
-            register_if_needed(m);                
+            ms_register_if_needed(m, 1);                
             ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
                           H2_STRM_MSG(stream, "process, added to q")); 
         }
@@ -694,7 +710,7 @@ apr_status_t h2_mplx_process(h2_mplx *m,
     return status;
 }
 
-static h2_task *next_stream_task(h2_mplx *m)
+static h2_task *s_next_stream_task(h2_mplx *m)
 {
     h2_stream *stream;
     int sid;
@@ -703,15 +719,15 @@ static h2_task *next_stream_task(h2_mplx
         
         stream = h2_ihash_get(m->streams, sid);
         if (stream) {
-            conn_rec *slave, **pslave;
+            conn_rec *secondary, **psecondary;
 
-            pslave = (conn_rec **)apr_array_pop(m->spare_slaves);
-            if (pslave) {
-                slave = *pslave;
-                slave->aborted = 0;
+            psecondary = (conn_rec **)apr_array_pop(m->spare_secondary);
+            if (psecondary) {
+                secondary = *psecondary;
+                secondary->aborted = 0;
             }
             else {
-                slave = h2_slave_create(m->c, stream->id, m->pool);
+                secondary = h2_secondary_create(m->c, stream->id, m->pool);
             }
             
             if (!stream->task) {
@@ -719,16 +735,16 @@ static h2_task *next_stream_task(h2_mplx
                     m->max_stream_started = sid;
                 }
                 if (stream->input) {
-                    h2_beam_on_consumed(stream->input, stream_input_ev, 
-                                        stream_input_consumed, stream);
+                    h2_beam_on_consumed(stream->input, mst_stream_input_ev, 
+                                        m_stream_input_consumed, stream);
                 }
                 
-                stream->task = h2_task_create(slave, stream->id, 
+                stream->task = h2_task_create(secondary, stream->id, 
                                               stream->request, m, stream->input, 
                                               stream->session->s->timeout,
                                               m->stream_max_mem);
                 if (!stream->task) {
-                    ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_ENOMEM, slave,
+                    ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_ENOMEM, secondary,
                                   H2_STRM_LOG(APLOGNO(02941), stream, 
                                   "create task"));
                     return NULL;
@@ -743,7 +759,7 @@ static h2_task *next_stream_task(h2_mplx
     return NULL;
 }
 
-apr_status_t h2_mplx_pop_task(h2_mplx *m, h2_task **ptask)
+apr_status_t h2_mplx_s_pop_task(h2_mplx *m, h2_task **ptask)
 {
     apr_status_t rv = APR_EOF;
     
@@ -759,7 +775,7 @@ apr_status_t h2_mplx_pop_task(h2_mplx *m
         rv = APR_EOF;
     }
     else {
-        *ptask = next_stream_task(m);
+        *ptask = s_next_stream_task(m);
         rv = (*ptask != NULL && !h2_iq_empty(m->q))? APR_EAGAIN : APR_SUCCESS;
     }
     if (APR_EAGAIN != rv) {
@@ -769,22 +785,22 @@ apr_status_t h2_mplx_pop_task(h2_mplx *m
     return rv;
 }
 
-static void task_done(h2_mplx *m, h2_task *task)
+static void s_task_done(h2_mplx *m, h2_task *task)
 {
     h2_stream *stream;
     
-    ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
+    ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c,
                   "h2_mplx(%ld): task(%s) done", m->id, task->id);
-    out_close(m, task);
+    s_out_close(m, task);
     
     task->worker_done = 1;
     task->done_at = apr_time_now();
-    ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
+    ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, task->c,
                   "h2_mplx(%s): request done, %f ms elapsed", task->id, 
                   (task->done_at - task->started_at) / 1000.0);
     
     if (task->c && !task->c->aborted && task->started_at > m->last_mood_change) {
-        mplx_be_happy(m);
+        s_mplx_be_happy(m, task);
     }
     
     ap_assert(task->done_done == 0);
@@ -796,60 +812,60 @@ static void task_done(h2_mplx *m, h2_tas
             /* reset and schedule again */
             h2_task_redo(task);
             h2_iq_add(m->q, stream->id, NULL, NULL);
-            ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, m->c,
+            ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, task->c,
                           H2_STRM_MSG(stream, "redo, added to q")); 
         }
         else {
             /* stream not cleaned up, stay around */
             task->done_done = 1;
-            ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
+            ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, task->c,
                           H2_STRM_MSG(stream, "task_done, stream open")); 
             if (stream->input) {
                 h2_beam_leave(stream->input);
             }
 
             /* more data will not arrive, resume the stream */
-            check_data_for(m, stream, 0);            
+            mst_check_data_for(m, stream, 1);            
         }
     }
     else if ((stream = h2_ihash_get(m->shold, task->stream_id)) != NULL) {
         /* stream is done, was just waiting for this. */
         task->done_done = 1;
-        ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
+        ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, task->c,
                       H2_STRM_MSG(stream, "task_done, in hold"));
         if (stream->input) {
             h2_beam_leave(stream->input);
         }
-        stream_joined(m, stream);
+        ms_stream_joined(m, stream);
     }
     else if ((stream = h2_ihash_get(m->spurge, task->stream_id)) != NULL) {
-        ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c,   
+        ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, task->c,   
                       H2_STRM_LOG(APLOGNO(03517), stream, "already in spurge"));
         ap_assert("stream should not be in spurge" == NULL);
     }
     else {
-        ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c, APLOGNO(03518)
+        ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, task->c, APLOGNO(03518)
                       "h2_mplx(%s): task_done, stream not found", 
                       task->id);
         ap_assert("stream should still be available" == NULL);
     }
 }
 
-void h2_mplx_task_done(h2_mplx *m, h2_task *task, h2_task **ptask)
+void h2_mplx_s_task_done(h2_mplx *m, h2_task *task, h2_task **ptask)
 {
     H2_MPLX_ENTER_ALWAYS(m);
 
     --m->tasks_active;
-    task_done(m, task);
+    s_task_done(m, task);
     
     if (m->join_wait) {
         apr_thread_cond_signal(m->join_wait);
     }
     if (ptask) {
         /* caller wants another task */
-        *ptask = next_stream_task(m);
+        *ptask = s_next_stream_task(m);
     }
-    register_if_needed(m);
+    ms_register_if_needed(m, 0);
 
     H2_MPLX_LEAVE(m);
 }
@@ -858,7 +874,7 @@ void h2_mplx_task_done(h2_mplx *m, h2_ta
  * h2_mplx DoS protection
  ******************************************************************************/
 
-static int timed_out_busy_iter(void *data, void *val)
+static int m_timed_out_busy_iter(void *data, void *val)
 {
     stream_iter_ctx *ctx = data;
     h2_stream *stream = val;
@@ -871,17 +887,17 @@ static int timed_out_busy_iter(void *dat
     return 1;
 }
 
-static h2_stream *get_timed_out_busy_stream(h2_mplx *m) 
+static h2_stream *m_get_timed_out_busy_stream(h2_mplx *m) 
 {
     stream_iter_ctx ctx;
     ctx.m = m;
     ctx.stream = NULL;
     ctx.now = apr_time_now();
-    h2_ihash_iter(m->streams, timed_out_busy_iter, &ctx);
+    h2_ihash_iter(m->streams, m_timed_out_busy_iter, &ctx);
     return ctx.stream;
 }
 
-static int latest_repeatable_unsubmitted_iter(void *data, void *val)
+static int m_latest_repeatable_unsubmitted_iter(void *data, void *val)
 {
     stream_iter_ctx *ctx = data;
     h2_stream *stream = val;
@@ -907,7 +923,7 @@ leave:
     return 1;
 }
 
-static apr_status_t assess_task_to_throttle(h2_task **ptask, h2_mplx *m) 
+static apr_status_t m_assess_task_to_throttle(h2_task **ptask, h2_mplx *m) 
 {
     stream_iter_ctx ctx;
     
@@ -917,7 +933,7 @@ static apr_status_t assess_task_to_throt
     ctx.m = m;
     ctx.stream = NULL;
     ctx.count = 0;
-    h2_ihash_iter(m->streams, latest_repeatable_unsubmitted_iter, &ctx);
+    h2_ihash_iter(m->streams, m_latest_repeatable_unsubmitted_iter, &ctx);
     if (m->tasks_active - ctx.count > m->limit_active) {
         /* we are above the limit of running tasks, accounting for the ones
          * already throttled. */
@@ -926,7 +942,7 @@ static apr_status_t assess_task_to_throt
             return APR_EAGAIN;
         }
         /* above limit, be seeing no candidate for easy throttling */
-        if (get_timed_out_busy_stream(m)) {
+        if (m_get_timed_out_busy_stream(m)) {
             /* Too many busy workers, unable to cancel enough streams
              * and with a busy, timed out stream, we tell the client
              * to go away... */
@@ -936,7 +952,7 @@ static apr_status_t assess_task_to_throt
     return APR_SUCCESS;
 }
 
-static apr_status_t unschedule_slow_tasks(h2_mplx *m) 
+static apr_status_t m_unschedule_slow_tasks(h2_mplx *m) 
 {
     h2_task *task;
     apr_status_t rv;
@@ -944,7 +960,7 @@ static apr_status_t unschedule_slow_task
     /* Try to get rid of streams that occupy workers. Look for safe requests
      * that are repeatable. If none found, fail the connection.
      */
-    while (APR_EAGAIN == (rv = assess_task_to_throttle(&task, m))) {
+    while (APR_EAGAIN == (rv = m_assess_task_to_throttle(&task, m))) {
         ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c, 
                       "h2_mplx(%s): unschedule, resetting task for redo later",
                       task->id);
@@ -955,7 +971,7 @@ static apr_status_t unschedule_slow_task
     return rv;
 }
 
-static apr_status_t mplx_be_happy(h2_mplx *m)
+static apr_status_t s_mplx_be_happy(h2_mplx *m, h2_task *task)
 {
     apr_time_t now;            
 
@@ -967,14 +983,14 @@ static apr_status_t mplx_be_happy(h2_mpl
         m->limit_active = H2MIN(m->limit_active * 2, m->max_active);
         m->last_mood_change = now;
         m->irritations_since = 0;
-        ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
+        ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c,
                       "h2_mplx(%ld): mood update, increasing worker limit to %d",
                       m->id, m->limit_active);
     }
     return APR_SUCCESS;
 }
 
-static apr_status_t mplx_be_annoyed(h2_mplx *m)
+static apr_status_t m_be_annoyed(h2_mplx *m)
 {
     apr_status_t status = APR_SUCCESS;
     apr_time_t now;            
@@ -1005,12 +1021,12 @@ static apr_status_t mplx_be_annoyed(h2_m
     }
     
     if (m->tasks_active > m->limit_active) {
-        status = unschedule_slow_tasks(m);
+        status = m_unschedule_slow_tasks(m);
     }
     return status;
 }
 
-apr_status_t h2_mplx_idle(h2_mplx *m)
+apr_status_t h2_mplx_m_idle(h2_mplx *m)
 {
     apr_status_t status = APR_SUCCESS;
     apr_size_t scount;
@@ -1032,7 +1048,7 @@ apr_status_t h2_mplx_idle(h2_mplx *m)
              * of busy workers we allow for this connection until it
              * well behaves.
              */
-            status = mplx_be_annoyed(m);
+            status = m_be_annoyed(m);
         }
         else if (!h2_iq_empty(m->q)) {
             ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
@@ -1062,14 +1078,14 @@ apr_status_t h2_mplx_idle(h2_mplx *m)
                                   h2_beam_is_closed(stream->output),
                                   (long)h2_beam_get_buffered(stream->output));
                     h2_ihash_add(m->streams, stream);
-                    check_data_for(m, stream, 0);
+                    mst_check_data_for(m, stream, 1);
                     stream->out_checked = 1;
                     status = APR_EAGAIN;
                 }
             }
         }
     }
-    register_if_needed(m);
+    ms_register_if_needed(m, 1);
 
     H2_MPLX_LEAVE(m);
     return status;
@@ -1079,14 +1095,13 @@ apr_status_t h2_mplx_idle(h2_mplx *m)
  * mplx master events dispatching
  ******************************************************************************/
 
-int h2_mplx_has_master_events(h2_mplx *m)
+int h2_mplx_m_has_master_events(h2_mplx *m)
 {
     return apr_atomic_read32(&m->event_pending) > 0;
 }
 
-apr_status_t h2_mplx_dispatch_master_events(h2_mplx *m, 
-                                            stream_ev_callback *on_resume, 
-                                            void *on_ctx)
+apr_status_t h2_mplx_m_dispatch_master_events(h2_mplx *m, stream_ev_callback *on_resume, 
+                                              void *on_ctx)
 {
     h2_stream *stream;
     int n, id;
@@ -1096,8 +1111,8 @@ apr_status_t h2_mplx_dispatch_master_eve
     apr_atomic_set32(&m->event_pending, 0);
 
     /* update input windows for streams */
-    h2_ihash_iter(m->streams, report_consumption_iter, m);    
-    purge_streams(m, 1);
+    h2_ihash_iter(m->streams, m_report_consumption_iter, m);    
+    m_purge_streams(m, 1);
     
     n = h2_ififo_count(m->readyq);
     while (n > 0 
@@ -1112,13 +1127,13 @@ apr_status_t h2_mplx_dispatch_master_eve
     return APR_SUCCESS;
 }
 
-apr_status_t h2_mplx_keep_active(h2_mplx *m, h2_stream *stream)
+apr_status_t h2_mplx_m_keep_active(h2_mplx *m, h2_stream *stream)
 {
-    check_data_for(m, stream, 1);
+    mst_check_data_for(m, stream, 0);
     return APR_SUCCESS;
 }
 
-int h2_mplx_awaits_data(h2_mplx *m)
+int h2_mplx_m_awaits_data(h2_mplx *m)
 {
     int waiting = 1;
      
@@ -1135,7 +1150,7 @@ int h2_mplx_awaits_data(h2_mplx *m)
     return waiting;
 }
 
-apr_status_t h2_mplx_client_rst(h2_mplx *m, int stream_id)
+apr_status_t h2_mplx_m_client_rst(h2_mplx *m, int stream_id)
 {
     h2_stream *stream;
     apr_status_t status = APR_SUCCESS;
@@ -1143,7 +1158,7 @@ apr_status_t h2_mplx_client_rst(h2_mplx
     H2_MPLX_ENTER_ALWAYS(m);
     stream = h2_ihash_get(m->streams, stream_id);
     if (stream && stream->task) {
-        status = mplx_be_annoyed(m);
+        status = m_be_annoyed(m);
     }
     H2_MPLX_LEAVE(m);
     return status;
diff -up httpd-2.4.33/modules/http2/h2_mplx.h httpd-2.4.46/modules/http2/h2_mplx.h
--- httpd-2.4.33/modules/http2/h2_mplx.h	2020-08-11 15:45:10.513094213 +0200
+++ httpd-2.4.46/modules/http2/h2_mplx.h	2020-07-08 13:53:48.000000000 +0200
@@ -31,8 +31,10 @@
  * queued in the multiplexer. If a task thread tries to write more
  * data, it is blocked until space becomes available.
  *
- * Writing input is never blocked. In order to use flow control on the input,
- * the mplx can be polled for input data consumption.
+ * Naming Convention: 
+ * "h2_mplx_m_" are methods only to be called by the main connection
+ * "h2_mplx_s_" are methods only to be called by a secondary connection
+ * "h2_mplx_t_" are methods only to be called by a task handler (can be master or secondary)
  */
 
 struct apr_pool_t;
@@ -88,25 +90,23 @@ struct h2_mplx {
     apr_size_t stream_max_mem;
     
     apr_pool_t *spare_io_pool;
-    apr_array_header_t *spare_slaves; /* spare slave connections */
+    apr_array_header_t *spare_secondary; /* spare secondary connections */
     
     struct h2_workers *workers;
 };
 
-
-
 /*******************************************************************************
- * Object lifecycle and information.
+ * From the main connection processing: h2_mplx_m_*
  ******************************************************************************/
 
-apr_status_t h2_mplx_child_init(apr_pool_t *pool, server_rec *s);
+apr_status_t h2_mplx_m_child_init(apr_pool_t *pool, server_rec *s);
 
 /**
  * Create the multiplexer for the given HTTP2 session. 
  * Implicitly has reference count 1.
  */
-h2_mplx *h2_mplx_create(conn_rec *c, server_rec *s, apr_pool_t *master, 
-                        struct h2_workers *workers);
+h2_mplx *h2_mplx_m_create(conn_rec *c, server_rec *s, apr_pool_t *master, 
+                          struct h2_workers *workers);
 
 /**
  * Decreases the reference counter of this mplx and waits for it
@@ -116,26 +116,14 @@ h2_mplx *h2_mplx_create(conn_rec *c, ser
  * @param m the mplx to be released and destroyed
  * @param wait condition var to wait on for ref counter == 0
  */ 
-void h2_mplx_release_and_join(h2_mplx *m, struct apr_thread_cond_t *wait);
-
-apr_status_t h2_mplx_pop_task(h2_mplx *m, struct h2_task **ptask);
-
-void h2_mplx_task_done(h2_mplx *m, struct h2_task *task, struct h2_task **ptask);
+void h2_mplx_m_release_and_join(h2_mplx *m, struct apr_thread_cond_t *wait);
 
 /**
  * Shut down the multiplexer gracefully. Will no longer schedule new streams
  * but let the ongoing ones finish normally.
  * @return the highest stream id being/been processed
  */
-int h2_mplx_shutdown(h2_mplx *m);
-
-int h2_mplx_is_busy(h2_mplx *m);
-
-/*******************************************************************************
- * IO lifetime of streams.
- ******************************************************************************/
-
-struct h2_stream *h2_mplx_stream_get(h2_mplx *m, int id);
+int h2_mplx_m_shutdown(h2_mplx *m);
 
 /**
  * Notifies mplx that a stream has been completely handled on the main
@@ -144,20 +132,16 @@ struct h2_stream *h2_mplx_stream_get(h2_
  * @param m the mplx itself
  * @param stream the stream ready for cleanup
  */
-apr_status_t h2_mplx_stream_cleanup(h2_mplx *m, struct h2_stream *stream);
+apr_status_t h2_mplx_m_stream_cleanup(h2_mplx *m, struct h2_stream *stream);
 
 /**
  * Waits on output data from any stream in this session to become available. 
  * Returns APR_TIMEUP if no data arrived in the given time.
  */
-apr_status_t h2_mplx_out_trywait(h2_mplx *m, apr_interval_time_t timeout,
-                                 struct apr_thread_cond_t *iowait);
-
-apr_status_t h2_mplx_keep_active(h2_mplx *m, struct h2_stream *stream);
+apr_status_t h2_mplx_m_out_trywait(h2_mplx *m, apr_interval_time_t timeout,
+                                   struct apr_thread_cond_t *iowait);
 
-/*******************************************************************************
- * Stream processing.
- ******************************************************************************/
+apr_status_t h2_mplx_m_keep_active(h2_mplx *m, struct h2_stream *stream);
 
 /**
  * Process a stream request.
@@ -168,8 +152,8 @@ apr_status_t h2_mplx_keep_active(h2_mplx
  * @param cmp the stream priority compare function
  * @param ctx context data for the compare function
  */
-apr_status_t h2_mplx_process(h2_mplx *m, struct h2_stream *stream, 
-                             h2_stream_pri_cmp *cmp, void *ctx);
+apr_status_t h2_mplx_m_process(h2_mplx *m, struct h2_stream *stream, 
+                               h2_stream_pri_cmp *cmp, void *ctx);
 
 /**
  * Stream priorities have changed, reschedule pending requests.
@@ -178,7 +162,7 @@ apr_status_t h2_mplx_process(h2_mplx *m,
  * @param cmp the stream priority compare function
  * @param ctx context data for the compare function
  */
-apr_status_t h2_mplx_reprioritize(h2_mplx *m, h2_stream_pri_cmp *cmp, void *ctx);
+apr_status_t h2_mplx_m_reprioritize(h2_mplx *m, h2_stream_pri_cmp *cmp, void *ctx);
 
 typedef apr_status_t stream_ev_callback(void *ctx, struct h2_stream *stream);
 
@@ -186,7 +170,7 @@ typedef apr_status_t stream_ev_callback(
  * Check if the multiplexer has events for the master connection pending.
  * @return != 0 iff there are events pending
  */
-int h2_mplx_has_master_events(h2_mplx *m);
+int h2_mplx_m_has_master_events(h2_mplx *m);
 
 /**
  * Dispatch events for the master connection, such as
@@ -194,108 +178,46 @@ int h2_mplx_has_master_events(h2_mplx *m
  * @param on_resume new output data has arrived for a suspended stream 
  * @param ctx user supplied argument to invocation.
  */
-apr_status_t h2_mplx_dispatch_master_events(h2_mplx *m, 
-                                            stream_ev_callback *on_resume, 
-                                            void *ctx);
+apr_status_t h2_mplx_m_dispatch_master_events(h2_mplx *m, stream_ev_callback *on_resume, 
+                                              void *ctx);
 
-int h2_mplx_awaits_data(h2_mplx *m);
+int h2_mplx_m_awaits_data(h2_mplx *m);
 
 typedef int h2_mplx_stream_cb(struct h2_stream *s, void *ctx);
 
-apr_status_t h2_mplx_stream_do(h2_mplx *m, h2_mplx_stream_cb *cb, void *ctx);
+apr_status_t h2_mplx_m_stream_do(h2_mplx *m, h2_mplx_stream_cb *cb, void *ctx);
 
-apr_status_t h2_mplx_client_rst(h2_mplx *m, int stream_id);
-
-/*******************************************************************************
- * Output handling of streams.
- ******************************************************************************/
+apr_status_t h2_mplx_m_client_rst(h2_mplx *m, int stream_id);
 
 /**
- * Opens the output for the given stream with the specified response.
+ * Master connection has entered idle mode.
+ * @param m the mplx instance of the master connection
+ * @return != SUCCESS iff connection should be terminated
  */
-apr_status_t h2_mplx_out_open(h2_mplx *mplx, int stream_id,
-                              struct h2_bucket_beam *beam);
+apr_status_t h2_mplx_m_idle(h2_mplx *m);
 
 /*******************************************************************************
- * h2_mplx list Manipulation.
+ * From a secondary connection processing: h2_mplx_s_*
  ******************************************************************************/
+apr_status_t h2_mplx_s_pop_task(h2_mplx *m, struct h2_task **ptask);
+void h2_mplx_s_task_done(h2_mplx *m, struct h2_task *task, struct h2_task **ptask);
 
-/**
- * The magic pointer value that indicates the head of a h2_mplx list
- * @param  b The mplx list
- * @return The magic pointer value
- */
-#define H2_MPLX_LIST_SENTINEL(b)	APR_RING_SENTINEL((b), h2_mplx, link)
-
-/**
- * Determine if the mplx list is empty
- * @param b The list to check
- * @return true or false
- */
-#define H2_MPLX_LIST_EMPTY(b)	APR_RING_EMPTY((b), h2_mplx, link)
-
-/**
- * Return the first mplx in a list
- * @param b The list to query
- * @return The first mplx in the list
- */
-#define H2_MPLX_LIST_FIRST(b)	APR_RING_FIRST(b)
-
-/**
- * Return the last mplx in a list
- * @param b The list to query
- * @return The last mplx int he list
- */
-#define H2_MPLX_LIST_LAST(b)	APR_RING_LAST(b)
-
-/**
- * Insert a single mplx at the front of a list
- * @param b The list to add to
- * @param e The mplx to insert
- */
-#define H2_MPLX_LIST_INSERT_HEAD(b, e) do {				\
-h2_mplx *ap__b = (e);                                        \
-APR_RING_INSERT_HEAD((b), ap__b, h2_mplx, link);	\
-} while (0)
-
-/**
- * Insert a single mplx at the end of a list
- * @param b The list to add to
- * @param e The mplx to insert
- */
-#define H2_MPLX_LIST_INSERT_TAIL(b, e) do {				\
-h2_mplx *ap__b = (e);					\
-APR_RING_INSERT_TAIL((b), ap__b, h2_mplx, link);	\
-} while (0)
+/*******************************************************************************
+ * From an h2_task owner: h2_mplx_t_*
+ * (a task is transferred from master to secondary connection and back in
+ * its normal lifetime).
+ ******************************************************************************/
 
 /**
- * Get the next mplx in the list
- * @param e The current mplx
- * @return The next mplx
- */
-#define H2_MPLX_NEXT(e)	APR_RING_NEXT((e), link)
-/**
- * Get the previous mplx in the list
- * @param e The current mplx
- * @return The previous mplx
+ * Opens the output for the given stream with the specified response.
  */
-#define H2_MPLX_PREV(e)	APR_RING_PREV((e), link)
+apr_status_t h2_mplx_t_out_open(h2_mplx *mplx, int stream_id,
+                                struct h2_bucket_beam *beam);
 
 /**
- * Remove a mplx from its list
- * @param e The mplx to remove
+ * Get the stream that belongs to the given task.
  */
-#define H2_MPLX_REMOVE(e)	APR_RING_REMOVE((e), link)
-
-/*******************************************************************************
- * h2_mplx DoS protection
- ******************************************************************************/
+struct h2_stream *h2_mplx_t_stream_get(h2_mplx *m, struct h2_task *task);
 
-/**
- * Master connection has entered idle mode.
- * @param m the mplx instance of the master connection
- * @return != SUCCESS iff connection should be terminated
- */
-apr_status_t h2_mplx_idle(h2_mplx *m);
 
 #endif /* defined(__mod_h2__h2_mplx__) */
Only in httpd-2.4.33/modules/http2/: h2_ngn_shed.c
Only in httpd-2.4.33/modules/http2/: h2_ngn_shed.h
diff -up httpd-2.4.33/modules/http2/h2_proxy_session.c httpd-2.4.46/modules/http2/h2_proxy_session.c
--- httpd-2.4.33/modules/http2/h2_proxy_session.c	2020-08-11 15:45:10.513094213 +0200
+++ httpd-2.4.46/modules/http2/h2_proxy_session.c	2020-06-08 09:30:49.000000000 +0200
@@ -64,6 +64,121 @@ static apr_status_t check_suspended(h2_p
 static void stream_resume(h2_proxy_stream *stream);
 static apr_status_t submit_trailers(h2_proxy_stream *stream);
 
+/*
+ * The H2_PING connection sub-state: a state independent of the H2_SESSION state
+ * of the connection:
+ * - H2_PING_ST_NONE: no interference with request handling, ProxyTimeout in effect.
+ *   When entered, all suspended streams are unsuspended again.
+ * - H2_PING_ST_AWAIT_ANY: new requests are suspended, a possibly configured "ping"
+ *   timeout is in effect. Any frame received transits to H2_PING_ST_NONE.
+ * - H2_PING_ST_AWAIT_PING: same as above, but only a PING frame transits 
+ *   to H2_PING_ST_NONE.
+ *
+ * An AWAIT state is entered on a new connection or when re-using a connection
+ * whose last frame was received some time ago. The latter sends a PING frame
+ * and insists on an answer; the former is satisfied by any frame received from
+ * the backend.
+ *
+ * This works for new connections as there is always at least one SETTINGS frame
+ * that the backend sends. When re-using a connection, we send a PING and insist on
+ * receiving one back, as there might be frames in our connection buffers from
+ * some time ago. Since some servers have protections against PING flooding, we
+ * only ever have one PING unanswered.
+ *
+ * Requests are suspended while in a PING state, as we do not want to send data
+ * before we can be reasonably sure that the connection is working (at least on
+ * the h2 protocol level). This also means that the session can do blocking reads
+ * when expecting PING answers.
+ */
+static void set_ping_timeout(h2_proxy_session *session)
+{
+    if (session->ping_timeout != -1 && session->save_timeout == -1) {
+        apr_socket_t *socket = NULL;
+
+        socket = ap_get_conn_socket(session->c);
+        if (socket) {
+            apr_socket_timeout_get(socket, &session->save_timeout);
+            apr_socket_timeout_set(socket, session->ping_timeout);
+        }
+    }
+}
+
+static void unset_ping_timeout(h2_proxy_session *session)
+{
+    if (session->save_timeout != -1) {
+        apr_socket_t *socket = NULL;
+
+        socket = ap_get_conn_socket(session->c);
+        if (socket) {
+            apr_socket_timeout_set(socket, session->save_timeout);
+            session->save_timeout = -1;
+        }
+    }
+}
+
+static void enter_ping_state(h2_proxy_session *session, h2_ping_state_t state)
+{
+    if (session->ping_state == state) return;
+    switch (session->ping_state) {
+    case H2_PING_ST_NONE:
+        /* leaving NONE, enforce timeout, send frame maybe */
+        if (H2_PING_ST_AWAIT_PING == state) {
+            unset_ping_timeout(session);
+            nghttp2_submit_ping(session->ngh2, 0, (const uint8_t *)"nevergonnagiveyouup");
+        }
+        set_ping_timeout(session);
+        session->ping_state = state;
+        break;
+    default:
+        /* no switching between the != NONE states */
+        if (H2_PING_ST_NONE == state) {
+            session->ping_state = state;
+            unset_ping_timeout(session);
+            ping_arrived(session);
+        }
+        break;
+    }
+}
+
+static void ping_new_session(h2_proxy_session *session, proxy_conn_rec *p_conn)
+{
+    session->save_timeout = -1;
+    session->ping_timeout = (p_conn->worker->s->ping_timeout_set?
+                             p_conn->worker->s->ping_timeout : -1);
+    session->ping_state = H2_PING_ST_NONE;
+    enter_ping_state(session, H2_PING_ST_AWAIT_ANY);
+}
+
+static void ping_reuse_session(h2_proxy_session *session)
+{
+    if (H2_PING_ST_NONE == session->ping_state) {
+        apr_interval_time_t age = apr_time_now() - session->last_frame_received;
+        if (age > apr_time_from_sec(1)) {
+            enter_ping_state(session, H2_PING_ST_AWAIT_PING);
+        }
+    }
+}
+
+static void ping_ev_frame_received(h2_proxy_session *session, const nghttp2_frame *frame)
+{
+    session->last_frame_received = apr_time_now();
+    switch (session->ping_state) {
+    case H2_PING_ST_NONE:
+        /* nop */
+        break;
+    case H2_PING_ST_AWAIT_ANY:
+        enter_ping_state(session, H2_PING_ST_NONE);
+        break;
+    case H2_PING_ST_AWAIT_PING:
+        if (NGHTTP2_PING == frame->hd.type) {
+            enter_ping_state(session, H2_PING_ST_NONE);
+        }
+        /* we may receive many other frames while we are waiting for the
+         * PING answer. They may come all from our connection buffers and
+         * say nothing about the current state of the backend. */
+        break;
+    }
+}
 
 static apr_status_t proxy_session_pre_close(void *theconn)
 {
@@ -154,7 +269,8 @@ static int on_frame_recv(nghttp2_session
                       session->id, buffer);
     }
 
-    session->last_frame_received = apr_time_now();
+    ping_ev_frame_received(session, frame);
+    /* Action for frame types: */
     switch (frame->hd.type) {
         case NGHTTP2_HEADERS:
             stream = nghttp2_session_get_stream_user_data(ngh2, frame->hd.stream_id);
@@ -195,10 +311,6 @@ static int on_frame_recv(nghttp2_session
             stream_resume(stream);
             break;
         case NGHTTP2_PING:
-            if (session->check_ping) {
-                session->check_ping = 0;
-                ping_arrived(session);
-            }
             break;
         case NGHTTP2_PUSH_PROMISE:
             break;
@@ -499,7 +611,7 @@ static ssize_t stream_request_data(nghtt
         return NGHTTP2_ERR_CALLBACK_FAILURE;
     }
     
-    if (stream->session->check_ping) {
+    if (stream->session->ping_state != H2_PING_ST_NONE) {
         /* suspend until we hear from the other side */
         stream->waiting_on_ping = 1;
         status = APR_EAGAIN;
@@ -654,18 +766,13 @@ h2_proxy_session *h2_proxy_session_setup
         nghttp2_option_del(option);
         nghttp2_session_callbacks_del(cbs);
 
+        ping_new_session(session, p_conn);
         ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03362)
                       "setup session for %s", p_conn->hostname);
     }
     else {
         h2_proxy_session *session = p_conn->data;
-        if (!session->check_ping) {
-            apr_interval_time_t age = apr_time_now() - session->last_frame_received;
-            if (age > apr_time_from_sec(1)) {
-                session->check_ping = 1;
-                nghttp2_submit_ping(session->ngh2, 0, (const uint8_t *)"nevergonnagiveyouup");
-            }
-        }
+        ping_reuse_session(session);
     }
     return p_conn->data;
 }
@@ -902,7 +1009,7 @@ static apr_status_t h2_proxy_session_rea
         apr_socket_t *socket = NULL;
         apr_time_t save_timeout = -1;
         
-        if (block) {
+        if (block && timeout > 0) {
             socket = ap_get_conn_socket(session->c);
             if (socket) {
                 apr_socket_timeout_get(socket, &save_timeout);
@@ -974,6 +1081,14 @@ static void stream_resume(h2_proxy_strea
     dispatch_event(session, H2_PROXYS_EV_STREAM_RESUMED, 0, NULL);
 }
 
+static int is_waiting_for_backend(h2_proxy_session *session)
+{
+    return ((session->ping_state != H2_PING_ST_NONE) 
+            || ((session->suspended->nelts <= 0)
+                && !nghttp2_session_want_write(session->ngh2)
+                && nghttp2_session_want_read(session->ngh2)));
+}
+
 static apr_status_t check_suspended(h2_proxy_session *session)
 {
     h2_proxy_stream *stream;
@@ -1428,7 +1543,22 @@ run_loop:
             break;
             
         case H2_PROXYS_ST_WAIT:
-            if (check_suspended(session) == APR_EAGAIN) {
+            if (is_waiting_for_backend(session)) {
+                /* we can do a blocking read with the default timeout (as
+                 * configured via ProxyTimeout) on our socket. There is
+                 * nothing we want to send or check until we get more data
+                 * from the backend. */
+                status = h2_proxy_session_read(session, 1, 0);
+                if (status == APR_SUCCESS) {
+                    have_read = 1;
+                    dispatch_event(session, H2_PROXYS_EV_DATA_READ, 0, NULL);
+                }
+                else {
+                    dispatch_event(session, H2_PROXYS_EV_CONN_ERROR, status, NULL);
+                    return status;
+                }
+            }
+            else if (check_suspended(session) == APR_EAGAIN) {
                 /* no stream has become resumed. Do a blocking read with
                  * ever increasing timeouts... */
                 if (session->wait_timeout < 25) {
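
The H2_PING sub-state introduced above boils down to three transition rules: an AWAIT state is entered for new or long-idle reused backend connections, any received frame leaves H2_PING_ST_AWAIT_ANY, and only a PING frame leaves H2_PING_ST_AWAIT_PING. A minimal standalone model of those rules (illustrative only; the enum and function names below are not mod_http2 symbols):

#include <assert.h>

typedef enum { PING_NONE, PING_AWAIT_ANY, PING_AWAIT_PING } ping_state;
typedef enum { FRAME_SETTINGS, FRAME_HEADERS, FRAME_PING } frame_type;

/* transition applied when a frame arrives, mirroring ping_ev_frame_received() */
static ping_state on_frame(ping_state s, frame_type t)
{
    switch (s) {
    case PING_NONE:       return PING_NONE;      /* nothing is awaited */
    case PING_AWAIT_ANY:  return PING_NONE;      /* any frame resumes requests */
    case PING_AWAIT_PING: return (t == FRAME_PING)? PING_NONE : PING_AWAIT_PING;
    }
    return s;
}

int main(void)
{
    /* new connection: the backend's initial SETTINGS frame is enough */
    assert(on_frame(PING_AWAIT_ANY, FRAME_SETTINGS) == PING_NONE);
    /* reused, long-idle connection: only the PING answer counts */
    assert(on_frame(PING_AWAIT_PING, FRAME_HEADERS) == PING_AWAIT_PING);
    assert(on_frame(PING_AWAIT_PING, FRAME_PING) == PING_NONE);
    return 0;
}

While a session is in either AWAIT state, new request bodies stay suspended and the socket timeout is switched to the configured ping timeout, as set_ping_timeout()/unset_ping_timeout() above show.
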
diff -up httpd-2.4.33/modules/http2/h2_proxy_session.h httpd-2.4.46/modules/http2/h2_proxy_session.h
--- httpd-2.4.33/modules/http2/h2_proxy_session.h	2020-08-11 15:45:10.429093673 +0200
+++ httpd-2.4.46/modules/http2/h2_proxy_session.h	2020-06-08 09:30:49.000000000 +0200
@@ -60,6 +60,11 @@ typedef enum {
     H2_PROXYS_EV_PRE_CLOSE,        /* connection will close after this */
 } h2_proxys_event_t;
 
+typedef enum {
+    H2_PING_ST_NONE,               /* normal connection mode, ProxyTimeout rules */
+    H2_PING_ST_AWAIT_ANY,          /* waiting for any frame from backend */
+    H2_PING_ST_AWAIT_PING,         /* waiting for PING frame from backend */
+} h2_ping_state_t;
 
 typedef struct h2_proxy_session h2_proxy_session;
 typedef void h2_proxy_request_done(h2_proxy_session *s, request_rec *r,
@@ -74,7 +79,6 @@ struct h2_proxy_session {
     nghttp2_session *ngh2;   /* the nghttp2 session itself */
     
     unsigned int aborted : 1;
-    unsigned int check_ping : 1;
     unsigned int h2_front : 1; /* if front-end connection is HTTP/2 */
 
     h2_proxy_request_done *done;
@@ -94,6 +98,10 @@ struct h2_proxy_session {
     
     apr_bucket_brigade *input;
     apr_bucket_brigade *output;
+
+    h2_ping_state_t ping_state;
+    apr_time_t ping_timeout;
+    apr_time_t save_timeout;
 };
 
 h2_proxy_session *h2_proxy_session_setup(const char *id, proxy_conn_rec *p_conn,
diff -up httpd-2.4.33/modules/http2/h2_proxy_util.h httpd-2.4.46/modules/http2/h2_proxy_util.h
--- httpd-2.4.33/modules/http2/h2_proxy_util.h	2020-08-11 15:45:10.517094239 +0200
+++ httpd-2.4.46/modules/http2/h2_proxy_util.h	2020-02-21 01:33:40.000000000 +0100
@@ -185,7 +185,7 @@ struct h2_proxy_request {
     
     apr_time_t request_time;
     
-    unsigned int chunked : 1;   /* iff requst body needs to be forwarded as chunked */
+    unsigned int chunked : 1;   /* iff request body needs to be forwarded as chunked */
     unsigned int serialize : 1; /* iff this request is written in HTTP/1.1 serialization */
 };
 
diff -up httpd-2.4.33/modules/http2/h2_push.c httpd-2.4.46/modules/http2/h2_push.c
--- httpd-2.4.33/modules/http2/h2_push.c	2020-08-11 15:45:10.353093185 +0200
+++ httpd-2.4.46/modules/http2/h2_push.c	2020-07-29 14:33:53.000000000 +0200
@@ -464,33 +464,6 @@ apr_array_header_t *h2_push_collect(apr_
     return NULL;
 }
 
-/*******************************************************************************
- * push diary 
- *
- * - The push diary keeps track of resources already PUSHed via HTTP/2 on this
- *   connection. It records a hash value from the absolute URL of the resource
- *   pushed.
- * - Lacking openssl, it uses 'apr_hashfunc_default' for the value
- * - with openssl, it uses SHA256 to calculate the hash value
- * - whatever the method to generate the hash, the diary keeps a maximum of 64
- *   bits per hash, limiting the memory consumption to about 
- *      H2PushDiarySize * 8 
- *   bytes. Entries are sorted by most recently used and oldest entries are
- *   forgotten first.
- * - Clients can initialize/replace the push diary by sending a 'Cache-Digest'
- *   header. Currently, this is the base64url encoded value of the cache digest
- *   as specified in https://datatracker.ietf.org/doc/draft-kazuho-h2-cache-digest/
- *   This draft can be expected to evolve and the definition of the header
- *   will be added there and refined.
- * - The cache digest header is a Golomb Coded Set of hash values, but it may
- *   limit the amount of bits per hash value even further. For a good description
- *   of GCS, read here:
- *      http://giovanni.bajo.it/post/47119962313/golomb-coded-sets-smaller-than-bloom-filters
- * - The means that the push diary might be initialized with hash values of much
- *   less than 64 bits, leading to more false positives, but smaller digest size.
- ******************************************************************************/
- 
- 
 #define GCSLOG_LEVEL   APLOG_TRACE1
 
 typedef struct h2_push_diary_entry {
@@ -617,38 +590,48 @@ static int h2_push_diary_find(h2_push_di
     return -1;
 }
 
-static h2_push_diary_entry *move_to_last(h2_push_diary *diary, apr_size_t idx)
+static void move_to_last(h2_push_diary *diary, apr_size_t idx)
 {
     h2_push_diary_entry *entries = (h2_push_diary_entry*)diary->entries->elts;
     h2_push_diary_entry e;
-    apr_size_t lastidx = diary->entries->nelts-1;
+    int lastidx;
     
+    /* Move an existing entry to the last place */
+    if (diary->entries->nelts <= 0)
+        return;
+
     /* move entry[idx] to the end */
+    lastidx = diary->entries->nelts - 1;
     if (idx < lastidx) {
         e =  entries[idx];
-        memmove(entries+idx, entries+idx+1, sizeof(e) * (lastidx - idx));
+        memmove(entries+idx, entries+idx+1, sizeof(h2_push_diary_entry) * (lastidx - idx));
         entries[lastidx] = e;
     }
-    return &entries[lastidx];
 }
 
-static void h2_push_diary_append(h2_push_diary *diary, h2_push_diary_entry *e)
+static void remove_first(h2_push_diary *diary)
 {
-    h2_push_diary_entry *ne;
+    h2_push_diary_entry *entries = (h2_push_diary_entry*)diary->entries->elts;
+    int lastidx;
     
-    if (diary->entries->nelts < diary->N) {
-        /* append a new diary entry at the end */
-        APR_ARRAY_PUSH(diary->entries, h2_push_diary_entry) = *e;
-        ne = &APR_ARRAY_IDX(diary->entries, diary->entries->nelts-1, h2_push_diary_entry);
-    }
-    else {
-        /* replace content with new digest. keeps memory usage constant once diary is full */
-        ne = move_to_last(diary, 0);
-        *ne = *e;
+    /* move remaining entries to index 0 */
+    lastidx = diary->entries->nelts - 1;
+    if (lastidx > 0) {
+        --diary->entries->nelts;
+        memmove(entries, entries+1, sizeof(h2_push_diary_entry) * diary->entries->nelts);
     }
+}
+
+static void h2_push_diary_append(h2_push_diary *diary, h2_push_diary_entry *e)
+{
+    while (diary->entries->nelts >= diary->N) {
+        remove_first(diary);
+    }
+    /* append a new diary entry at the end */
+    APR_ARRAY_PUSH(diary->entries, h2_push_diary_entry) = *e;
     /* Intentional no APLOGNO */
     ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, diary->entries->pool,
-                  "push_diary_append: %"APR_UINT64_T_HEX_FMT, ne->hash);
+                  "push_diary_append: %"APR_UINT64_T_HEX_FMT, e->hash);
 }
 
 apr_array_header_t *h2_push_diary_update(h2_session *session, apr_array_header_t *pushes)
@@ -691,30 +674,12 @@ apr_array_header_t *h2_push_collect_upda
                                            const struct h2_request *req, 
                                            const struct h2_headers *res)
 {
-    h2_session *session = stream->session;
-    const char *cache_digest = apr_table_get(req->headers, "Cache-Digest");
     apr_array_header_t *pushes;
-    apr_status_t status;
     
-    if (cache_digest && session->push_diary) {
-        status = h2_push_diary_digest64_set(session->push_diary, req->authority, 
-                                            cache_digest, stream->pool);
-        if (status != APR_SUCCESS) {
-            ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c,
-                          H2_SSSN_LOG(APLOGNO(03057), session,
-                          "push diary set from Cache-Digest: %s"), cache_digest);
-        }
-    }
     pushes = h2_push_collect(stream->pool, req, stream->push_policy, res);
     return h2_push_diary_update(stream->session, pushes);
 }
 
-static apr_int32_t h2_log2inv(unsigned char log2)
-{
-    return log2? (1 << log2) : 1;
-}
-
-
 typedef struct {
     h2_push_diary *diary;
     unsigned char log2p;
@@ -829,16 +794,11 @@ apr_status_t h2_push_diary_digest_get(h2
     apr_size_t hash_count;
     
     nelts = diary->entries->nelts;
-    
-    if (nelts > APR_UINT32_MAX) {
-        /* should not happen */
-        return APR_ENOTIMPL;
-    }
     N = ceil_power_of_2(nelts);
     log2n = h2_log2(N);
     
     /* Now log2p is the max number of relevant bits, so that
-     * log2p + log2n == mask_bits. We can uise a lower log2p
+     * log2p + log2n == mask_bits. We can use a lower log2p
      * and have a shorter set encoding...
      */
     log2pmax = h2_log2(ceil_power_of_2(maxP));
@@ -895,166 +855,3 @@ apr_status_t h2_push_diary_digest_get(h2
     return APR_SUCCESS;
 }
 
-typedef struct {
-    h2_push_diary *diary;
-    apr_pool_t *pool;
-    unsigned char log2p;
-    const unsigned char *data;
-    apr_size_t datalen;
-    apr_size_t offset;
-    unsigned int bit;
-    apr_uint64_t last_val;
-} gset_decoder;
-
-static int gset_decode_next_bit(gset_decoder *decoder)
-{
-    if (++decoder->bit >= 8) {
-        if (++decoder->offset >= decoder->datalen) {
-            return -1;
-        }
-        decoder->bit = 0;
-    }
-    return (decoder->data[decoder->offset] & cbit_mask[decoder->bit])? 1 : 0;
-}
-
-static apr_status_t gset_decode_next(gset_decoder *decoder, apr_uint64_t *phash)
-{
-    apr_uint64_t flex = 0, fixed = 0, delta;
-    int i;
-    
-    /* read 1 bits until we encounter 0, then read log2n(diary-P) bits.
-     * On a malformed bit-string, this will not fail, but produce results
-     * which are pbly too large. Luckily, the diary will modulo the hash.
-     */
-    while (1) {
-        int bit = gset_decode_next_bit(decoder);
-        if (bit == -1) {
-            return APR_EINVAL;
-        }
-        if (!bit) {
-            break;
-        }
-        ++flex;
-    }
-    
-    for (i = 0; i < decoder->log2p; ++i) {
-        int bit = gset_decode_next_bit(decoder);
-        if (bit == -1) {
-            return APR_EINVAL;
-        }
-        fixed = (fixed << 1) | bit;
-    }
-    
-    delta = (flex << decoder->log2p) | fixed;
-    *phash = delta + decoder->last_val;
-    decoder->last_val = *phash;
-    
-    /* Intentional no APLOGNO */
-    ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, decoder->pool,
-                  "h2_push_diary_digest_dec: val=%"APR_UINT64_T_HEX_FMT", delta=%"
-                  APR_UINT64_T_HEX_FMT", flex=%d, fixed=%"APR_UINT64_T_HEX_FMT, 
-                  *phash, delta, (int)flex, fixed);
-                  
-    return APR_SUCCESS;
-}
-
-/**
- * Initialize the push diary by a cache digest as described in 
- * https://datatracker.ietf.org/doc/draft-kazuho-h2-cache-digest/
- * .
- * @param diary the diary to set the digest into
- * @param data the binary cache digest
- * @param len the length of the cache digest
- * @return APR_EINVAL if digest was not successfully parsed
- */
-apr_status_t h2_push_diary_digest_set(h2_push_diary *diary, const char *authority, 
-                                      const char *data, apr_size_t len)
-{
-    gset_decoder decoder;
-    unsigned char log2n, log2p;
-    int N, i;
-    apr_pool_t *pool = diary->entries->pool;
-    h2_push_diary_entry e;
-    apr_status_t status = APR_SUCCESS;
-    
-    if (len < 2) {
-        /* at least this should be there */
-        return APR_EINVAL;
-    }
-    log2n = data[0];
-    log2p = data[1];
-    diary->mask_bits = log2n + log2p;
-    if (diary->mask_bits > 64) {
-        /* cannot handle */
-        return APR_ENOTIMPL;
-    }
-    
-    /* whatever is in the digest, it replaces the diary entries */
-    apr_array_clear(diary->entries);
-    if (!authority || !strcmp("*", authority)) {
-        diary->authority = NULL;
-    }
-    else if (!diary->authority || strcmp(diary->authority, authority)) {
-        diary->authority = apr_pstrdup(diary->entries->pool, authority);
-    }
-
-    N = h2_log2inv(log2n + log2p);
-
-    decoder.diary    = diary;
-    decoder.pool     = pool;
-    decoder.log2p    = log2p;
-    decoder.data     = (const unsigned char*)data;
-    decoder.datalen  = len;
-    decoder.offset   = 1;
-    decoder.bit      = 8;
-    decoder.last_val = 0;
-    
-    diary->N = N;
-    /* Determine effective N we use for storage */
-    if (!N) {
-        /* a totally empty cache digest. someone tells us that she has no
-         * entries in the cache at all. Use our own preferences for N+mask 
-         */
-        diary->N = diary->NMax;
-        return APR_SUCCESS;
-    }
-    else if (N > diary->NMax) {
-        /* Store not more than diary is configured to hold. We open us up
-         * to DOS attacks otherwise. */
-        diary->N = diary->NMax;
-    }
-    
-    /* Intentional no APLOGNO */
-    ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, pool,
-                  "h2_push_diary_digest_set: N=%d, log2n=%d, "
-                  "diary->mask_bits=%d, dec.log2p=%d", 
-                  (int)diary->N, (int)log2n, diary->mask_bits, 
-                  (int)decoder.log2p);
-                  
-    for (i = 0; i < diary->N; ++i) {
-        if (gset_decode_next(&decoder, &e.hash) != APR_SUCCESS) {
-            /* the data may have less than N values */
-            break;
-        }
-        h2_push_diary_append(diary, &e);
-    }
-    
-    /* Intentional no APLOGNO */
-    ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, pool,
-                  "h2_push_diary_digest_set: diary now with %d entries, mask_bits=%d", 
-                  (int)diary->entries->nelts, diary->mask_bits);
-    return status;
-}
-
-apr_status_t h2_push_diary_digest64_set(h2_push_diary *diary, const char *authority, 
-                                        const char *data64url, apr_pool_t *pool)
-{
-    const char *data;
-    apr_size_t len = h2_util_base64url_decode(&data, data64url, pool);
-    /* Intentional no APLOGNO */
-    ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, pool,
-                  "h2_push_diary_digest64_set: digest=%s, dlen=%d", 
-                  data64url, (int)len);
-    return h2_push_diary_digest_set(diary, authority, data, len);
-}
-
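
With h2_push_diary_digest_set() gone, entries now only enter the diary via h2_push_diary_append() above, which keeps memory bounded by dropping the oldest entry (index 0) whenever the diary is full. A standalone model of that bounded append on a plain array (illustrative only; CAP stands in for diary->N and none of these names are mod_http2 symbols):

#include <stdio.h>
#include <string.h>

#define CAP 4

static unsigned long diary[CAP];
static int nelts = 0;

/* mirror remove_first(): drop entry 0 and shift the rest down */
static void drop_oldest(void)
{
    if (nelts > 0) {
        memmove(diary, diary + 1, sizeof(diary[0]) * (nelts - 1));
        --nelts;
    }
}

/* mirror h2_push_diary_append(): make room, then append at the end */
static void append(unsigned long hash)
{
    while (nelts >= CAP)
        drop_oldest();
    diary[nelts++] = hash;
}

int main(void)
{
    int i;
    for (i = 1; i <= 6; ++i)
        append((unsigned long)i);
    for (i = 0; i < nelts; ++i)
        printf("%lu ", diary[i]);     /* prints: 3 4 5 6 */
    printf("\n");
    return 0;
}

Combined with move_to_last(), which refreshes an entry that is seen again, the array stays ordered from least to most recently used, so the entry dropped on overflow is always the least recently used one, as the diary comment describes.
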
diff -up httpd-2.4.33/modules/http2/h2_push.h httpd-2.4.46/modules/http2/h2_push.h
--- httpd-2.4.33/modules/http2/h2_push.h	2018-02-10 16:46:12.000000000 +0100
+++ httpd-2.4.46/modules/http2/h2_push.h	2020-07-29 14:33:53.000000000 +0200
@@ -35,6 +35,44 @@ typedef enum {
     H2_PUSH_DIGEST_SHA256
 } h2_push_digest_type;
 
+/*******************************************************************************
+ * push diary 
+ *
+ * - The push diary keeps track of resources already PUSHed via HTTP/2 on this
+ *   connection. It records a hash value from the absolute URL of the resource
+ *   pushed.
+ * - Lacking openssl, it falls back to apr_hashfunc_default() for the
+ *   hash value
+ * - With openssl, it uses SHA256 to calculate the hash value
+ * - whatever the method to generate the hash, the diary keeps a maximum of 64
+ *   bits per hash, limiting the memory consumption to about 
+ *      H2PushDiarySize * 8 
+ *   bytes. Entries are sorted by most recently used and oldest entries are
+ *   forgotten first.
+ * - While useful by itself to avoid duplicated PUSHes on the same connection,
+ *   the original idea was that clients provided a 'Cache-Digest' header with
+ *   the values of *their own* cached resources. This was described in
+ *   <https://datatracker.ietf.org/doc/draft-kazuho-h2-cache-digest/> 
+ *   and some subsequent revisions that tweaked values but kept the overall idea.
+ * - The draft was abandoned by the IETF http-wg, as support from major clients,
+ *   e.g. browsers, was lacking for various reasons.
+ * - For these reasons, mod_h2 abandoned its support for client supplied values
+ *   but keeps the diary. It seems to provide value for applications using PUSH,
+ *   is configurable in size and defaults to a very moderate amount of memory
+ *   used.
+ * - The cache digest header is a Golomb Coded Set of hash values, but it may
+ *   limit the amount of bits per hash value even further. For a good description
+ *   of GCS, read here:
+ *   <http://giovanni.bajo.it/post/47119962313/golomb-coded-sets-smaller-than-bloom-filters>
+ ******************************************************************************/
+ 
+ 
+/*
+ * The push diary is based on the abandoned draft 
+ * <https://datatracker.ietf.org/doc/draft-kazuho-h2-cache-digest/>
+ * that describes how to use Golomb filters.
+ */
+
 typedef struct h2_push_diary h2_push_diary;
 
 typedef void h2_push_digest_calc(h2_push_diary *diary, apr_uint64_t *phash, h2_push *push);
@@ -101,20 +139,4 @@ apr_status_t h2_push_diary_digest_get(h2
                                       int maxP, const char *authority, 
                                       const char **pdata, apr_size_t *plen);
 
-/**
- * Initialize the push diary by a cache digest as described in 
- * https://datatracker.ietf.org/doc/draft-kazuho-h2-cache-digest/
- * .
- * @param diary the diary to set the digest into
- * @param authority the authority to set the data for
- * @param data the binary cache digest
- * @param len the length of the cache digest
- * @return APR_EINVAL if digest was not successfully parsed
- */
-apr_status_t h2_push_diary_digest_set(h2_push_diary *diary, const char *authority, 
-                                      const char *data, apr_size_t len);
-
-apr_status_t h2_push_diary_digest64_set(h2_push_diary *diary, const char *authority, 
-                                        const char *data64url, apr_pool_t *pool);
-
 #endif /* defined(__mod_h2__h2_push__) */
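
The size bound mentioned in the comment above is easy to verify: each diary entry stores at most 64 bits (8 bytes) of hash, so a diary limited to, say, 256 entries needs roughly 256 * 8 = 2048 bytes, i.e. about 2 KiB per connection plus the small fixed overhead of the array itself. The 256 is only an illustrative value for H2PushDiarySize here, not a statement about the configured default.
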
diff -up httpd-2.4.33/modules/http2/h2_request.c httpd-2.4.46/modules/http2/h2_request.c
--- httpd-2.4.33/modules/http2/h2_request.c	2020-08-11 15:45:10.517094239 +0200
+++ httpd-2.4.46/modules/http2/h2_request.c	2020-07-15 16:59:43.000000000 +0200
@@ -47,9 +47,9 @@ typedef struct {
 static int set_h1_header(void *ctx, const char *key, const char *value)
 {
     h1_ctx *x = ctx;
-    x->status = h2_req_add_header(x->headers, x->pool, key, strlen(key), 
-                                  value, strlen(value));
-    return (x->status == APR_SUCCESS)? 1 : 0;
+    int was_added;
+    h2_req_add_header(x->headers, x->pool, key, strlen(key), value, strlen(value), 0, &was_added);
+    return 1;
 }
 
 apr_status_t h2_request_rcreate(h2_request **preq, apr_pool_t *pool, 
@@ -99,10 +99,12 @@ apr_status_t h2_request_rcreate(h2_reque
 
 apr_status_t h2_request_add_header(h2_request *req, apr_pool_t *pool, 
                                    const char *name, size_t nlen,
-                                   const char *value, size_t vlen)
+                                   const char *value, size_t vlen,
+                                   size_t max_field_len, int *pwas_added)
 {
     apr_status_t status = APR_SUCCESS;
     
+    *pwas_added = 0;
     if (nlen <= 0) {
         return status;
     }
@@ -143,8 +145,9 @@ apr_status_t h2_request_add_header(h2_re
         }
     }
     else {
-        /* non-pseudo header, append to work bucket of stream */
-        status = h2_req_add_header(req->headers, pool, name, nlen, value, vlen);
+        /* non-pseudo header, add to table */
+        status = h2_req_add_header(req->headers, pool, name, nlen, value, vlen, 
+                                   max_field_len, pwas_added);
     }
     
     return status;
@@ -156,7 +159,7 @@ apr_status_t h2_request_end_headers(h2_r
     
     /* rfc7540, ch. 8.1.2.3:
      * - if we have :authority, it overrides any Host header 
-     * - :authority MUST be ommited when converting h1->h2, so we
+     * - :authority MUST be omitted when converting h1->h2, so we
      *   might get a stream without, but then Host needs to be there */
     if (!req->authority) {
         const char *host = apr_table_get(req->headers, "Host");
@@ -288,6 +291,9 @@ request_rec *h2_request_create_rec(const
     if (r->method_number == M_GET && r->method[0] == 'H') {
         r->header_only = 1;
     }
+    r->the_request = apr_psprintf(r->pool, "%s %s HTTP/2.0", 
+                                  req->method, req->path ? req->path : "");
+    r->headers_in = apr_table_clone(r->pool, req->headers);
 
     rpath = (req->path ? req->path : "");
     ap_parse_uri(r, rpath);
@@ -304,7 +310,9 @@ request_rec *h2_request_create_rec(const
      */
     r->hostname = NULL;
     ap_update_vhost_from_headers(r);
-    
+    r->protocol = "HTTP/2.0";
+    r->proto_num = HTTP_VERSION(2, 0);
+
     /* we may have switched to another server */
     r->per_dir_config = r->server->lookup_defaults;
     
diff -up httpd-2.4.33/modules/http2/h2_request.h httpd-2.4.46/modules/http2/h2_request.h
--- httpd-2.4.33/modules/http2/h2_request.h	2020-08-11 15:45:10.353093185 +0200
+++ httpd-2.4.46/modules/http2/h2_request.h	2020-07-15 16:59:43.000000000 +0200
@@ -24,7 +24,8 @@ apr_status_t h2_request_rcreate(h2_reque
 
 apr_status_t h2_request_add_header(h2_request *req, apr_pool_t *pool,
                                    const char *name, size_t nlen,
-                                   const char *value, size_t vlen);
+                                   const char *value, size_t vlen,
+                                   size_t max_field_len, int *pwas_added);
 
 apr_status_t h2_request_add_trailer(h2_request *req, apr_pool_t *pool,
                                     const char *name, size_t nlen,
diff -up httpd-2.4.33/modules/http2/h2_session.c httpd-2.4.46/modules/http2/h2_session.c
--- httpd-2.4.33/modules/http2/h2_session.c	2020-08-11 15:45:10.517094239 +0200
+++ httpd-2.4.46/modules/http2/h2_session.c	2020-07-08 13:53:48.000000000 +0200
@@ -106,7 +106,7 @@ static int rst_unprocessed_stream(h2_str
 
 static void cleanup_unprocessed_streams(h2_session *session)
 {
-    h2_mplx_stream_do(session->mplx, rst_unprocessed_stream, session);
+    h2_mplx_m_stream_do(session->mplx, rst_unprocessed_stream, session);
 }
 
 static h2_stream *h2_session_open_stream(h2_session *session, int stream_id,
@@ -385,7 +385,7 @@ static int on_frame_recv_cb(nghttp2_sess
             break;
         case NGHTTP2_RST_STREAM:
             ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03067)
-                          "h2_stream(%ld-%d): RST_STREAM by client, errror=%d",
+                          "h2_stream(%ld-%d): RST_STREAM by client, error=%d",
                           session->id, (int)frame->hd.stream_id,
                           (int)frame->rst_stream.error_code);
             stream = h2_session_stream_get(session, frame->hd.stream_id);
@@ -397,7 +397,7 @@ static int on_frame_recv_cb(nghttp2_sess
             else {
                 /* A stream reset on a request it sent us. Could happen in a browser
                  * when the user navigates away or cancels loading - maybe. */
-                h2_mplx_client_rst(session->mplx, frame->hd.stream_id);
+                h2_mplx_m_client_rst(session->mplx, frame->hd.stream_id);
                 ++session->streams_reset;
             }
             break;
@@ -467,7 +467,7 @@ static int on_frame_recv_cb(nghttp2_sess
 }
 
 static int h2_session_continue_data(h2_session *session) {
-    if (h2_mplx_has_master_events(session->mplx)) {
+    if (h2_mplx_m_has_master_events(session->mplx)) {
         return 0;
     }
     if (h2_conn_io_needs_flush(&session->io)) {
@@ -729,7 +729,7 @@ static apr_status_t h2_session_shutdown(
          * Remove all streams greater than this number without submitting
          * a RST_STREAM frame, since that should be clear from the GOAWAY
          * we send. */
-        session->local.accepted_max = h2_mplx_shutdown(session->mplx);
+        session->local.accepted_max = h2_mplx_m_shutdown(session->mplx);
         session->local.error = error;
     }
     else {
@@ -779,7 +779,7 @@ static apr_status_t session_cleanup(h2_s
     }
 
     transit(session, trigger, H2_SESSION_ST_CLEANUP);
-    h2_mplx_release_and_join(session->mplx, session->iowait);
+    h2_mplx_m_release_and_join(session->mplx, session->iowait);
     session->mplx = NULL;
 
     ap_assert(session->ngh2);
@@ -800,7 +800,7 @@ static apr_status_t session_pool_cleanup
         /* if the session is still there, now is the last chance
          * to perform cleanup. Normally, cleanup should have happened
          * earlier in the connection pre_close. Main reason is that
-         * any ongoing requests on slave connections might still access
+         * any ongoing requests on secondary connections might still access
          * data which has, at this time, already been freed. An example
          * is mod_ssl that uses request hooks. */
         ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, c,
@@ -893,7 +893,7 @@ apr_status_t h2_session_create(h2_sessio
     session->monitor->on_state_event = on_stream_state_event;
     session->monitor->on_event = on_stream_event;
     
-    session->mplx = h2_mplx_create(c, s, session->pool, workers);
+    session->mplx = h2_mplx_m_create(c, s, session->pool, workers);
     
     /* connection input filter that feeds the session */
     session->cin = h2_filter_cin_create(session);
@@ -1179,7 +1179,7 @@ struct h2_stream *h2_session_push(h2_ses
     stream = h2_session_open_stream(session, nid, is->id);
     if (!stream) {
         ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, 
-                      H2_STRM_LOG(APLOGNO(03077), stream, 
+                      H2_STRM_LOG(APLOGNO(03077), is,
                       "failed to create stream obj %d"), nid);
         /* kill the push_promise */
         nghttp2_submit_rst_stream(session->ngh2, NGHTTP2_FLAG_NONE, nid,
@@ -1285,7 +1285,7 @@ apr_status_t h2_session_set_prio(h2_sess
 
         rv = nghttp2_session_change_stream_priority(session->ngh2, stream->id, &ps);
         ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, 
-                      ""H2_STRM_LOG(APLOGNO(03203), stream, 
+                      H2_STRM_LOG(APLOGNO(03203), stream, 
                       "PUSH %s, weight=%d, depends=%d, returned=%d"),
                       ptype, ps.weight, ps.stream_id, rv);
         status = (rv < 0)? APR_EGENERAL : APR_SUCCESS;
@@ -1415,7 +1415,7 @@ static apr_status_t on_stream_headers(h2
             && (headers->status < 400)
             && (headers->status != 304)
             && h2_session_push_enabled(session)) {
-            /* PUSH is possibe and enabled on server, unless the request
+            /* PUSH is possible and enabled on server, unless the request
              * denies it, submit resources to push */
             s = apr_table_get(headers->notes, H2_PUSH_MODE_NOTE);
             if (!s || strcmp(s, "0")) {
@@ -1552,7 +1552,7 @@ static void h2_session_in_flush(h2_sessi
         if (stream) {
             ap_assert(!stream->scheduled);
             if (h2_stream_prep_processing(stream) == APR_SUCCESS) {
-                h2_mplx_process(session->mplx, stream, stream_pri_cmp, session);
+                h2_mplx_m_process(session->mplx, stream, stream_pri_cmp, session);
             }
             else {
                 h2_stream_rst(stream, H2_ERR_INTERNAL_ERROR);
@@ -1824,7 +1824,7 @@ static void h2_session_ev_no_io(h2_sessi
                           session->open_streams);
             h2_conn_io_flush(&session->io);
             if (session->open_streams > 0) {
-                if (h2_mplx_awaits_data(session->mplx)) {
+                if (h2_mplx_m_awaits_data(session->mplx)) {
                     /* waiting for at least one stream to produce data */
                     transit(session, "no io", H2_SESSION_ST_WAIT);
                 }
@@ -1983,7 +1983,7 @@ static void on_stream_state_enter(void *
             break;
         case H2_SS_CLEANUP:
             nghttp2_session_set_stream_user_data(session->ngh2, stream->id, NULL);
-            h2_mplx_stream_cleanup(session->mplx, stream);
+            h2_mplx_m_stream_cleanup(session->mplx, stream);
             break;
         default:
             break;
@@ -2073,7 +2073,7 @@ static void dispatch_event(h2_session *s
 static apr_status_t dispatch_master(h2_session *session) {
     apr_status_t status;
     
-    status = h2_mplx_dispatch_master_events(session->mplx, 
+    status = h2_mplx_m_dispatch_master_events(session->mplx, 
                                             on_stream_resume, session);
     if (status == APR_EAGAIN) {
         ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, session->c,
@@ -2141,7 +2141,7 @@ apr_status_t h2_session_process(h2_sessi
                 break;
                 
             case H2_SESSION_ST_IDLE:
-                if (session->idle_until && (apr_time_now() + session->idle_delay) > session->idle_until) {
+                if (session->idle_until && (now + session->idle_delay) > session->idle_until) {
                     ap_log_cerror( APLOG_MARK, APLOG_TRACE1, status, c,
                                   H2_SSSN_MSG(session, "idle, timeout reached, closing"));
                     if (session->idle_delay) {
@@ -2175,7 +2175,7 @@ apr_status_t h2_session_process(h2_sessi
                         session->have_read = 1;
                     }
                     else if (APR_STATUS_IS_EAGAIN(status) || APR_STATUS_IS_TIMEUP(status)) {
-                        status = h2_mplx_idle(session->mplx);
+                        status = h2_mplx_m_idle(session->mplx);
                         if (status == APR_EAGAIN) {
                             break;
                         }
@@ -2205,7 +2205,7 @@ apr_status_t h2_session_process(h2_sessi
                     /* We wait in smaller increments, using a 1 second timeout.
                      * That gives us the chance to check for MPMQ_STOPPING often. 
                      */
-                    status = h2_mplx_idle(session->mplx);
+                    status = h2_mplx_m_idle(session->mplx);
                     if (status == APR_EAGAIN) {
                         break;
                     }
@@ -2319,7 +2319,7 @@ apr_status_t h2_session_process(h2_sessi
                                   "h2_session: wait for data, %ld micros", 
                                   (long)session->wait_us);
                 }
-                status = h2_mplx_out_trywait(session->mplx, session->wait_us, 
+                status = h2_mplx_m_out_trywait(session->mplx, session->wait_us, 
                                              session->iowait);
                 if (status == APR_SUCCESS) {
                     session->wait_us = 0;
@@ -2356,7 +2356,7 @@ apr_status_t h2_session_process(h2_sessi
             dispatch_event(session, H2_SESSION_EV_NGH2_DONE, 0, NULL); 
         }
         if (session->reprioritize) {
-            h2_mplx_reprioritize(session->mplx, stream_pri_cmp, session);
+            h2_mplx_m_reprioritize(session->mplx, stream_pri_cmp, session);
             session->reprioritize = 0;
         }
     }
diff -up httpd-2.4.33/modules/http2/h2_session.h httpd-2.4.46/modules/http2/h2_session.h
--- httpd-2.4.33/modules/http2/h2_session.h	2020-08-11 15:45:10.517094239 +0200
+++ httpd-2.4.46/modules/http2/h2_session.h	2020-07-08 13:53:48.000000000 +0200
@@ -132,7 +132,7 @@ typedef struct h2_session {
     const char *last_status_msg;    /* the one already reported */
     
     struct h2_iqueue *in_pending;   /* all streams with input pending */
-    struct h2_iqueue *in_process;   /* all streams ready for processing on slave */
+    struct h2_iqueue *in_process;   /* all streams ready for processing on a secondary */
 
 } h2_session;
 
diff -up httpd-2.4.33/modules/http2/h2_stream.c httpd-2.4.46/modules/http2/h2_stream.c
--- httpd-2.4.33/modules/http2/h2_stream.c	2020-08-11 15:45:10.517094239 +0200
+++ httpd-2.4.46/modules/http2/h2_stream.c	2020-07-15 16:59:43.000000000 +0200
@@ -446,7 +446,7 @@ apr_status_t h2_stream_recv_frame(h2_str
                 ap_assert(stream->request == NULL);
                 if (stream->rtmp == NULL) {
                     /* This can only happen, if the stream has received no header
-                     * name/value pairs at all. The lastest nghttp2 version have become
+                     * name/value pairs at all. The latest nghttp2 versions have become
                      * pretty good at detecting this early. In any case, we have
                      * to abort the connection here, since this is clearly a protocol error */
                     return APR_EINVAL;
@@ -654,11 +654,14 @@ static void set_error_response(h2_stream
 
 static apr_status_t add_trailer(h2_stream *stream,
                                 const char *name, size_t nlen,
-                                const char *value, size_t vlen)
+                                const char *value, size_t vlen,
+                                size_t max_field_len, int *pwas_added)
 {
     conn_rec *c = stream->session->c;
     char *hname, *hvalue;
+    const char *existing;
 
+    *pwas_added = 0;
     if (nlen == 0 || name[0] == ':') {
         ap_log_cerror(APLOG_MARK, APLOG_DEBUG, APR_EINVAL, c, 
                       H2_STRM_LOG(APLOGNO(03060), stream, 
@@ -672,8 +675,15 @@ static apr_status_t add_trailer(h2_strea
         stream->trailers = apr_table_make(stream->pool, 5);
     }
     hname = apr_pstrndup(stream->pool, name, nlen);
-    hvalue = apr_pstrndup(stream->pool, value, vlen);
     h2_util_camel_case_header(hname, nlen);
+    existing = apr_table_get(stream->trailers, hname);
+    if (max_field_len 
+        && ((existing? strlen(existing)+2 : 0) + vlen + nlen + 2 > max_field_len)) {
+        /* "key: (oldval, )?nval" is too long */
+        return APR_EINVAL;
+    }
+    if (!existing) *pwas_added = 1;
+    hvalue = apr_pstrndup(stream->pool, value, vlen);
     apr_table_mergen(stream->trailers, hname, hvalue);
     ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c, 
                   H2_STRM_MSG(stream, "added trailer '%s: %s'"), hname, hvalue);
@@ -686,49 +696,31 @@ apr_status_t h2_stream_add_header(h2_str
                                   const char *value, size_t vlen)
 {
     h2_session *session = stream->session;
-    int error = 0;
-    apr_status_t status;
+    int error = 0, was_added = 0;
+    apr_status_t status = APR_SUCCESS;
     
     if (stream->has_response) {
         return APR_EINVAL;    
     }
-    ++stream->request_headers_added;
+
     if (name[0] == ':') {
         if ((vlen) > session->s->limit_req_line) {
             /* pseudo header: approximation of request line size check */
-            ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, session->c,  
-                          H2_STRM_LOG(APLOGNO(10178), stream, 
-                                      "Request pseudo header exceeds "
-                                      "LimitRequestFieldSize: %s"), name);
+            if (!h2_stream_is_ready(stream)) {
+                ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, session->c,
+                              H2_STRM_LOG(APLOGNO(10178), stream,
+                                          "Request pseudo header exceeds "
+                                          "LimitRequestFieldSize: %s"), name);
+            }
             error = HTTP_REQUEST_URI_TOO_LARGE;
+            goto cleanup;
         }
     }
-    else if ((nlen + 2 + vlen) > session->s->limit_req_fieldsize) {
-        /* header too long */
-        ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, session->c,  
-                      H2_STRM_LOG(APLOGNO(10180), stream,"Request header exceeds "
-                                  "LimitRequestFieldSize: %.*s"),
-                      (int)H2MIN(nlen, 80), name);
-        error = HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE;
-    }
-    
-    if (stream->request_headers_added > session->s->limit_req_fields + 4) {
-        /* too many header lines, include 4 pseudo headers */
-        if (stream->request_headers_added 
-            > session->s->limit_req_fields + 4 + 100) {
-            /* yeah, right */
-            h2_stream_rst(stream, H2_ERR_ENHANCE_YOUR_CALM);
-            return APR_ECONNRESET;
-        }
-        ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, session->c, 
-                      H2_STRM_LOG(APLOGNO(10181), stream, "Number of request headers "
-                                  "exceeds LimitRequestFields"));
-        error = HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE;
-    }
     
-    if (error) {
-        set_error_response(stream, error);
-        return APR_EINVAL; 
+    if (session->s->limit_req_fields > 0 
+        && stream->request_headers_added > session->s->limit_req_fields) {
+        /* already over limit, count this attempt, but do not take it in */
+        ++stream->request_headers_added;
     }
     else if (H2_SS_IDLE == stream->state) {
         if (!stream->rtmp) {
@@ -736,16 +728,55 @@ apr_status_t h2_stream_add_header(h2_str
                                          NULL, NULL, NULL, NULL, NULL, 0);
         }
         status = h2_request_add_header(stream->rtmp, stream->pool,
-                                       name, nlen, value, vlen);
+                                       name, nlen, value, vlen,
+                                       session->s->limit_req_fieldsize, &was_added);
+        if (was_added) ++stream->request_headers_added;
     }
     else if (H2_SS_OPEN == stream->state) {
-        status = add_trailer(stream, name, nlen, value, vlen);
+        status = add_trailer(stream, name, nlen, value, vlen,
+                             session->s->limit_req_fieldsize, &was_added);
+        if (was_added) ++stream->request_headers_added;
     }
     else {
         status = APR_EINVAL;
+        goto cleanup;
     }
     
-    if (status != APR_SUCCESS) {
+    if (APR_EINVAL == status) {
+        /* header too long */
+        if (!h2_stream_is_ready(stream)) {
+            ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, session->c,
+                          H2_STRM_LOG(APLOGNO(10180), stream,"Request header exceeds "
+                                      "LimitRequestFieldSize: %.*s"),
+                          (int)H2MIN(nlen, 80), name);
+        }
+        error = HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE;
+        goto cleanup;
+    }
+    
+    if (session->s->limit_req_fields > 0 
+        && stream->request_headers_added > session->s->limit_req_fields) {
+        /* too many header lines */
+        if (stream->request_headers_added > session->s->limit_req_fields + 100) {
+            /* yeah, right, this request is way over the limit, say goodbye */
+            h2_stream_rst(stream, H2_ERR_ENHANCE_YOUR_CALM);
+            return APR_ECONNRESET;
+        }
+        if (!h2_stream_is_ready(stream)) {
+            ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, session->c,
+                          H2_STRM_LOG(APLOGNO(10181), stream, "Number of request headers "
+                                      "exceeds LimitRequestFields"));
+        }
+        error = HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE;
+        goto cleanup;
+    }
+    
+cleanup:
+    if (error) {
+        set_error_response(stream, error);
+        return APR_EINVAL; 
+    }
+    else if (status != APR_SUCCESS) {
         ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
                       H2_STRM_MSG(stream, "header %s not accepted"), name);
         h2_stream_dispatch(stream, H2_SEV_CANCELLED);
@@ -782,10 +813,12 @@ apr_status_t h2_stream_end_headers(h2_st
         ctx.failed_key = NULL;
         apr_table_do(table_check_val_len, &ctx, stream->request->headers, NULL);
         if (ctx.failed_key) {
-            ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, stream->session->c,  
-                          H2_STRM_LOG(APLOGNO(10190), stream,"Request header exceeds "
-                                      "LimitRequestFieldSize: %.*s"),
-                          (int)H2MIN(strlen(ctx.failed_key), 80), ctx.failed_key);
+            if (!h2_stream_is_ready(stream)) {
+                ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, stream->session->c,
+                              H2_STRM_LOG(APLOGNO(10230), stream,"Request header exceeds "
+                                          "LimitRequestFieldSize: %.*s"),
+                              (int)H2MIN(strlen(ctx.failed_key), 80), ctx.failed_key);
+            }
             set_error_response(stream, HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE);
             /* keep on returning APR_SUCCESS, so that we send an HTTP response and
              * do not RST the stream. */
@@ -903,7 +936,7 @@ apr_status_t h2_stream_out_prepare(h2_st
     
     if (status == APR_EAGAIN) {
         /* TODO: ugly, someone needs to retrieve the response first */
-        h2_mplx_keep_active(stream->session->mplx, stream);
+        h2_mplx_m_keep_active(stream->session->mplx, stream);
         ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
                       H2_STRM_MSG(stream, "prep, response eagain"));
         return status;
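
For orientation, the counting logic that the h2_stream_add_header hunks above introduce can be restated as a tiny standalone C sketch (the name check_header_count and the sample values are illustrative, not module code): every accepted header bumps a per-stream counter, exceeding LimitRequestFields leads to a 431 answer, and going more than 100 headers beyond that limit resets the stream instead.

    #include <stdio.h>

    enum verdict { ACCEPT, ANSWER_431, RESET_STREAM };

    /* 'added' is the number of request headers accepted so far, 'limit' the
     * configured LimitRequestFields (0 means unlimited). */
    static enum verdict check_header_count(int added, int limit)
    {
        if (limit <= 0 || added <= limit)
            return ACCEPT;           /* within the configured limit */
        if (added > limit + 100)
            return RESET_STREAM;     /* way over: RST with ENHANCE_YOUR_CALM */
        return ANSWER_431;           /* over the limit: 431 response */
    }

    int main(void)
    {
        printf("%d\n", check_header_count(100, 100));  /* ACCEPT (0) */
        printf("%d\n", check_header_count(101, 100));  /* ANSWER_431 (1) */
        printf("%d\n", check_header_count(201, 100));  /* RESET_STREAM (2) */
        return 0;
    }
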
diff -up httpd-2.4.33/modules/http2/h2_stream.h httpd-2.4.46/modules/http2/h2_stream.h
--- httpd-2.4.33/modules/http2/h2_stream.h	2020-08-11 15:45:10.505094163 +0200
+++ httpd-2.4.46/modules/http2/h2_stream.h	2020-02-21 01:33:40.000000000 +0100
@@ -199,7 +199,7 @@ apr_status_t h2_stream_add_header(h2_str
                                   const char *name, size_t nlen,
                                   const char *value, size_t vlen);
                                   
-/* End the contruction of request headers */
+/* End the construction of request headers */
 apr_status_t h2_stream_end_headers(h2_stream *stream, int eos, size_t raw_bytes);
 
 
diff -up httpd-2.4.33/modules/http2/h2_switch.c httpd-2.4.46/modules/http2/h2_switch.c
--- httpd-2.4.33/modules/http2/h2_switch.c	2020-08-11 15:45:10.517094239 +0200
+++ httpd-2.4.46/modules/http2/h2_switch.c	2020-06-20 16:21:19.000000000 +0200
@@ -159,7 +159,6 @@ static int h2_protocol_switch(conn_rec *
              * right away.
              */
             ap_remove_input_filter_byhandle(r->input_filters, "http_in");
-            ap_remove_input_filter_byhandle(r->input_filters, "reqtimeout");
             ap_remove_output_filter_byhandle(r->output_filters, "HTTP_HEADER");
             
             /* Ok, start an h2_conn on this one. */
diff -up httpd-2.4.33/modules/http2/h2_task.c httpd-2.4.46/modules/http2/h2_task.c
--- httpd-2.4.33/modules/http2/h2_task.c	2020-08-11 15:45:10.505094163 +0200
+++ httpd-2.4.46/modules/http2/h2_task.c	2020-07-15 16:17:17.000000000 +0200
@@ -86,7 +86,7 @@ static apr_status_t open_output(h2_task
                   task->request->authority, 
                   task->request->path);
     task->output.opened = 1;
-    return h2_mplx_out_open(task->mplx, task->stream_id, task->output.beam);
+    return h2_mplx_t_out_open(task->mplx, task->stream_id, task->output.beam);
 }
 
 static apr_status_t send_out(h2_task *task, apr_bucket_brigade* bb, int block)
@@ -126,8 +126,8 @@ static apr_status_t send_out(h2_task *ta
  * request_rec out filter chain) into the h2_mplx for further sending
  * on the master connection. 
  */
-static apr_status_t slave_out(h2_task *task, ap_filter_t* f, 
-                              apr_bucket_brigade* bb)
+static apr_status_t secondary_out(h2_task *task, ap_filter_t* f, 
+                                  apr_bucket_brigade* bb)
 {
     apr_bucket *b;
     apr_status_t rv = APR_SUCCESS;
@@ -175,7 +175,7 @@ send:
             if (APR_SUCCESS == rv) {
                 /* could not write all, buffer the rest */
                 ap_log_cerror(APLOG_MARK, APLOG_DEBUG, rv, task->c, APLOGNO(03405)
-                              "h2_slave_out(%s): saving brigade", task->id);
+                              "h2_secondary_out(%s): saving brigade", task->id);
                 ap_assert(NULL);
                 rv = ap_save_brigade(f, &task->output.bb, &bb, task->pool);
                 flush = 1;
@@ -189,7 +189,7 @@ send:
     }
 out:
     ap_log_cerror(APLOG_MARK, APLOG_TRACE2, rv, task->c, 
-                  "h2_slave_out(%s): slave_out leave", task->id);    
+                  "h2_secondary_out(%s): secondary_out leave", task->id);    
     return rv;
 }
 
@@ -202,14 +202,14 @@ static apr_status_t output_finish(h2_tas
 }
 
 /*******************************************************************************
- * task slave connection filters
+ * task secondary connection filters
  ******************************************************************************/
 
-static apr_status_t h2_filter_slave_in(ap_filter_t* f,
-                                       apr_bucket_brigade* bb,
-                                       ap_input_mode_t mode,
-                                       apr_read_type_e block,
-                                       apr_off_t readbytes)
+static apr_status_t h2_filter_secondary_in(ap_filter_t* f,
+                                           apr_bucket_brigade* bb,
+                                           ap_input_mode_t mode,
+                                           apr_read_type_e block,
+                                           apr_off_t readbytes)
 {
     h2_task *task;
     apr_status_t status = APR_SUCCESS;
@@ -224,7 +224,7 @@ static apr_status_t h2_filter_slave_in(a
 
     if (trace1) {
         ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c,
-                      "h2_slave_in(%s): read, mode=%d, block=%d, readbytes=%ld", 
+                      "h2_secondary_in(%s): read, mode=%d, block=%d, readbytes=%ld", 
                       task->id, mode, block, (long)readbytes);
     }
     
@@ -254,7 +254,7 @@ static apr_status_t h2_filter_slave_in(a
         /* Get more input data for our request. */
         if (trace1) {
             ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c,
-                          "h2_slave_in(%s): get more data from mplx, block=%d, "
+                          "h2_secondary_in(%s): get more data from mplx, block=%d, "
                           "readbytes=%ld", task->id, block, (long)readbytes);
         }
         if (task->input.beam) {
@@ -267,7 +267,7 @@ static apr_status_t h2_filter_slave_in(a
         
         if (trace1) {
             ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, f->c,
-                          "h2_slave_in(%s): read returned", task->id);
+                          "h2_secondary_in(%s): read returned", task->id);
         }
         if (APR_STATUS_IS_EAGAIN(status) 
             && (mode == AP_MODE_GETLINE || block == APR_BLOCK_READ)) {
@@ -306,7 +306,7 @@ static apr_status_t h2_filter_slave_in(a
     if (APR_BRIGADE_EMPTY(task->input.bb)) {
         if (trace1) {
             ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c,
-                          "h2_slave_in(%s): no data", task->id);
+                          "h2_secondary_in(%s): no data", task->id);
         }
         return (block == APR_NONBLOCK_READ)? APR_EAGAIN : APR_EOF;
     }
@@ -334,7 +334,7 @@ static apr_status_t h2_filter_slave_in(a
             buffer[len] = 0;
             if (trace1) {
                 ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c,
-                              "h2_slave_in(%s): getline: %s",
+                              "h2_secondary_in(%s): getline: %s",
                               task->id, buffer);
             }
         }
@@ -344,7 +344,7 @@ static apr_status_t h2_filter_slave_in(a
          * to support it. Seems to work. */
         ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_ENOTIMPL, f->c,
                       APLOGNO(03472) 
-                      "h2_slave_in(%s), unsupported READ mode %d", 
+                      "h2_secondary_in(%s), unsupported READ mode %d", 
                       task->id, mode);
         status = APR_ENOTIMPL;
     }
@@ -352,19 +352,19 @@ static apr_status_t h2_filter_slave_in(a
     if (trace1) {
         apr_brigade_length(bb, 0, &bblen);
         ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c,
-                      "h2_slave_in(%s): %ld data bytes", task->id, (long)bblen);
+                      "h2_secondary_in(%s): %ld data bytes", task->id, (long)bblen);
     }
     return status;
 }
 
-static apr_status_t h2_filter_slave_output(ap_filter_t* filter,
-                                           apr_bucket_brigade* brigade)
+static apr_status_t h2_filter_secondary_output(ap_filter_t* filter,
+                                               apr_bucket_brigade* brigade)
 {
     h2_task *task = h2_ctx_get_task(filter->c);
     apr_status_t status;
     
     ap_assert(task);
-    status = slave_out(task, filter, brigade);
+    status = secondary_out(task, filter, brigade);
     if (status != APR_SUCCESS) {
         h2_task_rst(task, H2_ERR_INTERNAL_ERROR);
     }
@@ -380,7 +380,7 @@ static apr_status_t h2_filter_parse_h1(a
     /* There are cases where we need to parse a serialized http/1.1 
      * response. One example is a 100-continue answer in serialized mode
      * or via a mod_proxy setup */
-    while (bb && !task->output.sent_response) {
+    while (bb && !task->c->aborted && !task->output.sent_response) {
         status = h2_from_h1_parse_response(task, f, bb);
         ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, f->c,
                       "h2_task(%s): parsed response", task->id);
@@ -456,9 +456,9 @@ void h2_task_register_hooks(void)
     ap_hook_process_connection(h2_task_process_conn, 
                                NULL, NULL, APR_HOOK_FIRST);
 
-    ap_register_input_filter("H2_SLAVE_IN", h2_filter_slave_in,
+    ap_register_input_filter("H2_SECONDARY_IN", h2_filter_secondary_in,
                              NULL, AP_FTYPE_NETWORK);
-    ap_register_output_filter("H2_SLAVE_OUT", h2_filter_slave_output,
+    ap_register_output_filter("H2_SECONDARY_OUT", h2_filter_secondary_output,
                               NULL, AP_FTYPE_NETWORK);
     ap_register_output_filter("H2_PARSE_H1", h2_filter_parse_h1,
                               NULL, AP_FTYPE_NETWORK);
@@ -492,15 +492,15 @@ static int h2_task_pre_conn(conn_rec* c,
     (void)arg;
     if (ctx->task) {
         ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c,
-                      "h2_slave(%s), pre_connection, adding filters", c->log_id);
-        ap_add_input_filter("H2_SLAVE_IN", NULL, NULL, c);
+                      "h2_secondary(%s), pre_connection, adding filters", c->log_id);
+        ap_add_input_filter("H2_SECONDARY_IN", NULL, NULL, c);
         ap_add_output_filter("H2_PARSE_H1", NULL, NULL, c);
-        ap_add_output_filter("H2_SLAVE_OUT", NULL, NULL, c);
+        ap_add_output_filter("H2_SECONDARY_OUT", NULL, NULL, c);
     }
     return OK;
 }
 
-h2_task *h2_task_create(conn_rec *slave, int stream_id,
+h2_task *h2_task_create(conn_rec *secondary, int stream_id,
                         const h2_request *req, h2_mplx *m,
                         h2_bucket_beam *input, 
                         apr_interval_time_t timeout,
@@ -509,10 +509,10 @@ h2_task *h2_task_create(conn_rec *slave,
     apr_pool_t *pool;
     h2_task *task;
     
-    ap_assert(slave);
+    ap_assert(secondary);
     ap_assert(req);
 
-    apr_pool_create(&pool, slave->pool);
+    apr_pool_create(&pool, secondary->pool);
     apr_pool_tag(pool, "h2_task");
     task = apr_pcalloc(pool, sizeof(h2_task));
     if (task == NULL) {
@@ -520,7 +520,7 @@ h2_task *h2_task_create(conn_rec *slave,
     }
     task->id          = "000";
     task->stream_id   = stream_id;
-    task->c           = slave;
+    task->c           = secondary;
     task->mplx        = m;
     task->pool        = pool;
     task->request     = req;
@@ -555,37 +555,38 @@ apr_status_t h2_task_do(h2_task *task, a
     task->worker_started = 1;
     
     if (c->master) {
-        /* Each conn_rec->id is supposed to be unique at a point in time. Since
+        /* See the discussion at <https://github.com/icing/mod_h2/issues/195>
+         *
+         * Each conn_rec->id is supposed to be unique at a point in time. Since
          * some modules (and maybe external code) uses this id as an identifier
-         * for the request_rec they handle, it needs to be unique for slave 
+         * for the request_rec they handle, it needs to be unique for secondary 
          * connections also.
-         * The connection id is generated by the MPM and most MPMs use the formula
-         *    id := (child_num * max_threads) + thread_num
-         * which means that there is a maximum id of about
-         *    idmax := max_child_count * max_threads
-         * If we assume 2024 child processes with 2048 threads max, we get
-         *    idmax ~= 2024 * 2048 = 2 ** 22
-         * On 32 bit systems, we have not much space left, but on 64 bit systems
-         * (and higher?) we can use the upper 32 bits without fear of collision.
-         * 32 bits is just what we need, since a connection can only handle so
-         * many streams. 
+         *
+         * The MPM module assigns the connection ids and mod_unique_id uses
+         * them to generate identifiers for requests. While that works for
+         * HTTP/1.x, the parallel execution of several requests per
+         * connection will generate duplicate identifiers under load.
+         * 
+         * The original implementation for secondary connection identifiers used 
+         * to shift the master connection id up and assign the stream id to the 
+         * lower bits. This was cramped on 32 bit systems, but on 64bit there was
+         * enough space.
+         * 
+         * As issue 195 showed, mod_unique_id only uses the lower 32 bits of the
+         * connection id, even on 64 bit systems; this leads to collisions in request ids.
+         *
+         * The way master connection ids are generated, there is some space "at the
+         * top" of the lower 32 bits on almost all systems. If you have a setup
+         * with 64k threads per child and 255 child processes, you live on the edge.
+         *
+         * The new implementation shifts 8 bits and XORs in the worker id.
+         * This can still produce collisions with more than 256 h2 workers under
+         * heavy load. There seems to be no way for mod_h2 alone to solve this
+         * in all possible configurations.
          */
-        int slave_id, free_bits;
-        
+        task->c->id = (c->master->id << 8)^worker_id;
         task->id = apr_psprintf(task->pool, "%ld-%d", c->master->id, 
                                 task->stream_id);
-        if (sizeof(unsigned long) >= 8) {
-            free_bits = 32;
-            slave_id = task->stream_id;
-        }
-        else {
-            /* Assume we have a more limited number of threads/processes
-             * and h2 workers on a 32-bit system. Use the worker instead
-             * of the stream id. */
-            free_bits = 8;
-            slave_id = worker_id; 
-        }
-        task->c->id = (c->master->id << free_bits)^slave_id;
     }
         
     h2_beam_create(&task->output.beam, c->pool, task->stream_id, "output", 
@@ -600,7 +601,7 @@ apr_status_t h2_task_do(h2_task *task, a
     h2_ctx_create_for(c, task);
     apr_table_setn(c->notes, H2_TASK_ID_NOTE, task->id);
 
-    h2_slave_run_pre_connection(c, ap_get_conn_socket(c));            
+    h2_secondary_run_pre_connection(c, ap_get_conn_socket(c));            
 
     task->input.bb = apr_brigade_create(task->pool, c->bucket_alloc);
     if (task->request->serialize) {
@@ -708,7 +709,7 @@ static int h2_task_process_conn(conn_rec
     }
     else {
         ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, 
-                      "slave_conn(%ld): has no task", c->id);
+                      "secondary_conn(%ld): has no task", c->id);
     }
     return DECLINED;
 }
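
A small worked example may help with the comment above about the 8 bit shift. The sketch below (plain C with made-up ids, not module code) applies the same formula, secondary id = (master id << 8) XOR worker id, and shows why more than 256 h2 workers can make distinct master/worker pairs map to the same id.

    #include <stdio.h>

    /* The scheme from the comment above: secondary_id = (master_id << 8) ^ worker_id */
    static long secondary_conn_id(long master_id, int worker_id)
    {
        return (master_id << 8) ^ worker_id;
    }

    int main(void)
    {
        /* distinct while worker ids stay below 256 ... */
        printf("%lx\n", secondary_conn_id(0xbeefL, 5));   /* beef05 */
        printf("%lx\n", secondary_conn_id(0xbeefL, 6));   /* beef06 */

        /* ... but worker 261 on master 0xbeef produces the same id as worker 5
         * on master 0xbeee, so with more than 256 h2 workers different
         * master/worker pairs can collide. */
        printf("%lx\n", secondary_conn_id(0xbeefL, 261)); /* beee05 */
        printf("%lx\n", secondary_conn_id(0xbeeeL, 5));   /* beee05 */
        return 0;
    }
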
diff -up httpd-2.4.33/modules/http2/h2_task.h httpd-2.4.46/modules/http2/h2_task.h
--- httpd-2.4.33/modules/http2/h2_task.h	2020-08-11 15:45:10.517094239 +0200
+++ httpd-2.4.46/modules/http2/h2_task.h	2020-07-08 13:53:48.000000000 +0200
@@ -35,7 +35,7 @@
  *
  * Finally, to keep certain connection level filters, such as ourselves and
  * especially mod_ssl ones, from messing with our data, we need a filter
- * of our own to disble those.
+ * of our own to disable those.
  */
 
 struct h2_bucket_beam;
@@ -90,7 +90,7 @@ struct h2_task {
     apr_bucket *eor;
 };
 
-h2_task *h2_task_create(conn_rec *slave, int stream_id,
+h2_task *h2_task_create(conn_rec *secondary, int stream_id,
                         const h2_request *req, struct h2_mplx *m, 
                         struct h2_bucket_beam *input, 
                         apr_interval_time_t timeout,
diff -up httpd-2.4.33/modules/http2/h2_util.c httpd-2.4.46/modules/http2/h2_util.c
--- httpd-2.4.33/modules/http2/h2_util.c	2020-08-11 15:45:10.397093467 +0200
+++ httpd-2.4.46/modules/http2/h2_util.c	2020-07-15 16:59:43.000000000 +0200
@@ -638,15 +638,6 @@ apr_status_t h2_fifo_term(h2_fifo *fifo)
     apr_status_t rv;
     if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
         fifo->aborted = 1;
-        apr_thread_mutex_unlock(fifo->lock);
-    }
-    return rv;
-}
-
-apr_status_t h2_fifo_interrupt(h2_fifo *fifo)
-{
-    apr_status_t rv;
-    if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
         apr_thread_cond_broadcast(fifo->not_empty);
         apr_thread_cond_broadcast(fifo->not_full);
         apr_thread_mutex_unlock(fifo->lock);
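
The hunk above folds the removed h2_fifo_interrupt into h2_fifo_term: the abort flag is set and both condition variables are broadcast while the mutex is still held, so blocked pushers and pullers wake up and see the flag before anyone else can take the lock. A minimal sketch of that terminate-and-wake pattern, using pthreads and toy names instead of APR:

    #include <pthread.h>
    #include <stdio.h>

    typedef struct {
        pthread_mutex_t lock;
        pthread_cond_t  not_empty;
        pthread_cond_t  not_full;
        int             aborted;
    } toy_fifo;

    /* Set the abort flag and broadcast both condition variables before
     * releasing the lock, so blocked waiters cannot miss the state change. */
    static int toy_fifo_term(toy_fifo *f)
    {
        int rv = pthread_mutex_lock(&f->lock);
        if (rv == 0) {
            f->aborted = 1;
            pthread_cond_broadcast(&f->not_empty);
            pthread_cond_broadcast(&f->not_full);
            pthread_mutex_unlock(&f->lock);
        }
        return rv;
    }

    int main(void)
    {
        toy_fifo f = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER,
                       PTHREAD_COND_INITIALIZER, 0 };
        int rv = toy_fifo_term(&f);
        printf("term rv=%d, aborted=%d\n", rv, f.aborted);
        return 0;
    }
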
@@ -710,10 +701,6 @@ static apr_status_t fifo_push(h2_fifo *f
 {
     apr_status_t rv;
     
-    if (fifo->aborted) {
-        return APR_EOF;
-    }
-
     if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
         rv = fifo_push_int(fifo, elem, block);
         apr_thread_mutex_unlock(fifo->lock);
@@ -754,10 +741,6 @@ static apr_status_t fifo_pull(h2_fifo *f
 {
     apr_status_t rv;
     
-    if (fifo->aborted) {
-        return APR_EOF;
-    }
-    
     if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
         rv = pull_head(fifo, pelem, block);
         apr_thread_mutex_unlock(fifo->lock);
@@ -946,15 +929,6 @@ apr_status_t h2_ififo_term(h2_ififo *fif
     apr_status_t rv;
     if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
         fifo->aborted = 1;
-        apr_thread_mutex_unlock(fifo->lock);
-    }
-    return rv;
-}
-
-apr_status_t h2_ififo_interrupt(h2_ififo *fifo)
-{
-    apr_status_t rv;
-    if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
         apr_thread_cond_broadcast(fifo->not_empty);
         apr_thread_cond_broadcast(fifo->not_full);
         apr_thread_mutex_unlock(fifo->lock);
@@ -1018,10 +992,6 @@ static apr_status_t ififo_push(h2_ififo
 {
     apr_status_t rv;
     
-    if (fifo->aborted) {
-        return APR_EOF;
-    }
-
     if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
         rv = ififo_push_int(fifo, id, block);
         apr_thread_mutex_unlock(fifo->lock);
@@ -1062,10 +1032,6 @@ static apr_status_t ififo_pull(h2_ififo
 {
     apr_status_t rv;
     
-    if (fifo->aborted) {
-        return APR_EOF;
-    }
-    
     if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
         rv = ipull_head(fifo, pi, block);
         apr_thread_mutex_unlock(fifo->lock);
@@ -1088,10 +1054,6 @@ static apr_status_t ififo_peek(h2_ififo
     apr_status_t rv;
     int id;
     
-    if (fifo->aborted) {
-        return APR_EOF;
-    }
-    
     if (APR_SUCCESS == (rv = apr_thread_mutex_lock(fifo->lock))) {
         if (APR_SUCCESS == (rv = ipull_head(fifo, &id, block))) {
             switch (fn(id, ctx)) {
@@ -1117,39 +1079,40 @@ apr_status_t h2_ififo_try_peek(h2_ififo
     return ififo_peek(fifo, fn, ctx, 0);
 }
 
-apr_status_t h2_ififo_remove(h2_ififo *fifo, int id)
+static apr_status_t ififo_remove(h2_ififo *fifo, int id)
 {
-    apr_status_t rv;
+    int rc, i;
     
     if (fifo->aborted) {
         return APR_EOF;
     }
 
-    if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
-        int i, rc;
-        int e;
-        
-        rc = 0;
-        for (i = 0; i < fifo->count; ++i) {
-            e = fifo->elems[inth_index(fifo, i)];
-            if (e == id) {
-                ++rc;
-            }
-            else if (rc) {
-                fifo->elems[inth_index(fifo, i-rc)] = e;
-            }
-        }
-        if (rc) {
-            fifo->count -= rc;
-            if (fifo->count + rc == fifo->nelems) {
-                apr_thread_cond_broadcast(fifo->not_full);
-            }
-            rv = APR_SUCCESS;
+    rc = 0;
+    for (i = 0; i < fifo->count; ++i) {
+        int e = fifo->elems[inth_index(fifo, i)];
+        if (e == id) {
+            ++rc;
         }
-        else {
-            rv = APR_EAGAIN;
+        else if (rc) {
+            fifo->elems[inth_index(fifo, i-rc)] = e;
         }
-        
+    }
+    if (!rc) {
+        return APR_EAGAIN;
+    }
+    fifo->count -= rc;
+    if (fifo->count + rc == fifo->nelems) {
+        apr_thread_cond_broadcast(fifo->not_full);
+    }
+    return APR_SUCCESS;
+}
+
+apr_status_t h2_ififo_remove(h2_ififo *fifo, int id)
+{
+    apr_status_t rv;
+    
+    if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
+        rv = ififo_remove(fifo, id);
         apr_thread_mutex_unlock(fifo->lock);
     }
     return rv;
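
The refactoring above splits h2_ififo_remove into a locking wrapper and an unlocked helper. The helper makes one pass over the queue, counting matches in rc and sliding every survivor left by the number of matches seen so far. A standalone sketch of that single-pass compaction on a plain array (without the ring-buffer indexing of the real fifo; names are illustrative):

    #include <stdio.h>

    /* Remove every occurrence of 'id' from elems[0..count), keeping order.
     * Returns the new count: 'rc' counts matches seen so far and survivors
     * slide left by 'rc'. */
    static int remove_id(int *elems, int count, int id)
    {
        int i, rc = 0;
        for (i = 0; i < count; ++i) {
            int e = elems[i];
            if (e == id)
                ++rc;
            else if (rc)
                elems[i - rc] = e;
        }
        return count - rc;
    }

    int main(void)
    {
        int q[] = { 3, 7, 3, 5, 3 };
        int n = remove_id(q, 5, 3);
        int i;
        for (i = 0; i < n; ++i)
            printf("%d ", q[i]);   /* prints: 7 5 */
        printf("\n");
        return 0;
    }
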
@@ -1373,7 +1336,7 @@ apr_status_t h2_util_bb_avail(apr_bucket
         return status;
     }
     else if (blen == 0) {
-        /* brigade without data, does it have an EOS bucket somwhere? */
+        /* brigade without data, does it have an EOS bucket somewhere? */
         *plen = 0;
         *peos = h2_util_has_eos(bb, -1);
     }
@@ -1840,22 +1803,29 @@ int h2_res_ignore_trailer(const char *na
 }
 
 apr_status_t h2_req_add_header(apr_table_t *headers, apr_pool_t *pool, 
-                               const char *name, size_t nlen,
-                               const char *value, size_t vlen)
+                              const char *name, size_t nlen,
+                              const char *value, size_t vlen,
+                              size_t max_field_len, int *pwas_added)
 {
     char *hname, *hvalue;
+    const char *existing;
     
+    *pwas_added = 0;
     if (h2_req_ignore_header(name, nlen)) {
         return APR_SUCCESS;
     }
     else if (H2_HD_MATCH_LIT("cookie", name, nlen)) {
-        const char *existing = apr_table_get(headers, "cookie");
+        existing = apr_table_get(headers, "cookie");
         if (existing) {
             char *nval;
             
             /* Cookie headers come separately in HTTP/2, but need
              * to be merged by "; " (instead of default ", ")
              */
+            if (max_field_len && strlen(existing) + vlen + nlen + 4 > max_field_len) {
+                /* "key: oldval, nval" is too long */
+                return APR_EINVAL;
+            }
             hvalue = apr_pstrndup(pool, value, vlen);
             nval = apr_psprintf(pool, "%s; %s", existing, hvalue);
             apr_table_setn(headers, "Cookie", nval);
@@ -1869,8 +1839,16 @@ apr_status_t h2_req_add_header(apr_table
     }
     
     hname = apr_pstrndup(pool, name, nlen);
-    hvalue = apr_pstrndup(pool, value, vlen);
     h2_util_camel_case_header(hname, nlen);
+    existing = apr_table_get(headers, hname);
+    if (max_field_len) {
+        if ((existing? strlen(existing)+2 : 0) + vlen + nlen + 2 > max_field_len) {
+            /* "key: (oldval, )?nval" is too long */
+            return APR_EINVAL;
+        }
+    }
+    if (!existing) *pwas_added = 1;
+    hvalue = apr_pstrndup(pool, value, vlen);
     apr_table_mergen(headers, hname, hvalue);
     
     return APR_SUCCESS;
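
The checks added to h2_req_add_header above estimate the size of the header line that would result from adding or merging the value, and reject it with APR_EINVAL when it would exceed LimitRequestFieldSize. A rough sketch of that arithmetic (the helper name and the sample values are made up; the real code works on length parameters rather than C strings):

    #include <stdio.h>
    #include <string.h>

    /* Approximate length of the header line "name: [existing<sep>]value" that
     * the checks above guard against; returns 1 if it would exceed
     * max_field_len (0 = unlimited). */
    static int field_too_long(const char *name, const char *existing,
                              const char *value, size_t max_field_len)
    {
        size_t len = strlen(name) + 2 + strlen(value);   /* "name: value" */
        if (existing)
            len += strlen(existing) + 2;                 /* "existing; " or "existing, " */
        return max_field_len && len > max_field_len;
    }

    int main(void)
    {
        /* with LimitRequestFieldSize 20: "cookie: a=1; b=2" is 16 chars -> ok */
        printf("%d\n", field_too_long("cookie", "a=1", "b=2", 20));             /* 0 */
        /* "cookie: a=1; b=2345678901234" is 28 chars -> too long */
        printf("%d\n", field_too_long("cookie", "a=1", "b=2345678901234", 20)); /* 1 */
        return 0;
    }
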
@@ -1960,7 +1938,8 @@ int h2_util_frame_print(const nghttp2_fr
         case NGHTTP2_GOAWAY: {
             size_t len = (frame->goaway.opaque_data_len < s_len)?
                 frame->goaway.opaque_data_len : s_len-1;
-            memcpy(scratch, frame->goaway.opaque_data, len);
+            if (len)
+                memcpy(scratch, frame->goaway.opaque_data, len);
             scratch[len] = '\0';
             return apr_snprintf(buffer, maxlen, "GOAWAY[error=%d, reason='%s', "
                                 "last_stream=%d]", frame->goaway.error_code, 
diff -up httpd-2.4.33/modules/http2/h2_util.h httpd-2.4.46/modules/http2/h2_util.h
--- httpd-2.4.33/modules/http2/h2_util.h	2018-02-10 16:46:12.000000000 +0100
+++ httpd-2.4.46/modules/http2/h2_util.h	2020-07-15 16:59:43.000000000 +0200
@@ -209,7 +209,6 @@ apr_status_t h2_fifo_create(h2_fifo **pf
 apr_status_t h2_fifo_set_create(h2_fifo **pfifo, apr_pool_t *pool, int capacity);
 
 apr_status_t h2_fifo_term(h2_fifo *fifo);
-apr_status_t h2_fifo_interrupt(h2_fifo *fifo);
 
 int h2_fifo_count(h2_fifo *fifo);
 
@@ -229,7 +228,7 @@ apr_status_t h2_fifo_try_pull(h2_fifo *f
 
 typedef enum {
     H2_FIFO_OP_PULL,   /* pull the element from the queue, ie discard it */
-    H2_FIFO_OP_REPUSH, /* pull and immediatley re-push it */
+    H2_FIFO_OP_REPUSH, /* pull and immediately re-push it */
 } h2_fifo_op_t;
 
 typedef h2_fifo_op_t h2_fifo_peek_fn(void *head, void *ctx);
@@ -280,7 +279,6 @@ apr_status_t h2_ififo_create(h2_ififo **
 apr_status_t h2_ififo_set_create(h2_ififo **pfifo, apr_pool_t *pool, int capacity);
 
 apr_status_t h2_ififo_term(h2_ififo *fifo);
-apr_status_t h2_ififo_interrupt(h2_ififo *fifo);
 
 int h2_ififo_count(h2_ififo *fifo);
 
@@ -412,9 +410,14 @@ apr_status_t h2_res_create_ngheader(h2_n
 apr_status_t h2_req_create_ngheader(h2_ngheader **ph, apr_pool_t *p, 
                                     const struct h2_request *req);
 
+/**
+ * Add an HTTP/2 header and set *pwas_added when the header was really
+ * added and not ignored or merged into an existing value.
+ */
 apr_status_t h2_req_add_header(apr_table_t *headers, apr_pool_t *pool, 
                                const char *name, size_t nlen,
-                               const char *value, size_t vlen);
+                               const char *value, size_t vlen,
+                               size_t max_field_len, int *pwas_added);
 
 /*******************************************************************************
  * h2_request helpers
diff -up httpd-2.4.33/modules/http2/h2_version.h httpd-2.4.46/modules/http2/h2_version.h
--- httpd-2.4.33/modules/http2/h2_version.h	2020-08-11 15:45:10.505094163 +0200
+++ httpd-2.4.46/modules/http2/h2_version.h	2020-07-29 14:33:53.000000000 +0200
@@ -27,7 +27,7 @@
  * @macro
  * Version number of the http2 module as c string
  */
-#define MOD_HTTP2_VERSION "1.15.4"
+#define MOD_HTTP2_VERSION "1.15.14"
 
 /**
  * @macro
@@ -35,6 +35,6 @@
  * release. This is a 24 bit number with 8 bits for major number, 8 bits
  * for minor and 8 bits for patch. Version 1.2.3 becomes 0x010203.
  */
-#define MOD_HTTP2_VERSION_NUM 0x010f04
+#define MOD_HTTP2_VERSION_NUM 0x010f0e
 
 #endif /* mod_h2_h2_version_h */
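
The version bump above follows the packing rule stated in the comment: 8 bits per component, so 1.15.14 becomes (1 << 16) | (15 << 8) | 14 = 0x010f0e, matching the new MOD_HTTP2_VERSION_NUM. A one-line sanity check (illustrative only, not part of the module):

    #include <assert.h>

    int main(void)
    {
        /* 1.15.14 packed as major/minor/patch, 8 bits each */
        assert(((1 << 16) | (15 << 8) | 14) == 0x010f0e);
        return 0;
    }
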
diff -up httpd-2.4.33/modules/http2/h2_workers.c httpd-2.4.46/modules/http2/h2_workers.c
--- httpd-2.4.33/modules/http2/h2_workers.c	2018-02-10 16:46:12.000000000 +0100
+++ httpd-2.4.46/modules/http2/h2_workers.c	2020-07-08 13:53:48.000000000 +0200
@@ -155,7 +155,7 @@ static apr_status_t slot_pull_task(h2_sl
 {
     apr_status_t rv;
     
-    rv = h2_mplx_pop_task(m, &slot->task);
+    rv = h2_mplx_s_pop_task(m, &slot->task);
     if (slot->task) {
         /* Ok, we got something to give back to the worker for execution. 
          * If we still have idle workers, we let the worker be sticky, 
@@ -234,10 +234,10 @@ static void* APR_THREAD_FUNC slot_run(ap
              * mplx the opportunity to give us back a new task right away.
              */
             if (!slot->aborted && (--slot->sticks > 0)) {
-                h2_mplx_task_done(slot->task->mplx, slot->task, &slot->task);
+                h2_mplx_s_task_done(slot->task->mplx, slot->task, &slot->task);
             }
             else {
-                h2_mplx_task_done(slot->task->mplx, slot->task, NULL);
+                h2_mplx_s_task_done(slot->task->mplx, slot->task, NULL);
                 slot->task = NULL;
             }
         }
@@ -269,7 +269,6 @@ static apr_status_t workers_pool_cleanup
         }
 
         h2_fifo_term(workers->mplxs);
-        h2_fifo_interrupt(workers->mplxs);
 
         cleanup_zombies(workers);
     }
diff -up httpd-2.4.33/modules/http2/mod_http2.c httpd-2.4.46/modules/http2/mod_http2.c
--- httpd-2.4.33/modules/http2/mod_http2.c	2020-08-11 15:45:10.517094239 +0200
+++ httpd-2.4.46/modules/http2/mod_http2.c	2020-07-08 13:53:48.000000000 +0200
@@ -237,7 +237,7 @@ static const char *val_H2_PUSH(apr_pool_
     if (ctx) {
         if (r) {
             if (ctx->task) {
-                h2_stream *stream = h2_mplx_stream_get(ctx->task->mplx, ctx->task->stream_id);
+                h2_stream *stream = h2_mplx_t_stream_get(ctx->task->mplx, ctx->task);
                 if (stream && stream->push_policy != H2_PUSH_NONE) {
                     return "on";
                 }
@@ -271,7 +271,7 @@ static const char *val_H2_PUSHED_ON(apr_
 {
     if (ctx) {
         if (ctx->task && !H2_STREAM_CLIENT_INITIATED(ctx->task->stream_id)) {
-            h2_stream *stream = h2_mplx_stream_get(ctx->task->mplx, ctx->task->stream_id);
+            h2_stream *stream = h2_mplx_t_stream_get(ctx->task->mplx, ctx->task);
             if (stream) {
                 return apr_itoa(p, stream->initiated_on);
             }
diff -up httpd-2.4.33/modules/http2/mod_http2.h httpd-2.4.46/modules/http2/mod_http2.h
--- httpd-2.4.33/modules/http2/mod_http2.h	2020-08-11 15:45:10.517094239 +0200
+++ httpd-2.4.46/modules/http2/mod_http2.h	2020-02-21 01:33:40.000000000 +0100
@@ -35,7 +35,7 @@ APR_DECLARE_OPTIONAL_FN(int,
 
 /* The following functions were introduced for the experimental mod_proxy_http2
  * support, but have been abandoned since.
- * They are still declared here for backward compatibiliy, in case someone
+ * They are still declared here for backward compatibility, in case someone
  * tries to build an old mod_proxy_http2 against it, but will disappear
  * completely sometime in the future.
  */ 
diff -up httpd-2.4.33/modules/http2/mod_proxy_http2.c httpd-2.4.46/modules/http2/mod_proxy_http2.c
--- httpd-2.4.33/modules/http2/mod_proxy_http2.c	2020-08-11 15:45:10.517094239 +0200
+++ httpd-2.4.46/modules/http2/mod_proxy_http2.c	2020-01-30 16:14:40.000000000 +0100
@@ -401,6 +401,14 @@ run_connect:
          */
         apr_table_setn(ctx->p_conn->connection->notes,
                        "proxy-request-alpn-protos", "h2");
+        if (ctx->p_conn->ssl_hostname) {
+            ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, ctx->owner,
+                          "set SNI to %s for (%s)",
+                          ctx->p_conn->ssl_hostname,
+                          ctx->p_conn->hostname);
+            apr_table_setn(ctx->p_conn->connection->notes,
+                           "proxy-request-hostname", ctx->p_conn->ssl_hostname);
+        }
     }
 
     if (ctx->master->aborted) goto cleanup;