File apache2-http2-security-issues.patch of Package apache2.27541

Fixes for CVE-2019-9517 (bsc#1145575), CVE-2019-10082 (bsc#1145741), CVE-2019-10081 (bsc#1145742)

Consists of the following 2.4.x commits:

From 9f71c7ad5efc5ff3a234852ddf4f17dcb4b4e7f1 Mon Sep 17 00:00:00 2001
From: Jim Jagielski <jim@apache.org>
Date: Thu, 13 Jun 2019 11:08:29 +0000
Subject: [PATCH] Merge r1860260 from trunk:

 * modules/http2: more copying of data to disentangle worker processing from main connection

Submitted by: icing
Reviewed by: icing, covener, jim
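
The header-copying part of this commit shows up below in h2_headers.c: beaming a
headers bucket to the main connection now deep-clones the header tables instead of
aliasing strings owned by the worker side's pool. A minimal standalone sketch of the
difference (function and pool names here are illustrative, not from the patch; only
the apr_table_* calls are the real APR API):

    #include <apr_pools.h>
    #include <apr_tables.h>

    static apr_table_t *headers_for_main_conn(apr_pool_t *main_pool,
                                              apr_pool_t *task_pool)
    {
        apr_table_t *task_headers = apr_table_make(task_pool, 4);
        apr_table_set(task_headers, "cookie", "a=1; b=2");

        /* apr_table_copy() would keep pointers to the strings living in
         * task_pool; apr_table_clone() duplicates keys and values into
         * main_pool, so the result stays valid after the task side is
         * torn down. */
        return apr_table_clone(main_pool, task_headers);
    }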

From 768fbd906bd20e47ede2da52ef9d3a6c2d07f79d Mon Sep 17 00:00:00 2001
From: Jim Jagielski <jim@apache.org>
Date: Thu, 13 Jun 2019 11:09:12 +0000
Subject: [PATCH] Merge r1707084, r1707093, r1707159, r1707362 from trunk:

eor_bucket: don't destroy the request multiple times should any filter
do a copy (e.g. mod_bucketeer).

eor_bucket: follow up to r1707084: fix comment.
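
These eor_bucket commits turn the end-of-request bucket into a shared (refcounted)
bucket, so a filter that copies the brigade (e.g. mod_bucketeer) no longer produces
several buckets that each try to destroy the request pool. A condensed sketch of the
destroy side of that pattern, mirroring the eor_bucket.c hunks near the end of this
patch (assumes httpd.h and apr_buckets.h; trimmed, not a drop-in file):

    typedef struct {
        apr_bucket_refcount refcount;
        request_rec *data;
    } ap_bucket_eor;

    static void eor_bucket_destroy(void *data)
    {
        ap_bucket_eor *h = data;

        /* apr_bucket_shared_destroy() drops one reference and returns
         * non-zero only for the last copy, so the request pool is
         * destroyed exactly once no matter how many copies filters made. */
        if (apr_bucket_shared_destroy(h)) {
            if (h->data) {
                apr_pool_destroy(h->data->pool);
            }
            apr_bucket_free(h);
        }
    }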

From 03222c07d905bcf668028e0fec6b257a6c6964c6 Mon Sep 17 00:00:00 2001
From: Stefan Eissing <icing@apache.org>
Date: Thu, 1 Aug 2019 08:18:03 +0000
Subject: [PATCH] Merge of r1861338,1862475,1862583,1862865,1863221,1863276
 from trunk:

  *) mod_http2: core setting "LimitRequestFieldSize" is now additionally checked on
     merged header fields, just as HTTP/1.1 does (see the sketch after this list).
     [Stefan Eissing, Michael Kaufmann]

  *) mod_http2: fixed a bug that prevented proper stream cleanup when connection
     throttling was in place. Stream resets by clients on streams initiated by them
     are counted as a possible trigger for throttling. [Stefan Eissing]

  *) mod_http2/mpm_event: Fixes the behaviour when an HTTP/2 connection has nothing
     more to write with streams ongoing (flow control block). The timeout waiting
     for the client to send WINDOW_UPDATE was incorrectly KeepAliveTimeout and not
     Timeout as it should be. Fixes PR 63534. [Yann Ylavic, Stefan Eissing]
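
The first item above lands in h2_stream.c as h2_stream_end_headers(): once all header
fields of a stream have been merged, every value is re-checked against
LimitRequestFieldSize and an over-long field produces a 431 response instead of a
stream reset. A condensed sketch of that check (the ctx struct and apr_table_do()
callback mirror the patch; the surrounding request plumbing is omitted and the
trailing comment only outlines how the callback is driven):

    typedef struct {
        apr_size_t maxlen;            /* the server's limit_req_fieldsize */
        const char *failed_key;
    } val_len_check_ctx;

    static int table_check_val_len(void *baton, const char *key, const char *value)
    {
        val_len_check_ctx *ctx = baton;

        if (strlen(value) <= ctx->maxlen) return 1;   /* keep iterating */
        ctx->failed_key = key;                        /* remember the offender */
        return 0;                                     /* stop the table walk */
    }

    /* after all HEADERS/CONTINUATION data has been merged into request->headers:
     *   apr_table_do(table_check_val_len, &ctx, request->headers, NULL);
     *   if (ctx.failed_key) answer with 431 instead of resetting the stream. */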

Index: httpd-2.4.33/modules/http2/h2_conn.c
===================================================================
--- httpd-2.4.33.orig/modules/http2/h2_conn.c	2019-08-19 16:16:12.971819465 +0200
+++ httpd-2.4.33/modules/http2/h2_conn.c	2019-08-19 16:16:13.099820280 +0200
@@ -231,6 +231,13 @@ apr_status_t h2_conn_run(conn_rec *c)
             case H2_SESSION_ST_BUSY:
             case H2_SESSION_ST_WAIT:
                 c->cs->state = CONN_STATE_WRITE_COMPLETION;
+                if (c->cs && (session->open_streams || !session->remote.emitted_count)) {
+                    /* let the MPM know that we are not done and want
+                     * the Timeout behaviour instead of a KeepAliveTimeout
+                     * See PR 63534. 
+                     */
+                    c->cs->sense = CONN_SENSE_WANT_READ;
+                }
                 break;
             case H2_SESSION_ST_CLEANUP:
             case H2_SESSION_ST_DONE:
Index: httpd-2.4.33/modules/http2/h2_filter.c
===================================================================
--- httpd-2.4.33.orig/modules/http2/h2_filter.c	2019-08-19 16:16:12.971819465 +0200
+++ httpd-2.4.33/modules/http2/h2_filter.c	2019-08-19 16:16:13.099820280 +0200
@@ -493,6 +493,52 @@ static apr_status_t status_event(void *c
     return APR_SUCCESS;
 }
 
+static apr_status_t discard_body(request_rec *r, apr_off_t maxlen)
+{
+    apr_bucket_brigade *bb;
+    int seen_eos;
+    apr_status_t rv;
+
+    bb = apr_brigade_create(r->pool, r->connection->bucket_alloc);
+    seen_eos = 0;
+    do {
+        apr_bucket *bucket;
+
+        rv = ap_get_brigade(r->input_filters, bb, AP_MODE_READBYTES,
+                            APR_BLOCK_READ, HUGE_STRING_LEN);
+
+        if (rv != APR_SUCCESS) {
+            apr_brigade_destroy(bb);
+            return rv;
+        }
+
+        for (bucket = APR_BRIGADE_FIRST(bb);
+             bucket != APR_BRIGADE_SENTINEL(bb);
+             bucket = APR_BUCKET_NEXT(bucket))
+        {
+            const char *data;
+            apr_size_t len;
+
+            if (APR_BUCKET_IS_EOS(bucket)) {
+                seen_eos = 1;
+                break;
+            }
+            if (bucket->length == 0) {
+                continue;
+            }
+            rv = apr_bucket_read(bucket, &data, &len, APR_BLOCK_READ);
+            if (rv != APR_SUCCESS) {
+                apr_brigade_destroy(bb);
+                return rv;
+            }
+            maxlen -= bucket->length;
+        }
+        apr_brigade_cleanup(bb);
+    } while (!seen_eos && maxlen >= 0);
+
+    return APR_SUCCESS;
+}
+
 int h2_filter_h2_status_handler(request_rec *r)
 {
     conn_rec *c = r->connection;
@@ -510,8 +556,10 @@ int h2_filter_h2_status_handler(request_
 
     task = h2_ctx_get_task(r->connection);
     if (task) {
-
-        if ((status = ap_discard_request_body(r)) != OK) {
+        /* In this handler, we do some special sauce to send footers back,
+         * IFF we received footers in the request. This is used in our test
+         * cases, since CGI has no way of handling those. */
+        if ((status = discard_body(r, 1024)) != OK) {
             return status;
         }
         
Index: httpd-2.4.33/modules/http2/h2_headers.c
===================================================================
--- httpd-2.4.33.orig/modules/http2/h2_headers.c	2019-08-19 16:16:12.971819465 +0200
+++ httpd-2.4.33/modules/http2/h2_headers.c	2019-08-19 16:16:13.099820280 +0200
@@ -102,8 +102,9 @@ apr_bucket *h2_bucket_headers_beam(struc
                                     const apr_bucket *src)
 {
     if (H2_BUCKET_IS_HEADERS(src)) {
-        h2_headers *r = ((h2_bucket_headers *)src->data)->headers;
-        apr_bucket *b = h2_bucket_headers_create(dest->bucket_alloc, r);
+        h2_headers *src_headers = ((h2_bucket_headers *)src->data)->headers;
+        apr_bucket *b = h2_bucket_headers_create(dest->bucket_alloc, 
+                                                 h2_headers_clone(dest->p, src_headers));
         APR_BRIGADE_INSERT_TAIL(dest, b);
         return b;
     }
@@ -156,7 +157,14 @@ h2_headers *h2_headers_rcreate(request_r
 
 h2_headers *h2_headers_copy(apr_pool_t *pool, h2_headers *h)
 {
-    return h2_headers_create(h->status, h->headers, h->notes, h->raw_bytes, pool);
+    return h2_headers_create(h->status, apr_table_copy(pool, h->headers), 
+                             apr_table_copy(pool, h->notes), h->raw_bytes, pool);
+}
+
+h2_headers *h2_headers_clone(apr_pool_t *pool, h2_headers *h)
+{
+    return h2_headers_create(h->status, apr_table_clone(pool, h->headers), 
+                             apr_table_clone(pool, h->notes), h->raw_bytes, pool);
 }
 
 h2_headers *h2_headers_die(apr_status_t type,
Index: httpd-2.4.33/modules/http2/h2_headers.h
===================================================================
--- httpd-2.4.33.orig/modules/http2/h2_headers.h	2019-08-19 16:16:12.879818882 +0200
+++ httpd-2.4.33/modules/http2/h2_headers.h	2019-08-19 16:16:13.099820280 +0200
@@ -59,12 +59,18 @@ h2_headers *h2_headers_rcreate(request_r
                                  apr_table_t *header, apr_pool_t *pool);
 
 /**
- * Clone the headers into another pool. This will not copy any
+ * Copy the headers into another pool. This will not copy any
  * header strings.
  */
 h2_headers *h2_headers_copy(apr_pool_t *pool, h2_headers *h);
 
 /**
+ * Clone the headers into another pool. This will also clone any
+ * header strings.
+ */
+h2_headers *h2_headers_clone(apr_pool_t *pool, h2_headers *h);
+
+/**
  * Create the headers for the given error.
  * @param stream_id id of the stream to create the headers for
  * @param type the error code
Index: httpd-2.4.33/modules/http2/h2_mplx.c
===================================================================
--- httpd-2.4.33.orig/modules/http2/h2_mplx.c	2019-08-19 16:16:12.975819491 +0200
+++ httpd-2.4.33/modules/http2/h2_mplx.c	2019-08-19 16:17:34.276336122 +0200
@@ -53,8 +53,12 @@ typedef struct {
     h2_mplx *m;
     h2_stream *stream;
     apr_time_t now;
+    apr_size_t count;
 } stream_iter_ctx;
 
+static apr_status_t mplx_be_happy(h2_mplx *m);
+static apr_status_t mplx_be_annoyed(h2_mplx *m);
+
 apr_status_t h2_mplx_child_init(apr_pool_t *pool, server_rec *s)
 {
     return APR_SUCCESS;
@@ -98,7 +102,7 @@ static void stream_input_consumed(void *
 
 static void stream_joined(h2_mplx *m, h2_stream *stream)
 {
-    ap_assert(!stream->task || stream->task->worker_done);
+    ap_assert(!h2_task_has_started(stream->task) || stream->task->worker_done);
     
     h2_ihash_remove(m->shold, stream->id);
     h2_ihash_add(m->spurge, stream);
@@ -124,7 +128,7 @@ static void stream_cleanup(h2_mplx *m, h
     h2_ififo_remove(m->readyq, stream->id);
     h2_ihash_add(m->shold, stream);
     
-    if (!stream->task || stream->task->worker_done) {
+    if (!h2_task_has_started(stream->task) || stream->task->done_done) {
         stream_joined(m, stream);
     }
     else if (stream->task) {
@@ -194,7 +198,6 @@ h2_mplx *h2_mplx_create(conn_rec *c, ser
         m->stream_max_mem = h2_config_sgeti(s, H2_CONF_STREAM_MAX_MEM);
 
         m->streams = h2_ihash_create(m->pool, offsetof(h2_stream,id));
-        m->sredo = h2_ihash_create(m->pool, offsetof(h2_stream,id));
         m->shold = h2_ihash_create(m->pool, offsetof(h2_stream,id));
         m->spurge = h2_ihash_create(m->pool, offsetof(h2_stream,id));
         m->q = h2_iq_create(m->pool, m->max_streams);
@@ -208,8 +211,8 @@ h2_mplx *h2_mplx_create(conn_rec *c, ser
         m->workers = workers;
         m->max_active = workers->max_workers;
         m->limit_active = 6; /* the original h1 max parallel connections */
-        m->last_limit_change = m->last_idle_block = apr_time_now();
-        m->limit_change_interval = apr_time_from_msec(100);
+        m->last_mood_change = apr_time_now();
+        m->mood_update_interval = apr_time_from_msec(100);
         
         m->spare_slaves = apr_array_make(m->pool, 10, sizeof(conn_rec*));
     }
@@ -431,6 +434,10 @@ void h2_mplx_release_and_join(h2_mplx *m
 
     /* How to shut down a h2 connection:
      * 1. cancel all streams still active */
+    ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c, 
+                  "h2_mplx(%ld): release, %d/%d/%d streams (total/hold/purge), %d active tasks", 
+                  m->id, (int)h2_ihash_count(m->streams),
+                  (int)h2_ihash_count(m->shold), (int)h2_ihash_count(m->spurge), m->tasks_active);
     while (!h2_ihash_iter(m->streams, stream_cancel_iter, m)) {
         /* until empty */
     }
@@ -456,10 +463,10 @@ void h2_mplx_release_and_join(h2_mplx *m
             h2_ihash_iter(m->shold, report_stream_iter, m);
         }
     }
-    ap_assert(m->tasks_active == 0);
     m->join_wait = NULL;
-    
+
     /* 4. With all workers done, all streams should be in spurge */
+    ap_assert(m->tasks_active == 0);
     if (!h2_ihash_empty(m->shold)) {
         ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c, APLOGNO(03516)
                       "h2_mplx(%ld): unexpected %d streams in hold", 
@@ -470,8 +477,7 @@ void h2_mplx_release_and_join(h2_mplx *m
     m->c->aborted = old_aborted;
     H2_MPLX_LEAVE(m);
 
-    ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
-                  "h2_mplx(%ld): released", m->id);
+    ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c, "h2_mplx(%ld): released", m->id);
 }
 
 apr_status_t h2_mplx_stream_cleanup(h2_mplx *m, h2_stream *stream)
@@ -709,7 +715,6 @@ static h2_task *next_stream_task(h2_mplx
             }
             
             if (!stream->task) {
-
                 if (sid > m->max_stream_started) {
                     m->max_stream_started = sid;
                 }
@@ -728,9 +733,9 @@ static h2_task *next_stream_task(h2_mplx
                                   "create task"));
                     return NULL;
                 }
-                
             }
             
+            stream->task->started_at = apr_time_now();
             ++m->tasks_active;
             return stream->task;
         }
@@ -778,32 +783,18 @@ static void task_done(h2_mplx *m, h2_tas
                   "h2_mplx(%s): request done, %f ms elapsed", task->id, 
                   (task->done_at - task->started_at) / 1000.0);
     
-    if (task->started_at > m->last_idle_block) {
-        /* this task finished without causing an 'idle block', e.g.
-         * a block by flow control.
-         */
-        if (task->done_at- m->last_limit_change >= m->limit_change_interval
-            && m->limit_active < m->max_active) {
-            /* Well behaving stream, allow it more workers */
-            m->limit_active = H2MIN(m->limit_active * 2, 
-                                     m->max_active);
-            m->last_limit_change = task->done_at;
-            ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
-                          "h2_mplx(%ld): increase worker limit to %d",
-                          m->id, m->limit_active);
-        }
+    if (task->c && !task->c->aborted && task->started_at > m->last_mood_change) {
+        mplx_be_happy(m);
     }
-
+    
     ap_assert(task->done_done == 0);
 
     stream = h2_ihash_get(m->streams, task->stream_id);
     if (stream) {
         /* stream not done yet. */
-        if (!m->aborted && h2_ihash_get(m->sredo, stream->id)) {
+        if (!m->aborted && task->redo) {
             /* reset and schedule again */
-            task->worker_done = 0;
             h2_task_redo(task);
-            h2_ihash_remove(m->sredo, stream->id);
             h2_iq_add(m->q, stream->id, NULL, NULL);
             ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, m->c,
                           H2_STRM_MSG(stream, "redo, added to q"));
@@ -848,8 +839,8 @@ void h2_mplx_task_done(h2_mplx *m, h2_ta
 {
     H2_MPLX_ENTER_ALWAYS(m);
 
-    task_done(m, task);
     --m->tasks_active;
+    task_done(m, task);
     
     if (m->join_wait) {
         apr_thread_cond_signal(m->join_wait);
@@ -867,94 +858,162 @@ void h2_mplx_task_done(h2_mplx *m, h2_ta
  * h2_mplx DoS protection
  ******************************************************************************/
 
-static int latest_repeatable_unsubmitted_iter(void *data, void *val)
+static int timed_out_busy_iter(void *data, void *val)
 {
     stream_iter_ctx *ctx = data;
     h2_stream *stream = val;
     
-    if (stream->task && !stream->task->worker_done 
-        && h2_task_can_redo(stream->task) 
-        && !h2_ihash_get(ctx->m->sredo, stream->id)) {
-        if (!h2_stream_is_ready(stream)) {
-            /* this task occupies a worker, the response has not been submitted 
-             * yet, not been cancelled and it is a repeatable request
-             * -> it can be re-scheduled later */
-            if (!ctx->stream 
-                || (ctx->stream->task->started_at < stream->task->started_at)) {
-                /* we did not have one or this one was started later */
-                ctx->stream = stream;
-            }
-        }
+    if (h2_task_has_started(stream->task) && !stream->task->worker_done
+        && (ctx->now - stream->task->started_at) > stream->task->timeout) {
+        /* timed out stream occupying a worker, found */
+        ctx->stream = stream;
+        return 0;
     }
     return 1;
 }
 
-static h2_stream *get_latest_repeatable_unsubmitted_stream(h2_mplx *m) 
+static h2_stream *get_timed_out_busy_stream(h2_mplx *m) 
 {
     stream_iter_ctx ctx;
     ctx.m = m;
     ctx.stream = NULL;
-    h2_ihash_iter(m->streams, latest_repeatable_unsubmitted_iter, &ctx);
+    ctx.now = apr_time_now();
+    h2_ihash_iter(m->streams, timed_out_busy_iter, &ctx);
     return ctx.stream;
 }
 
-static int timed_out_busy_iter(void *data, void *val)
+static int latest_repeatable_unsubmitted_iter(void *data, void *val)
 {
     stream_iter_ctx *ctx = data;
     h2_stream *stream = val;
-    if (stream->task && !stream->task->worker_done
-        && (ctx->now - stream->task->started_at) > stream->task->timeout) {
-        /* timed out stream occupying a worker, found */
-        ctx->stream = stream;
-        return 0;
+    
+    if (!stream->task) goto leave;
+    if (!h2_task_has_started(stream->task) || stream->task->worker_done) goto leave;
+    if (h2_stream_is_ready(stream)) goto leave;
+    if (stream->task->redo) {
+        ++ctx->count;
+        goto leave;
+    }
+    if (h2_task_can_redo(stream->task)) {
+        /* this task occupies a worker, the response has not been submitted 
+         * yet, not been cancelled and it is a repeatable request
+         * -> we could redo it later */
+        if (!ctx->stream 
+            || (ctx->stream->task->started_at < stream->task->started_at)) {
+            /* we did not have one or this one was started later */
+            ctx->stream = stream;
+        }
     }
+leave:
     return 1;
 }
 
-static h2_stream *get_timed_out_busy_stream(h2_mplx *m) 
+static apr_status_t assess_task_to_throttle(h2_task **ptask, h2_mplx *m) 
 {
     stream_iter_ctx ctx;
+    
+    /* count the running tasks already marked for redo and get one that could
+     * be throttled */
+    *ptask = NULL;
     ctx.m = m;
     ctx.stream = NULL;
-    ctx.now = apr_time_now();
-    h2_ihash_iter(m->streams, timed_out_busy_iter, &ctx);
-    return ctx.stream;
+    ctx.count = 0;
+    h2_ihash_iter(m->streams, latest_repeatable_unsubmitted_iter, &ctx);
+    if (m->tasks_active - ctx.count > m->limit_active) {
+        /* we are above the limit of running tasks, accounting for the ones
+         * already throttled. */
+        if (ctx.stream && ctx.stream->task) {
+            *ptask = ctx.stream->task;
+            return APR_EAGAIN;
+        }
+        /* above limit, but seeing no candidate for easy throttling */
+        if (get_timed_out_busy_stream(m)) {
+            /* Too many busy workers, unable to cancel enough streams
+             * and with a busy, timed out stream, we tell the client
+             * to go away... */
+            return APR_TIMEUP;
+        }
+    }
+    return APR_SUCCESS;
 }
 
 static apr_status_t unschedule_slow_tasks(h2_mplx *m) 
 {
-    h2_stream *stream;
-    int n;
+    h2_task *task;
+    apr_status_t rv;
     
     /* Try to get rid of streams that occupy workers. Look for safe requests
      * that are repeatable. If none found, fail the connection.
      */
-    n = (m->tasks_active - m->limit_active - (int)h2_ihash_count(m->sredo));
-    while (n > 0 && (stream = get_latest_repeatable_unsubmitted_stream(m))) {
+    while (APR_EAGAIN == (rv = assess_task_to_throttle(&task, m))) {
         ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
                       "h2_mplx(%s): unschedule, resetting task for redo later",
-                      stream->task->id);
-        h2_task_rst(stream->task, H2_ERR_CANCEL);
-        h2_ihash_add(m->sredo, stream);
-        --n;
+                      task->id);
+        task->redo = 1;
+        h2_task_rst(task, H2_ERR_CANCEL);
     }
     
-    if ((m->tasks_active - h2_ihash_count(m->sredo)) > m->limit_active) {
-        h2_stream *stream = get_timed_out_busy_stream(m);
-        if (stream) {
-            /* Too many busy workers, unable to cancel enough streams
-             * and with a busy, timed out stream, we tell the client
-             * to go away... */
-            return APR_TIMEUP;
-        }
+    return rv;
+}
+
+static apr_status_t mplx_be_happy(h2_mplx *m)
+{
+    apr_time_t now;            
+
+    --m->irritations_since;
+    now = apr_time_now();
+    if (m->limit_active < m->max_active 
+        && (now - m->last_mood_change >= m->mood_update_interval
+            || m->irritations_since < -m->limit_active)) {
+        m->limit_active = H2MIN(m->limit_active * 2, m->max_active);
+        m->last_mood_change = now;
+        m->irritations_since = 0;
+        ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
+                      "h2_mplx(%ld): mood update, increasing worker limit to %d",
+                      m->id, m->limit_active);
     }
     return APR_SUCCESS;
 }
 
-apr_status_t h2_mplx_idle(h2_mplx *m)
+static apr_status_t mplx_be_annoyed(h2_mplx *m)
 {
     apr_status_t status = APR_SUCCESS;
     apr_time_t now;            
+
+    ++m->irritations_since;
+    now = apr_time_now();
+    if (m->limit_active > 2 && 
+        ((now - m->last_mood_change >= m->mood_update_interval)
+         || (m->irritations_since >= m->limit_active))) {
+            
+        if (m->limit_active > 16) {
+            m->limit_active = 16;
+        }
+        else if (m->limit_active > 8) {
+            m->limit_active = 8;
+        }
+        else if (m->limit_active > 4) {
+            m->limit_active = 4;
+        }
+        else if (m->limit_active > 2) {
+            m->limit_active = 2;
+        }
+        m->last_mood_change = now;
+        m->irritations_since = 0;
+        ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
+                      "h2_mplx(%ld): mood update, decreasing worker limit to %d",
+                      m->id, m->limit_active);
+    }
+    
+    if (m->tasks_active > m->limit_active) {
+        status = unschedule_slow_tasks(m);
+    }
+    return status;
+}
+
+apr_status_t h2_mplx_idle(h2_mplx *m)
+{
+    apr_status_t status = APR_SUCCESS;
     apr_size_t scount;
     
     H2_MPLX_ENTER(m);
@@ -974,31 +1033,7 @@ apr_status_t h2_mplx_idle(h2_mplx *m)
              * of busy workers we allow for this connection until it
              * well behaves.
              */
-            now = apr_time_now();
-            m->last_idle_block = now;
-            if (m->limit_active > 2 
-                && now - m->last_limit_change >= m->limit_change_interval) {
-                if (m->limit_active > 16) {
-                    m->limit_active = 16;
-                }
-                else if (m->limit_active > 8) {
-                    m->limit_active = 8;
-                }
-                else if (m->limit_active > 4) {
-                    m->limit_active = 4;
-                }
-                else if (m->limit_active > 2) {
-                    m->limit_active = 2;
-                }
-                m->last_limit_change = now;
-                ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
-                              "h2_mplx(%ld): decrease worker limit to %d",
-                              m->id, m->limit_active);
-            }
-            
-            if (m->tasks_active > m->limit_active) {
-                status = unschedule_slow_tasks(m);
-            }
+            status = mplx_be_annoyed(m);
         }
         else if (!h2_iq_empty(m->q)) {
             ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
@@ -1093,11 +1128,24 @@ int h2_mplx_awaits_data(h2_mplx *m)
     if (h2_ihash_empty(m->streams)) {
         waiting = 0;
     }
-    else if (!m->tasks_active && !h2_ififo_count(m->readyq)
-             && h2_iq_empty(m->q)) {
+    else if (!m->tasks_active && !h2_ififo_count(m->readyq) && h2_iq_empty(m->q)) {
         waiting = 0;
     }
 
     H2_MPLX_LEAVE(m);
     return waiting;
 }
+
+apr_status_t h2_mplx_client_rst(h2_mplx *m, int stream_id)
+{
+    h2_stream *stream;
+    apr_status_t status = APR_SUCCESS;
+    
+    H2_MPLX_ENTER_ALWAYS(m);
+    stream = h2_ihash_get(m->streams, stream_id);
+    if (stream && stream->task) {
+        status = mplx_be_annoyed(m);
+    }
+    H2_MPLX_LEAVE(m);
+    return status;
+}
Index: httpd-2.4.33/modules/http2/h2_mplx.h
===================================================================
--- httpd-2.4.33.orig/modules/http2/h2_mplx.h	2019-08-19 16:16:12.975819491 +0200
+++ httpd-2.4.33/modules/http2/h2_mplx.h	2019-08-19 16:16:13.099820280 +0200
@@ -63,7 +63,6 @@ struct h2_mplx {
     unsigned int is_registered;     /* is registered at h2_workers */
 
     struct h2_ihash_t *streams;     /* all streams currently processing */
-    struct h2_ihash_t *sredo;       /* all streams that need to be re-started */
     struct h2_ihash_t *shold;       /* all streams done with task ongoing */
     struct h2_ihash_t *spurge;      /* all streams done, ready for destroy */
     
@@ -77,10 +76,10 @@ struct h2_mplx {
     int tasks_active;       /* # of tasks being processed from this mplx */
     int limit_active;       /* current limit on active tasks, dynamic */
     int max_active;         /* max, hard limit # of active tasks in a process */
-    apr_time_t last_idle_block;      /* last time, this mplx entered IDLE while
-                                      * streams were ready */
-    apr_time_t last_limit_change;    /* last time, worker limit changed */
-    apr_interval_time_t limit_change_interval;
+    
+    apr_time_t last_mood_change; /* last time the worker limit changed */
+    apr_interval_time_t mood_update_interval; /* how frequently we update, at most */
+    int irritations_since; /* irritations (>0) or happy events (<0) since last mood change */
 
     apr_thread_mutex_t *lock;
     struct apr_thread_cond_t *added_output;
@@ -205,6 +204,8 @@ typedef int h2_mplx_stream_cb(struct h2_
 
 apr_status_t h2_mplx_stream_do(h2_mplx *m, h2_mplx_stream_cb *cb, void *ctx);
 
+apr_status_t h2_mplx_client_rst(h2_mplx *m, int stream_id);
+
 /*******************************************************************************
  * Output handling of streams.
  ******************************************************************************/
Index: httpd-2.4.33/modules/http2/h2_proxy_session.c
===================================================================
--- httpd-2.4.33.orig/modules/http2/h2_proxy_session.c	2019-08-19 16:16:12.975819491 +0200
+++ httpd-2.4.33/modules/http2/h2_proxy_session.c	2019-08-19 16:16:13.099820280 +0200
@@ -545,7 +545,7 @@ static ssize_t stream_request_data(nghtt
         }
 
         stream->data_sent += readlen;
-        ap_log_rerror(APLOG_MARK, APLOG_DEBUG, status, stream->r, APLOGNO(03468) 
+        ap_log_rerror(APLOG_MARK, APLOG_DEBUG, status, stream->r, APLOGNO(10179) 
                       "h2_proxy_stream(%d): request DATA %ld, %ld"
                       " total, flags=%d", 
                       stream->id, (long)readlen, (long)stream->data_sent,
Index: httpd-2.4.33/modules/http2/h2_session.c
===================================================================
--- httpd-2.4.33.orig/modules/http2/h2_session.c	2019-08-19 16:16:12.975819491 +0200
+++ httpd-2.4.33/modules/http2/h2_session.c	2019-08-19 16:16:13.103820304 +0200
@@ -390,9 +390,14 @@ static int on_frame_recv_cb(nghttp2_sess
                           (int)frame->rst_stream.error_code);
             stream = h2_session_stream_get(session, frame->hd.stream_id);
             if (stream && stream->initiated_on) {
+                /* A stream reset on a request we pushed to the client. Normal,
+                 * when the client does not want it. */
                 ++session->pushes_reset;
             }
             else {
+                /* A stream reset on a request it sent us. Could happen in a browser
+                 * when the user navigates away or cancels loading - maybe. */
+                h2_mplx_client_rst(session->mplx, frame->hd.stream_id);
                 ++session->streams_reset;
             }
             break;
@@ -1703,7 +1708,7 @@ static void transit(h2_session *session,
                      * that already served requests - not fair. */
                     session->idle_sync_until = apr_time_now() + apr_time_from_sec(1);
                     s = "timeout";
-                    timeout = H2MAX(session->s->timeout, session->s->keep_alive_timeout);
+                    timeout = session->s->timeout;
                     update_child_status(session, SERVER_BUSY_READ, "idle");
                     ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c, 
                                   H2_SSSN_LOG("", session, "enter idle, timeout = %d sec"), 
@@ -1711,8 +1716,8 @@ static void transit(h2_session *session,
                 }
                 else if (session->open_streams) {
                     s = "timeout";
-                    timeout = session->s->keep_alive_timeout;
-                    update_child_status(session, SERVER_BUSY_KEEPALIVE, "idle");
+                    timeout = session->s->timeout;
+                    update_child_status(session, SERVER_BUSY_READ, "idle");
                 }
                 else {
                     /* normal keepalive setup */
@@ -1977,6 +1982,7 @@ static void on_stream_state_enter(void *
             ev_stream_closed(session, stream);
             break;
         case H2_SS_CLEANUP:
+            nghttp2_session_set_stream_user_data(session->ngh2, stream->id, NULL);
             h2_mplx_stream_cleanup(session->mplx, stream);
             break;
         default:
@@ -2169,6 +2175,14 @@ apr_status_t h2_session_process(h2_sessi
                         session->have_read = 1;
                     }
                     else if (APR_STATUS_IS_EAGAIN(status) || APR_STATUS_IS_TIMEUP(status)) {
+                        status = h2_mplx_idle(session->mplx);
+                        if (status == APR_EAGAIN) {
+                            break;
+                        }
+                        else if (status != APR_SUCCESS) {
+                            dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 
+                                           H2_ERR_ENHANCE_YOUR_CALM, "less is more");
+                        }
                         status = APR_EAGAIN;
                         goto out;
                     }
Index: httpd-2.4.33/modules/http2/h2_stream.c
===================================================================
--- httpd-2.4.33.orig/modules/http2/h2_stream.c	2019-08-19 16:16:12.975819491 +0200
+++ httpd-2.4.33/modules/http2/h2_stream.c	2019-08-19 16:16:13.103820304 +0200
@@ -397,13 +397,8 @@ apr_status_t h2_stream_send_frame(h2_str
                 /* start pushed stream */
                 ap_assert(stream->request == NULL);
                 ap_assert(stream->rtmp != NULL);
-                status = h2_request_end_headers(stream->rtmp, stream->pool, 1, 0);
-                if (status != APR_SUCCESS) {
-                    return status;
-                }
-                set_policy_for(stream, stream->rtmp);
-                stream->request = stream->rtmp;
-                stream->rtmp = NULL;
+                status = h2_stream_end_headers(stream, 1, 0);
+                if (status != APR_SUCCESS) goto leave;
             break;
             
         default:
@@ -415,6 +410,7 @@ apr_status_t h2_stream_send_frame(h2_str
     if (status == APR_SUCCESS && eos) {
         status = transit(stream, on_event(stream, H2_SEV_CLOSED_L));
     }
+leave:
     return status;
 }
 
@@ -455,13 +451,8 @@ apr_status_t h2_stream_recv_frame(h2_str
                      * to abort the connection here, since this is clearly a protocol error */
                     return APR_EINVAL;
                 }
-                status = h2_request_end_headers(stream->rtmp, stream->pool, eos, frame_len);
-                if (status != APR_SUCCESS) {
-                    return status;
-                }
-                set_policy_for(stream, stream->rtmp);
-                stream->request = stream->rtmp;
-                stream->rtmp = NULL;
+                status = h2_stream_end_headers(stream, eos, frame_len);
+                if (status != APR_SUCCESS) goto leave;
             }
             break;
             
@@ -472,6 +463,7 @@ apr_status_t h2_stream_recv_frame(h2_str
     if (status == APR_SUCCESS && eos) {
         status = transit(stream, on_event(stream, H2_SEV_CLOSED_R));
     }
+leave:
     return status;
 }
 
@@ -683,6 +675,8 @@ static apr_status_t add_trailer(h2_strea
     hvalue = apr_pstrndup(stream->pool, value, vlen);
     h2_util_camel_case_header(hname, nlen);
     apr_table_mergen(stream->trailers, hname, hvalue);
+    ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c, 
+                  H2_STRM_MSG(stream, "added trailer '%s: %s'"), hname, hvalue);
     
     return APR_SUCCESS;
 }
@@ -702,15 +696,19 @@ apr_status_t h2_stream_add_header(h2_str
     if (name[0] == ':') {
         if ((vlen) > session->s->limit_req_line) {
             /* pseudo header: approximation of request line size check */
-            ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
-                          H2_STRM_MSG(stream, "pseudo %s too long"), name);
+            ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, session->c,  
+                          H2_STRM_LOG(APLOGNO(10178), stream, 
+                                      "Request pseudo header exceeds "
+                                      "LimitRequestFieldSize: %s"), name);
             error = HTTP_REQUEST_URI_TOO_LARGE;
         }
     }
     else if ((nlen + 2 + vlen) > session->s->limit_req_fieldsize) {
         /* header too long */
-        ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
-                      H2_STRM_MSG(stream, "header %s too long"), name);
+        ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, session->c,  
+                      H2_STRM_LOG(APLOGNO(10180), stream,"Request header exceeds "
+                                  "LimitRequestFieldSize: %.*s"),
+                      (int)H2MIN(nlen, 80), name);
         error = HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE;
     }
     
@@ -722,8 +720,9 @@ apr_status_t h2_stream_add_header(h2_str
             h2_stream_rst(stream, H2_ERR_ENHANCE_YOUR_CALM);
             return APR_ECONNRESET;
         }
-        ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
-                      H2_STRM_MSG(stream, "too many header lines")); 
+        ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, session->c, 
+                      H2_STRM_LOG(APLOGNO(10181), stream, "Number of request headers "
+                                  "exceeds LimitRequestFields"));
         error = HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE;
     }
     
@@ -753,6 +752,47 @@ apr_status_t h2_stream_add_header(h2_str
     }
     return status;
 }
+
+typedef struct {
+    apr_size_t maxlen;
+    const char *failed_key;
+} val_len_check_ctx;
+
+static int table_check_val_len(void *baton, const char *key, const char *value)
+{
+    val_len_check_ctx *ctx = baton;
+
+    if (strlen(value) <= ctx->maxlen) return 1;
+    ctx->failed_key = key;
+    return 0;
+}
+
+apr_status_t h2_stream_end_headers(h2_stream *stream, int eos, size_t raw_bytes)
+{
+    apr_status_t status;
+    val_len_check_ctx ctx;
+    
+    status = h2_request_end_headers(stream->rtmp, stream->pool, eos, raw_bytes);
+    if (APR_SUCCESS == status) {
+        set_policy_for(stream, stream->rtmp);
+        stream->request = stream->rtmp;
+        stream->rtmp = NULL;
+        
+        ctx.maxlen = stream->session->s->limit_req_fieldsize;
+        ctx.failed_key = NULL;
+        apr_table_do(table_check_val_len, &ctx, stream->request->headers, NULL);
+        if (ctx.failed_key) {
+            ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, stream->session->c,  
+                          H2_STRM_LOG(APLOGNO(), stream,"Request header exceeds "
+                                      "LimitRequestFieldSize: %.*s"),
+                          (int)H2MIN(strlen(ctx.failed_key), 80), ctx.failed_key);
+            set_error_response(stream, HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE);
+            /* keep on returning APR_SUCCESS, so that we send an HTTP response and
+             * do not RST the stream. */
+        }
+    }
+    return status;
+}
 
 static apr_bucket *get_first_headers_bucket(apr_bucket_brigade *bb)
 {
Index: httpd-2.4.33/modules/http2/h2_stream.h
===================================================================
--- httpd-2.4.33.orig/modules/http2/h2_stream.h	2019-08-19 16:16:12.879818882 +0200
+++ httpd-2.4.33/modules/http2/h2_stream.h	2019-08-19 16:16:13.103820304 +0200
@@ -198,6 +198,10 @@ apr_status_t h2_stream_set_request_rec(h
 apr_status_t h2_stream_add_header(h2_stream *stream,
                                   const char *name, size_t nlen,
                                   const char *value, size_t vlen);
+                                  
+/* End the construction of request headers */
+apr_status_t h2_stream_end_headers(h2_stream *stream, int eos, size_t raw_bytes);
+
 
 apr_status_t h2_stream_send_frame(h2_stream *stream, int frame_type, int flags, size_t frame_len);
 apr_status_t h2_stream_recv_frame(h2_stream *stream, int frame_type, int flags, size_t frame_len);
Index: httpd-2.4.33/modules/http2/h2_task.c
===================================================================
--- httpd-2.4.33.orig/modules/http2/h2_task.c	2019-08-19 16:16:12.975819491 +0200
+++ httpd-2.4.33/modules/http2/h2_task.c	2019-08-19 16:16:13.103820304 +0200
@@ -406,8 +406,15 @@ int h2_task_can_redo(h2_task *task) {
             || !strcmp("OPTIONS", task->request->method));
 }
 
+int h2_task_has_started(h2_task *task)
+{
+    return task && task->started_at != 0;
+}
+
 void h2_task_redo(h2_task *task)
 {
+    task->started_at = 0;
+    task->worker_done = 0;
     task->rst_error = 0;
 }
 
@@ -546,7 +553,6 @@ apr_status_t h2_task_do(h2_task *task, a
     ap_assert(task);
     c = task->c;
     task->worker_started = 1;
-    task->started_at = apr_time_now();
     
     if (c->master) {
         /* Each conn_rec->id is supposed to be unique at a point in time. Since
Index: httpd-2.4.33/modules/http2/h2_task.h
===================================================================
--- httpd-2.4.33.orig/modules/http2/h2_task.h	2019-08-19 16:16:12.975819491 +0200
+++ httpd-2.4.33/modules/http2/h2_task.h	2019-08-19 16:16:13.103820304 +0200
@@ -80,6 +80,7 @@ struct h2_task {
     
     unsigned int filters_set    : 1;
     unsigned int worker_started : 1; /* h2_worker started processing */
+    unsigned int redo : 1;           /* was throttled, should be restarted later */
 
     int worker_done;                 /* h2_worker finished */
     int done_done;                   /* task_done has been handled */
@@ -101,6 +102,7 @@ apr_status_t h2_task_do(h2_task *task, a
 
 void h2_task_redo(h2_task *task);
 int h2_task_can_redo(h2_task *task);
+int h2_task_has_started(h2_task *task);
 
 /**
  * Reset the task with the given error code, resets all input/output.
Index: httpd-2.4.33/modules/http2/h2_version.h
===================================================================
--- httpd-2.4.33.orig/modules/http2/h2_version.h	2019-08-19 16:16:12.975819491 +0200
+++ httpd-2.4.33/modules/http2/h2_version.h	2019-08-19 16:16:13.103820304 +0200
@@ -27,7 +27,7 @@
  * @macro
  * Version number of the http2 module as c string
  */
-#define MOD_HTTP2_VERSION "1.14.1"
+#define MOD_HTTP2_VERSION "1.15.4"
 
 /**
  * @macro
@@ -35,6 +35,6 @@
  * release. This is a 24 bit number with 8 bits for major number, 8 bits
  * for minor and 8 bits for patch. Version 1.2.3 becomes 0x010203.
  */
-#define MOD_HTTP2_VERSION_NUM 0x010e01
+#define MOD_HTTP2_VERSION_NUM 0x010f04
 
 #endif /* mod_h2_h2_version_h */
Index: httpd-2.4.33/server/eor_bucket.c
===================================================================
--- httpd-2.4.33.orig/server/eor_bucket.c	2010-06-14 21:07:02.000000000 +0200
+++ httpd-2.4.33/server/eor_bucket.c	2019-08-19 16:16:13.103820304 +0200
@@ -19,17 +19,22 @@
 #include "http_protocol.h"
 #include "scoreboard.h"
 
+typedef struct {
+    apr_bucket_refcount refcount;
+    request_rec *data;
+} ap_bucket_eor;
+
 static apr_status_t eor_bucket_cleanup(void *data)
 {
-    apr_bucket *b = (apr_bucket *)data;
-    request_rec *r = (request_rec *)b->data;
+    request_rec **rp = data;
 
-    if (r != NULL) {
+    if (*rp) {
+        request_rec *r = *rp;
         /*
          * If eor_bucket_destroy is called after us, this prevents
          * eor_bucket_destroy from trying to destroy the pool again.
          */
-        b->data = NULL;
+        *rp = NULL;
         /* Update child status and log the transaction */
         ap_update_child_status(r->connection->sbh, SERVER_BUSY_LOG, r);
         ap_run_log_transaction(r);
@@ -50,11 +55,13 @@ static apr_status_t eor_bucket_read(apr_
 
 AP_DECLARE(apr_bucket *) ap_bucket_eor_make(apr_bucket *b, request_rec *r)
 {
-    b->length      = 0;
-    b->start       = 0;
-    b->data        = r;
-    b->type        = &ap_bucket_type_eor;
+    ap_bucket_eor *h;
+
+    h = apr_bucket_alloc(sizeof(*h), b->list);
+    h->data = r;
 
+    b = apr_bucket_shared_make(b, h, 0, 0);
+    b->type = &ap_bucket_type_eor;
     return b;
 }
 
@@ -66,7 +73,9 @@ AP_DECLARE(apr_bucket *) ap_bucket_eor_c
     APR_BUCKET_INIT(b);
     b->free = apr_bucket_free;
     b->list = list;
+    b = ap_bucket_eor_make(b, r);
     if (r) {
+        ap_bucket_eor *h = b->data;
         /*
          * Register a cleanup for the request pool as the eor bucket could
          * have been allocated from a different pool then the request pool
@@ -76,18 +85,22 @@ AP_DECLARE(apr_bucket *) ap_bucket_eor_c
          * We need to use a pre-cleanup here because a module may create a
          * sub-pool which is still needed during the log_transaction hook.
          */
-        apr_pool_pre_cleanup_register(r->pool, (void *)b, eor_bucket_cleanup);
+        apr_pool_pre_cleanup_register(r->pool, &h->data, eor_bucket_cleanup);
     }
-    return ap_bucket_eor_make(b, r);
+    return b;
 }
 
 static void eor_bucket_destroy(void *data)
 {
-    request_rec *r = (request_rec *)data;
+    ap_bucket_eor *h = data;
 
-    if (r) {
-        /* eor_bucket_cleanup will be called when the pool gets destroyed */
-        apr_pool_destroy(r->pool);
+    if (apr_bucket_shared_destroy(h)) {
+        request_rec *r = h->data;
+        if (r) {
+            /* eor_bucket_cleanup will be called when the pool gets destroyed */
+            apr_pool_destroy(r->pool);
+        }
+        apr_bucket_free(h);
     }
 }
 
@@ -97,6 +110,6 @@ AP_DECLARE_DATA const apr_bucket_type_t
     eor_bucket_read,
     apr_bucket_setaside_noop,
     apr_bucket_split_notimpl,
-    apr_bucket_simple_copy
+    apr_bucket_shared_copy
 };
 
Index: httpd-2.4.33/server/mpm/event/event.c
===================================================================
--- httpd-2.4.33.orig/server/mpm/event/event.c	2019-08-19 16:16:12.995819619 +0200
+++ httpd-2.4.33/server/mpm/event/event.c	2019-08-19 16:16:13.103820304 +0200
@@ -1111,10 +1111,11 @@ read_request:
                           "network write failure in core output filter");
             cs->pub.state = CONN_STATE_LINGER;
         }
-        else if (c->data_in_output_filters) {
+        else if (c->data_in_output_filters ||
+                 cs->pub.sense == CONN_SENSE_WANT_READ) {
             /* Still in WRITE_COMPLETION_STATE:
-             * Set a write timeout for this connection, and let the
-             * event thread poll for writeability.
+             * Set a read/write timeout for this connection, and let the
+             * event thread poll for read/writeability.
              */
             cs->queue_timestamp = apr_time_now();
             notify_suspend(cs);