File apache2-mod_http2-1.10.12.patch of Package apache2.18661
diff -up --new-file httpd-2.4.23/modules/http2/config2.m4 /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/config2.m4
--- httpd-2.4.23/modules/http2/config2.m4 2016-06-28 21:57:30.000000000 +0200
+++ /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/config2.m4 2017-10-17 13:47:14.000000000 +0200
@@ -21,7 +21,6 @@ http2_objs="dnl
mod_http2.lo dnl
h2_alt_svc.lo dnl
h2_bucket_beam.lo dnl
-h2_bucket_eoc.lo dnl
h2_bucket_eos.lo dnl
h2_config.lo dnl
h2_conn.lo dnl
@@ -30,17 +29,16 @@ h2_ctx.lo dnl
h2_filter.lo dnl
h2_from_h1.lo dnl
h2_h2.lo dnl
+h2_headers.lo dnl
h2_mplx.lo dnl
h2_ngn_shed.lo dnl
h2_push.lo dnl
h2_request.lo dnl
-h2_response.lo dnl
h2_session.lo dnl
h2_stream.lo dnl
h2_switch.lo dnl
h2_task.lo dnl
h2_util.lo dnl
-h2_worker.lo dnl
h2_workers.lo dnl
"
@@ -82,12 +80,18 @@ AC_DEFUN([APACHE_CHECK_NGHTTP2],[
if test -n "$PKGCONFIG"; then
saved_PKG_CONFIG_PATH="$PKG_CONFIG_PATH"
AC_MSG_CHECKING([for pkg-config along $PKG_CONFIG_PATH])
- if test "x$ap_nghttp2_base" != "x" -a \
- -f "${ap_nghttp2_base}/lib/pkgconfig/libnghttp2.pc"; then
- dnl Ensure that the given path is used by pkg-config too, otherwise
- dnl the system libnghttp2.pc might be picked up instead.
- PKG_CONFIG_PATH="${ap_nghttp2_base}/lib/pkgconfig${PKG_CONFIG_PATH+:}${PKG_CONFIG_PATH}"
- export PKG_CONFIG_PATH
+ if test "x$ap_nghttp2_base" != "x" ; then
+ if test -f "${ap_nghttp2_base}/lib/pkgconfig/libnghttp2.pc"; then
+ dnl Ensure that the given path is used by pkg-config too, otherwise
+ dnl the system libnghttp2.pc might be picked up instead.
+ PKG_CONFIG_PATH="${ap_nghttp2_base}/lib/pkgconfig${PKG_CONFIG_PATH+:}${PKG_CONFIG_PATH}"
+ export PKG_CONFIG_PATH
+ elif test -f "${ap_nghttp2_base}/lib64/pkgconfig/libnghttp2.pc"; then
+ dnl Ensure that the given path is used by pkg-config too, otherwise
+ dnl the system libnghttp2.pc might be picked up instead.
+ PKG_CONFIG_PATH="${ap_nghttp2_base}/lib64/pkgconfig${PKG_CONFIG_PATH+:}${PKG_CONFIG_PATH}"
+ export PKG_CONFIG_PATH
+ fi
fi
AC_ARG_ENABLE(nghttp2-staticlib-deps,APACHE_HELP_STRING(--enable-nghttp2-staticlib-deps,[link mod_http2 with dependencies of libnghttp2's static libraries (as indicated by "pkg-config --static"). Must be specified in addition to --enable-http2.]), [
if test "$enableval" = "yes"; then
@@ -154,6 +158,12 @@ dnl # nghttp2 >= 1.3.0: access to stream
dnl # nghttp2 >= 1.5.0: changing stream priorities
AC_CHECK_FUNCS([nghttp2_session_change_stream_priority],
[APR_ADDTO(MOD_CPPFLAGS, ["-DH2_NG2_CHANGE_PRIO"])], [])
+dnl # nghttp2 >= 1.14.0: invalid header callback
+ AC_CHECK_FUNCS([nghttp2_session_callbacks_set_on_invalid_header_callback],
+ [APR_ADDTO(MOD_CPPFLAGS, ["-DH2_NG2_INVALID_HEADER_CB"])], [])
+dnl # nghttp2 >= 1.15.0: get/set stream window sizes
+ AC_CHECK_FUNCS([nghttp2_session_get_stream_local_window_size],
+ [APR_ADDTO(MOD_CPPFLAGS, ["-DH2_NG2_LOCAL_WIN_SIZE"])], [])
else
AC_MSG_WARN([nghttp2 version is too old])
fi
diff -up --new-file httpd-2.4.23/modules/http2/h2_alt_svc.c /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_alt_svc.c
--- httpd-2.4.23/modules/http2/h2_alt_svc.c 2016-02-10 00:09:24.000000000 +0100
+++ /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_alt_svc.c 2016-07-20 18:09:06.000000000 +0200
@@ -86,7 +86,7 @@ static int h2_alt_svc_handler(request_re
return DECLINED;
}
- cfg = h2_config_rget(r);
+ cfg = h2_config_sget(r->server);
if (r->hostname && cfg && cfg->alt_svcs && cfg->alt_svcs->nelts > 0) {
const char *alt_svc_used = apr_table_get(r->headers_in, "Alt-Svc-Used");
if (!alt_svc_used) {
diff -up --new-file httpd-2.4.23/modules/http2/h2_bucket_beam.c /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_bucket_beam.c
--- httpd-2.4.23/modules/http2/h2_bucket_beam.c 2016-06-09 12:38:10.000000000 +0200
+++ /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_bucket_beam.c 2017-10-13 10:37:45.000000000 +0200
@@ -14,6 +14,7 @@
*/
#include <apr_lib.h>
+#include <apr_atomic.h>
#include <apr_strings.h>
#include <apr_time.h>
#include <apr_buckets.h>
@@ -21,6 +22,7 @@
#include <apr_thread_cond.h>
#include <httpd.h>
+#include <http_protocol.h>
#include <http_log.h>
#include "h2_private.h"
@@ -66,7 +68,7 @@ struct h2_beam_proxy {
apr_bucket_refcount refcount;
APR_RING_ENTRY(h2_beam_proxy) link;
h2_bucket_beam *beam;
- apr_bucket *bred;
+ apr_bucket *bsender;
apr_size_t n;
};
@@ -76,9 +78,9 @@ static apr_status_t beam_bucket_read(apr
apr_size_t *len, apr_read_type_e block)
{
h2_beam_proxy *d = b->data;
- if (d->bred) {
+ if (d->bsender) {
const char *data;
- apr_status_t status = apr_bucket_read(d->bred, &data, len, block);
+ apr_status_t status = apr_bucket_read(d->bsender, &data, len, block);
if (status == APR_SUCCESS) {
*str = data + b->start;
*len = b->length;
@@ -109,24 +111,24 @@ static void beam_bucket_destroy(void *da
static apr_bucket * h2_beam_bucket_make(apr_bucket *b,
h2_bucket_beam *beam,
- apr_bucket *bred, apr_size_t n)
+ apr_bucket *bsender, apr_size_t n)
{
h2_beam_proxy *d;
d = apr_bucket_alloc(sizeof(*d), b->list);
H2_BPROXY_LIST_INSERT_TAIL(&beam->proxies, d);
d->beam = beam;
- d->bred = bred;
+ d->bsender = bsender;
d->n = n;
- b = apr_bucket_shared_make(b, d, 0, bred? bred->length : 0);
+ b = apr_bucket_shared_make(b, d, 0, bsender? bsender->length : 0);
b->type = &h2_bucket_type_beam;
return b;
}
static apr_bucket *h2_beam_bucket_create(h2_bucket_beam *beam,
- apr_bucket *bred,
+ apr_bucket *bsender,
apr_bucket_alloc_t *list,
apr_size_t n)
{
@@ -135,28 +137,9 @@ static apr_bucket *h2_beam_bucket_create
APR_BUCKET_INIT(b);
b->free = apr_bucket_free;
b->list = list;
- return h2_beam_bucket_make(b, beam, bred, n);
+ return h2_beam_bucket_make(b, beam, bsender, n);
}
-/*static apr_status_t beam_bucket_setaside(apr_bucket *b, apr_pool_t *pool)
-{
- apr_status_t status = APR_SUCCESS;
- h2_beam_proxy *d = b->data;
- if (d->bred) {
- const char *data;
- apr_size_t len;
-
- status = apr_bucket_read(d->bred, &data, &len, APR_BLOCK_READ);
- if (status == APR_SUCCESS) {
- b = apr_bucket_heap_make(b, (char *)data + b->start, b->length, NULL);
- if (b == NULL) {
- return APR_ENOMEM;
- }
- }
- }
- return status;
-}*/
-
const apr_bucket_type_t h2_bucket_type_beam = {
"BEAM", 5, APR_BUCKET_DATA,
beam_bucket_destroy,
@@ -169,51 +152,65 @@ const apr_bucket_type_t h2_bucket_type_b
/*******************************************************************************
* h2_blist, a brigade without allocations
******************************************************************************/
-
-apr_size_t h2_util_bl_print(char *buffer, apr_size_t bmax,
- const char *tag, const char *sep,
- h2_blist *bl)
+
+static apr_array_header_t *beamers;
+
+static apr_status_t cleanup_beamers(void *dummy)
{
- apr_size_t off = 0;
- const char *sp = "";
- apr_bucket *b;
-
- if (bl) {
- memset(buffer, 0, bmax--);
- off += apr_snprintf(buffer+off, bmax-off, "%s(", tag);
- for (b = H2_BLIST_FIRST(bl);
- bmax && (b != H2_BLIST_SENTINEL(bl));
- b = APR_BUCKET_NEXT(b)) {
+ (void)dummy;
+ beamers = NULL;
+ return APR_SUCCESS;
+}
+
+void h2_register_bucket_beamer(h2_bucket_beamer *beamer)
+{
+ if (!beamers) {
+ apr_pool_cleanup_register(apr_hook_global_pool, NULL,
+ cleanup_beamers, apr_pool_cleanup_null);
+ beamers = apr_array_make(apr_hook_global_pool, 10,
+ sizeof(h2_bucket_beamer*));
+ }
+ APR_ARRAY_PUSH(beamers, h2_bucket_beamer*) = beamer;
+}
+
+static apr_bucket *h2_beam_bucket(h2_bucket_beam *beam,
+ apr_bucket_brigade *dest,
+ const apr_bucket *src)
+{
+ apr_bucket *b = NULL;
+ int i;
+ if (beamers) {
+ for (i = 0; i < beamers->nelts && b == NULL; ++i) {
+ h2_bucket_beamer *beamer;
- off += h2_util_bucket_print(buffer+off, bmax-off, b, sp);
- sp = " ";
+ beamer = APR_ARRAY_IDX(beamers, i, h2_bucket_beamer*);
+ b = beamer(beam, dest, src);
}
- off += apr_snprintf(buffer+off, bmax-off, ")%s", sep);
- }
- else {
- off += apr_snprintf(buffer+off, bmax-off, "%s(null)%s", tag, sep);
}
- return off;
+ return b;
}
-
/*******************************************************************************
* bucket beam that can transport buckets across threads
******************************************************************************/
+static void mutex_leave(void *ctx, apr_thread_mutex_t *lock)
+{
+ apr_thread_mutex_unlock(lock);
+}
+
+static apr_status_t mutex_enter(void *ctx, h2_beam_lock *pbl)
+{
+ h2_bucket_beam *beam = ctx;
+ pbl->mutex = beam->lock;
+ pbl->leave = mutex_leave;
+ return apr_thread_mutex_lock(pbl->mutex);
+}
+
static apr_status_t enter_yellow(h2_bucket_beam *beam, h2_beam_lock *pbl)
{
- h2_beam_mutex_enter *enter = beam->m_enter;
- if (enter) {
- void *ctx = beam->m_ctx;
- if (ctx) {
- return enter(ctx, pbl);
- }
- }
- pbl->mutex = NULL;
- pbl->leave = NULL;
- return APR_SUCCESS;
+ return mutex_enter(beam, pbl);
}
static void leave_yellow(h2_bucket_beam *beam, h2_beam_lock *pbl)
@@ -223,18 +220,65 @@ static void leave_yellow(h2_bucket_beam
}
}
-static apr_off_t calc_buffered(h2_bucket_beam *beam)
+static apr_off_t bucket_mem_used(apr_bucket *b)
{
- apr_off_t len = 0;
+ if (APR_BUCKET_IS_FILE(b)) {
+ return 0;
+ }
+ else {
+ /* should all have determinate length */
+ return b->length;
+ }
+}
+
+static int report_consumption(h2_bucket_beam *beam, h2_beam_lock *pbl)
+{
+ int rv = 0;
+ apr_off_t len = beam->received_bytes - beam->cons_bytes_reported;
+ h2_beam_io_callback *cb = beam->cons_io_cb;
+
+ if (len > 0) {
+ if (cb) {
+ void *ctx = beam->cons_ctx;
+
+ if (pbl) leave_yellow(beam, pbl);
+ cb(ctx, beam, len);
+ if (pbl) enter_yellow(beam, pbl);
+ rv = 1;
+ }
+ beam->cons_bytes_reported += len;
+ }
+ return rv;
+}
+
+static void report_prod_io(h2_bucket_beam *beam, int force, h2_beam_lock *pbl)
+{
+ apr_off_t len = beam->sent_bytes - beam->prod_bytes_reported;
+ if (force || len > 0) {
+ h2_beam_io_callback *cb = beam->prod_io_cb;
+ if (cb) {
+ void *ctx = beam->prod_ctx;
+
+ leave_yellow(beam, pbl);
+ cb(ctx, beam, len);
+ enter_yellow(beam, pbl);
+ }
+ beam->prod_bytes_reported += len;
+ }
+}
+
+static apr_size_t calc_buffered(h2_bucket_beam *beam)
+{
+ apr_size_t len = 0;
apr_bucket *b;
- for (b = H2_BLIST_FIRST(&beam->red);
- b != H2_BLIST_SENTINEL(&beam->red);
+ for (b = H2_BLIST_FIRST(&beam->send_list);
+ b != H2_BLIST_SENTINEL(&beam->send_list);
b = APR_BUCKET_NEXT(b)) {
if (b->length == ((apr_size_t)-1)) {
/* do not count */
}
else if (APR_BUCKET_IS_FILE(b)) {
- /* if unread, has no real mem footprint. how to test? */
+ /* if unread, has no real mem footprint. */
}
else {
len += b->length;
@@ -243,14 +287,14 @@ static apr_off_t calc_buffered(h2_bucket
return len;
}
-static void r_purge_reds(h2_bucket_beam *beam)
+static void r_purge_sent(h2_bucket_beam *beam)
{
- apr_bucket *bred;
- /* delete all red buckets in purge brigade, needs to be called
- * from red thread only */
- while (!H2_BLIST_EMPTY(&beam->purge)) {
- bred = H2_BLIST_FIRST(&beam->purge);
- apr_bucket_delete(bred);
+ apr_bucket *b;
+ /* delete all sender buckets in purge brigade, needs to be called
+ * from sender thread only */
+ while (!H2_BLIST_EMPTY(&beam->purge_list)) {
+ b = H2_BLIST_FIRST(&beam->purge_list);
+ apr_bucket_delete(b);
}
}
@@ -263,30 +307,80 @@ static apr_size_t calc_space_left(h2_buc
return APR_SIZE_MAX;
}
-static apr_status_t wait_cond(h2_bucket_beam *beam, apr_thread_mutex_t *lock)
+static int buffer_is_empty(h2_bucket_beam *beam)
+{
+ return ((!beam->recv_buffer || APR_BRIGADE_EMPTY(beam->recv_buffer))
+ && H2_BLIST_EMPTY(&beam->send_list));
+}
+
+static apr_status_t wait_empty(h2_bucket_beam *beam, apr_read_type_e block,
+ apr_thread_mutex_t *lock)
{
- if (beam->timeout > 0) {
- return apr_thread_cond_timedwait(beam->m_cond, lock, beam->timeout);
+ apr_status_t rv = APR_SUCCESS;
+
+ while (!buffer_is_empty(beam) && APR_SUCCESS == rv) {
+ if (APR_BLOCK_READ != block || !lock) {
+ rv = APR_EAGAIN;
+ }
+ else if (beam->timeout > 0) {
+ rv = apr_thread_cond_timedwait(beam->change, lock, beam->timeout);
+ }
+ else {
+ rv = apr_thread_cond_wait(beam->change, lock);
+ }
}
- else {
- return apr_thread_cond_wait(beam->m_cond, lock);
+ return rv;
+}
+
+static apr_status_t wait_not_empty(h2_bucket_beam *beam, apr_read_type_e block,
+ apr_thread_mutex_t *lock)
+{
+ apr_status_t rv = APR_SUCCESS;
+
+ while (buffer_is_empty(beam) && APR_SUCCESS == rv) {
+ if (beam->aborted) {
+ rv = APR_ECONNABORTED;
+ }
+ else if (beam->closed) {
+ rv = APR_EOF;
+ }
+ else if (APR_BLOCK_READ != block || !lock) {
+ rv = APR_EAGAIN;
+ }
+ else if (beam->timeout > 0) {
+ rv = apr_thread_cond_timedwait(beam->change, lock, beam->timeout);
+ }
+ else {
+ rv = apr_thread_cond_wait(beam->change, lock);
+ }
}
+ return rv;
}
-static apr_status_t r_wait_space(h2_bucket_beam *beam, apr_read_type_e block,
- h2_beam_lock *pbl, apr_off_t *premain)
+static apr_status_t wait_not_full(h2_bucket_beam *beam, apr_read_type_e block,
+ apr_size_t *pspace_left, h2_beam_lock *bl)
{
- *premain = calc_space_left(beam);
- while (!beam->aborted && *premain <= 0
- && (block == APR_BLOCK_READ) && pbl->mutex) {
- apr_status_t status = wait_cond(beam, pbl->mutex);
- if (APR_STATUS_IS_TIMEUP(status)) {
- return status;
+ apr_status_t rv = APR_SUCCESS;
+ apr_size_t left;
+
+ while (0 == (left = calc_space_left(beam)) && APR_SUCCESS == rv) {
+ if (beam->aborted) {
+ rv = APR_ECONNABORTED;
+ }
+ else if (block != APR_BLOCK_READ || !bl->mutex) {
+ rv = APR_EAGAIN;
+ }
+ else {
+ if (beam->timeout > 0) {
+ rv = apr_thread_cond_timedwait(beam->change, bl->mutex, beam->timeout);
+ }
+ else {
+ rv = apr_thread_cond_wait(beam->change, bl->mutex);
+ }
}
- r_purge_reds(beam);
- *premain = calc_space_left(beam);
}
- return beam->aborted? APR_ECONNABORTED : APR_SUCCESS;
+ *pspace_left = left;
+ return rv;
}
static void h2_beam_emitted(h2_bucket_beam *beam, h2_beam_proxy *proxy)
@@ -298,34 +392,34 @@ static void h2_beam_emitted(h2_bucket_be
/* even when beam buckets are split, only the one where
* refcount drops to 0 will call us */
H2_BPROXY_REMOVE(proxy);
- /* invoked from green thread, the last beam bucket for the red
- * bucket bred is about to be destroyed.
+ /* invoked from receiver thread, the last beam bucket for the send
+ * bucket is about to be destroyed.
* remove it from the hold, where it should be now */
- if (proxy->bred) {
- for (b = H2_BLIST_FIRST(&beam->hold);
- b != H2_BLIST_SENTINEL(&beam->hold);
+ if (proxy->bsender) {
+ for (b = H2_BLIST_FIRST(&beam->hold_list);
+ b != H2_BLIST_SENTINEL(&beam->hold_list);
b = APR_BUCKET_NEXT(b)) {
- if (b == proxy->bred) {
+ if (b == proxy->bsender) {
break;
}
}
- if (b != H2_BLIST_SENTINEL(&beam->hold)) {
+ if (b != H2_BLIST_SENTINEL(&beam->hold_list)) {
/* bucket is in hold as it should be, mark this one
* and all before it for purging. We might have placed meta
- * buckets without a green proxy into the hold before it
+ * buckets without a receiver proxy into the hold before it
* and schedule them for purging now */
- for (b = H2_BLIST_FIRST(&beam->hold);
- b != H2_BLIST_SENTINEL(&beam->hold);
+ for (b = H2_BLIST_FIRST(&beam->hold_list);
+ b != H2_BLIST_SENTINEL(&beam->hold_list);
b = next) {
next = APR_BUCKET_NEXT(b);
- if (b == proxy->bred) {
+ if (b == proxy->bsender) {
APR_BUCKET_REMOVE(b);
- H2_BLIST_INSERT_TAIL(&beam->purge, b);
+ H2_BLIST_INSERT_TAIL(&beam->purge_list, b);
break;
}
else if (APR_BUCKET_IS_METADATA(b)) {
APR_BUCKET_REMOVE(b);
- H2_BLIST_INSERT_TAIL(&beam->purge, b);
+ H2_BLIST_INSERT_TAIL(&beam->purge_list, b);
}
else {
/* another data bucket before this one in hold. this
@@ -334,50 +428,28 @@ static void h2_beam_emitted(h2_bucket_be
}
}
- proxy->bred = NULL;
+ proxy->bsender = NULL;
}
else {
/* it should be there unless we screwed up */
- ap_log_perror(APLOG_MARK, APLOG_WARNING, 0, beam->red_pool,
+ ap_log_perror(APLOG_MARK, APLOG_WARNING, 0, beam->send_pool,
APLOGNO(03384) "h2_beam(%d-%s): emitted bucket not "
"in hold, n=%d", beam->id, beam->tag,
(int)proxy->n);
- AP_DEBUG_ASSERT(!proxy->bred);
+ ap_assert(!proxy->bsender);
}
}
/* notify anyone waiting on space to become available */
if (!bl.mutex) {
- r_purge_reds(beam);
+ r_purge_sent(beam);
}
- else if (beam->m_cond) {
- apr_thread_cond_broadcast(beam->m_cond);
+ else {
+ apr_thread_cond_broadcast(beam->change);
}
leave_yellow(beam, &bl);
}
}
-static void report_consumption(h2_bucket_beam *beam, int force)
-{
- if (force || beam->received_bytes != beam->reported_consumed_bytes) {
- if (beam->consumed_fn) {
- beam->consumed_fn(beam->consumed_ctx, beam, beam->received_bytes
- - beam->reported_consumed_bytes);
- }
- beam->reported_consumed_bytes = beam->received_bytes;
- }
-}
-
-static void report_production(h2_bucket_beam *beam, int force)
-{
- if (force || beam->sent_bytes != beam->reported_produced_bytes) {
- if (beam->produced_fn) {
- beam->produced_fn(beam->produced_ctx, beam, beam->sent_bytes
- - beam->reported_produced_bytes);
- }
- beam->reported_produced_bytes = beam->sent_bytes;
- }
-}
-
static void h2_blist_cleanup(h2_blist *bl)
{
apr_bucket *e;
@@ -392,64 +464,188 @@ static apr_status_t beam_close(h2_bucket
{
if (!beam->closed) {
beam->closed = 1;
- if (beam->m_cond) {
- apr_thread_cond_broadcast(beam->m_cond);
- }
+ apr_thread_cond_broadcast(beam->change);
}
return APR_SUCCESS;
}
-static apr_status_t beam_cleanup(void *data)
+int h2_beam_is_closed(h2_bucket_beam *beam)
+{
+ return beam->closed;
+}
+
+static int pool_register(h2_bucket_beam *beam, apr_pool_t *pool,
+ apr_status_t (*cleanup)(void *))
+{
+ if (pool && pool != beam->pool) {
+ apr_pool_pre_cleanup_register(pool, beam, cleanup);
+ return 1;
+ }
+ return 0;
+}
+
+static int pool_kill(h2_bucket_beam *beam, apr_pool_t *pool,
+ apr_status_t (*cleanup)(void *)) {
+ if (pool && pool != beam->pool) {
+ apr_pool_cleanup_kill(pool, beam, cleanup);
+ return 1;
+ }
+ return 0;
+}
+
+static apr_status_t beam_recv_cleanup(void *data)
{
h2_bucket_beam *beam = data;
-
- beam_close(beam);
- r_purge_reds(beam);
- h2_blist_cleanup(&beam->red);
- report_consumption(beam, 0);
+ /* receiver pool has gone away, clear references */
+ beam->recv_buffer = NULL;
+ beam->recv_pool = NULL;
+ return APR_SUCCESS;
+}
+
+static apr_status_t beam_send_cleanup(void *data)
+{
+ h2_bucket_beam *beam = data;
+ /* sender is going away, clear up all references to its memory */
+ r_purge_sent(beam);
+ h2_blist_cleanup(&beam->send_list);
+ report_consumption(beam, NULL);
while (!H2_BPROXY_LIST_EMPTY(&beam->proxies)) {
h2_beam_proxy *proxy = H2_BPROXY_LIST_FIRST(&beam->proxies);
H2_BPROXY_REMOVE(proxy);
proxy->beam = NULL;
- proxy->bred = NULL;
+ proxy->bsender = NULL;
}
- h2_blist_cleanup(&beam->purge);
- h2_blist_cleanup(&beam->hold);
-
+ h2_blist_cleanup(&beam->purge_list);
+ h2_blist_cleanup(&beam->hold_list);
+ beam->send_pool = NULL;
return APR_SUCCESS;
}
+static void beam_set_send_pool(h2_bucket_beam *beam, apr_pool_t *pool)
+{
+ if (beam->send_pool != pool) {
+ if (beam->send_pool && beam->send_pool != beam->pool) {
+ pool_kill(beam, beam->send_pool, beam_send_cleanup);
+ beam_send_cleanup(beam);
+ }
+ beam->send_pool = pool;
+ pool_register(beam, beam->send_pool, beam_send_cleanup);
+ }
+}
+
+static void recv_buffer_cleanup(h2_bucket_beam *beam, h2_beam_lock *bl)
+{
+ if (beam->recv_buffer && !APR_BRIGADE_EMPTY(beam->recv_buffer)) {
+ apr_bucket_brigade *bb = beam->recv_buffer;
+ apr_off_t bblen = 0;
+
+ beam->recv_buffer = NULL;
+ apr_brigade_length(bb, 0, &bblen);
+ beam->received_bytes += bblen;
+
+ /* need to do this unlocked since bucket destroy might
+ * call this beam again. */
+ if (bl) leave_yellow(beam, bl);
+ apr_brigade_destroy(bb);
+ if (bl) enter_yellow(beam, bl);
+
+ if (beam->cons_ev_cb) {
+ beam->cons_ev_cb(beam->cons_ctx, beam);
+ }
+ }
+}
+
+static apr_status_t beam_cleanup(void *data)
+{
+ h2_bucket_beam *beam = data;
+ apr_status_t status = APR_SUCCESS;
+ int safe_send = (beam->owner == H2_BEAM_OWNER_SEND);
+ int safe_recv = (beam->owner == H2_BEAM_OWNER_RECV);
+
+ /*
+ * Owner of the beam is going away, depending on which side it owns,
+ * cleanup strategies will differ.
+ *
+ * In general, receiver holds references to memory from sender.
+ * Clean up receiver first, if safe, then cleanup sender, if safe.
+ */
+
+ /* When modify send is not safe, this means we still have multi-thread
+ * protection and the owner is receiving the buckets. If the sending
+ * side has not gone away, this means we could have dangling buckets
+ * in our lists that never get destroyed. This should not happen. */
+ ap_assert(safe_send || !beam->send_pool);
+ if (!H2_BLIST_EMPTY(&beam->send_list)) {
+ ap_assert(beam->send_pool);
+ }
+
+ if (safe_recv) {
+ if (beam->recv_pool) {
+ pool_kill(beam, beam->recv_pool, beam_recv_cleanup);
+ beam->recv_pool = NULL;
+ }
+ recv_buffer_cleanup(beam, NULL);
+ }
+ else {
+ beam->recv_buffer = NULL;
+ beam->recv_pool = NULL;
+ }
+
+ if (safe_send && beam->send_pool) {
+ pool_kill(beam, beam->send_pool, beam_send_cleanup);
+ status = beam_send_cleanup(beam);
+ }
+
+ if (safe_recv) {
+ ap_assert(H2_BPROXY_LIST_EMPTY(&beam->proxies));
+ ap_assert(H2_BLIST_EMPTY(&beam->send_list));
+ ap_assert(H2_BLIST_EMPTY(&beam->hold_list));
+ ap_assert(H2_BLIST_EMPTY(&beam->purge_list));
+ }
+ return status;
+}
+
apr_status_t h2_beam_destroy(h2_bucket_beam *beam)
{
- apr_pool_cleanup_kill(beam->red_pool, beam, beam_cleanup);
+ apr_pool_cleanup_kill(beam->pool, beam, beam_cleanup);
return beam_cleanup(beam);
}
-apr_status_t h2_beam_create(h2_bucket_beam **pbeam, apr_pool_t *red_pool,
+apr_status_t h2_beam_create(h2_bucket_beam **pbeam, apr_pool_t *pool,
int id, const char *tag,
- apr_size_t max_buf_size)
+ h2_beam_owner_t owner,
+ apr_size_t max_buf_size,
+ apr_interval_time_t timeout)
{
h2_bucket_beam *beam;
- apr_status_t status = APR_SUCCESS;
+ apr_status_t rv = APR_SUCCESS;
- beam = apr_pcalloc(red_pool, sizeof(*beam));
+ beam = apr_pcalloc(pool, sizeof(*beam));
if (!beam) {
return APR_ENOMEM;
}
beam->id = id;
beam->tag = tag;
- H2_BLIST_INIT(&beam->red);
- H2_BLIST_INIT(&beam->hold);
- H2_BLIST_INIT(&beam->purge);
+ beam->pool = pool;
+ beam->owner = owner;
+ H2_BLIST_INIT(&beam->send_list);
+ H2_BLIST_INIT(&beam->hold_list);
+ H2_BLIST_INIT(&beam->purge_list);
H2_BPROXY_LIST_INIT(&beam->proxies);
- beam->red_pool = red_pool;
+ beam->tx_mem_limits = 1;
beam->max_buf_size = max_buf_size;
+ beam->timeout = timeout;
- apr_pool_pre_cleanup_register(red_pool, beam, beam_cleanup);
- *pbeam = beam;
-
- return status;
+ rv = apr_thread_mutex_create(&beam->lock, APR_THREAD_MUTEX_DEFAULT, pool);
+ if (APR_SUCCESS == rv) {
+ rv = apr_thread_cond_create(&beam->change, pool);
+ if (APR_SUCCESS == rv) {
+ apr_pool_pre_cleanup_register(pool, beam, beam_cleanup);
+ *pbeam = beam;
+ }
+ }
+ return rv;
}
void h2_beam_buffer_size_set(h2_bucket_beam *beam, apr_size_t buffer_size)
@@ -474,21 +670,6 @@ apr_size_t h2_beam_buffer_size_get(h2_bu
return buffer_size;
}
-void h2_beam_mutex_set(h2_bucket_beam *beam,
- h2_beam_mutex_enter m_enter,
- apr_thread_cond_t *cond,
- void *m_ctx)
-{
- h2_beam_lock bl;
-
- if (enter_yellow(beam, &bl) == APR_SUCCESS) {
- beam->m_enter = m_enter;
- beam->m_ctx = m_ctx;
- beam->m_cond = cond;
- leave_yellow(beam, &bl);
- }
-}
-
void h2_beam_timeout_set(h2_bucket_beam *beam, apr_interval_time_t timeout)
{
h2_beam_lock bl;
@@ -516,13 +697,13 @@ void h2_beam_abort(h2_bucket_beam *beam)
h2_beam_lock bl;
if (enter_yellow(beam, &bl) == APR_SUCCESS) {
- r_purge_reds(beam);
- h2_blist_cleanup(&beam->red);
- beam->aborted = 1;
- report_consumption(beam, 0);
- if (beam->m_cond) {
- apr_thread_cond_broadcast(beam->m_cond);
+ if (!beam->aborted) {
+ beam->aborted = 1;
+ r_purge_sent(beam);
+ h2_blist_cleanup(&beam->send_list);
+ report_consumption(beam, &bl);
}
+ apr_thread_cond_broadcast(beam->change);
leave_yellow(beam, &bl);
}
}
@@ -532,140 +713,145 @@ apr_status_t h2_beam_close(h2_bucket_bea
h2_beam_lock bl;
if (enter_yellow(beam, &bl) == APR_SUCCESS) {
- r_purge_reds(beam);
+ r_purge_sent(beam);
beam_close(beam);
- report_consumption(beam, 0);
+ report_consumption(beam, &bl);
leave_yellow(beam, &bl);
}
return beam->aborted? APR_ECONNABORTED : APR_SUCCESS;
}
-apr_status_t h2_beam_shutdown(h2_bucket_beam *beam, apr_read_type_e block,
- int clear_buffers)
+apr_status_t h2_beam_leave(h2_bucket_beam *beam)
+{
+ h2_beam_lock bl;
+
+ if (enter_yellow(beam, &bl) == APR_SUCCESS) {
+ recv_buffer_cleanup(beam, &bl);
+ beam->aborted = 1;
+ beam_close(beam);
+ leave_yellow(beam, &bl);
+ }
+ return APR_SUCCESS;
+}
+
+apr_status_t h2_beam_wait_empty(h2_bucket_beam *beam, apr_read_type_e block)
{
apr_status_t status;
h2_beam_lock bl;
if ((status = enter_yellow(beam, &bl)) == APR_SUCCESS) {
- if (clear_buffers) {
- r_purge_reds(beam);
- h2_blist_cleanup(&beam->red);
- }
- beam_close(beam);
-
- while (status == APR_SUCCESS
- && (!H2_BPROXY_LIST_EMPTY(&beam->proxies)
- || (beam->green && !APR_BRIGADE_EMPTY(beam->green)))) {
- if (block == APR_NONBLOCK_READ || !bl.mutex) {
- status = APR_EAGAIN;
- break;
- }
- if (beam->m_cond) {
- apr_thread_cond_broadcast(beam->m_cond);
- }
- status = wait_cond(beam, bl.mutex);
- }
+ status = wait_empty(beam, block, bl.mutex);
leave_yellow(beam, &bl);
}
return status;
}
+static void move_to_hold(h2_bucket_beam *beam,
+ apr_bucket_brigade *sender_bb)
+{
+ apr_bucket *b;
+ while (sender_bb && !APR_BRIGADE_EMPTY(sender_bb)) {
+ b = APR_BRIGADE_FIRST(sender_bb);
+ APR_BUCKET_REMOVE(b);
+ H2_BLIST_INSERT_TAIL(&beam->send_list, b);
+ }
+}
+
static apr_status_t append_bucket(h2_bucket_beam *beam,
- apr_bucket *bred,
+ apr_bucket *b,
apr_read_type_e block,
- apr_pool_t *pool,
+ apr_size_t *pspace_left,
h2_beam_lock *pbl)
{
const char *data;
apr_size_t len;
- apr_off_t space_left = 0;
apr_status_t status;
+ int can_beam, check_len;
+
+ if (beam->aborted) {
+ return APR_ECONNABORTED;
+ }
- if (APR_BUCKET_IS_METADATA(bred)) {
- if (APR_BUCKET_IS_EOS(bred)) {
+ if (APR_BUCKET_IS_METADATA(b)) {
+ if (APR_BUCKET_IS_EOS(b)) {
beam->closed = 1;
}
- APR_BUCKET_REMOVE(bred);
- H2_BLIST_INSERT_TAIL(&beam->red, bred);
+ APR_BUCKET_REMOVE(b);
+ H2_BLIST_INSERT_TAIL(&beam->send_list, b);
return APR_SUCCESS;
}
- else if (APR_BUCKET_IS_FILE(bred)) {
- /* file bucket lengths do not really count */
+ else if (APR_BUCKET_IS_FILE(b)) {
+ /* For file buckets the problem is their internal readpool that
+ * is used on the first read to allocate buffer/mmap.
+ * Since setting aside a file bucket will de-register the
+ * file cleanup function from the previous pool, we need to
+ * call that only from the sender thread.
+ *
+ * Currently, we do not handle file bucket with refcount > 1 as
+ * the beam is then not in complete control of the file's lifetime.
+ * Which results in the bug that a file get closed by the receiver
+ * while the sender or the beam still have buckets using it.
+ *
+ * Additionally, we allow callbacks to prevent beaming file
+ * handles across. The use case for this is to limit the number
+ * of open file handles and rather use a less efficient beam
+ * transport. */
+ apr_bucket_file *bf = b->data;
+ apr_file_t *fd = bf->fd;
+ can_beam = (bf->refcount.refcount == 1);
+ if (can_beam && beam->can_beam_fn) {
+ can_beam = beam->can_beam_fn(beam->can_beam_ctx, beam, fd);
+ }
+ check_len = !can_beam;
}
else {
- space_left = calc_space_left(beam);
- if (space_left > 0 && bred->length == ((apr_size_t)-1)) {
+ if (b->length == ((apr_size_t)-1)) {
const char *data;
- status = apr_bucket_read(bred, &data, &len, APR_BLOCK_READ);
+ status = apr_bucket_read(b, &data, &len, APR_BLOCK_READ);
if (status != APR_SUCCESS) {
return status;
}
}
-
- if (space_left < bred->length) {
- status = r_wait_space(beam, block, pbl, &space_left);
- if (status != APR_SUCCESS) {
- return status;
- }
- if (space_left <= 0) {
- return APR_EAGAIN;
- }
- }
- /* space available, maybe need bucket split */
+ check_len = 1;
}
+ if (check_len) {
+ if (b->length > *pspace_left) {
+ apr_bucket_split(b, *pspace_left);
+ }
+ *pspace_left -= b->length;
+ }
- /* The fundamental problem is that reading a red bucket from
- * a green thread is a total NO GO, because the bucket might use
+ /* The fundamental problem is that reading a sender bucket from
+ * a receiver thread is a total NO GO, because the bucket might use
* its pool/bucket_alloc from a foreign thread and that will
* corrupt. */
status = APR_ENOTIMPL;
- if (beam->closed && bred->length > 0) {
- status = APR_EOF;
- }
- else if (APR_BUCKET_IS_TRANSIENT(bred)) {
+ if (APR_BUCKET_IS_TRANSIENT(b)) {
/* this takes care of transient buckets and converts them
* into heap ones. Other bucket types might or might not be
* affected by this. */
- status = apr_bucket_setaside(bred, pool);
+ status = apr_bucket_setaside(b, beam->send_pool);
}
- else if (APR_BUCKET_IS_HEAP(bred)) {
- /* For heap buckets read from a green thread is fine. The
+ else if (APR_BUCKET_IS_HEAP(b)) {
+ /* For heap buckets read from a receiver thread is fine. The
* data will be there and live until the bucket itself is
* destroyed. */
status = APR_SUCCESS;
}
- else if (APR_BUCKET_IS_POOL(bred)) {
+ else if (APR_BUCKET_IS_POOL(b)) {
/* pool buckets are bastards that register at pool cleanup
* to morph themselves into heap buckets. That may happen anytime,
* even after the bucket data pointer has been read. So at
- * any time inside the green thread, the pool bucket memory
+ * any time inside the receiver thread, the pool bucket memory
* may disappear. yikes. */
- status = apr_bucket_read(bred, &data, &len, APR_BLOCK_READ);
+ status = apr_bucket_read(b, &data, &len, APR_BLOCK_READ);
if (status == APR_SUCCESS) {
- apr_bucket_heap_make(bred, data, len, NULL);
+ apr_bucket_heap_make(b, data, len, NULL);
}
}
- else if (APR_BUCKET_IS_FILE(bred)) {
- /* For file buckets the problem is their internal readpool that
- * is used on the first read to allocate buffer/mmap.
- * Since setting aside a file bucket will de-register the
- * file cleanup function from the previous pool, we need to
- * call that from a red thread.
- * Additionally, we allow callbacks to prevent beaming file
- * handles across. The use case for this is to limit the number
- * of open file handles and rather use a less efficient beam
- * transport. */
- apr_file_t *fd = ((apr_bucket_file *)bred->data)->fd;
- int can_beam = 1;
- if (beam->last_beamed != fd && beam->can_beam_fn) {
- can_beam = beam->can_beam_fn(beam->can_beam_ctx, beam, fd);
- }
- if (can_beam) {
- beam->last_beamed = fd;
- status = apr_bucket_setaside(bred, pool);
- }
- /* else: enter ENOTIMPL case below */
+ else if (APR_BUCKET_IS_FILE(b) && can_beam) {
+ status = apr_bucket_setaside(b, beam->send_pool);
}
if (status == APR_ENOTIMPL) {
@@ -673,17 +859,11 @@ static apr_status_t append_bucket(h2_buc
* but hope that after read, its data stays immutable for the
* lifetime of the bucket. (see pool bucket handling above for
* a counter example).
- * We do the read while in a red thread, so that the bucket may
+ * We do the read while in the sender thread, so that the bucket may
* use pools/allocators safely. */
- if (space_left < APR_BUCKET_BUFF_SIZE) {
- space_left = APR_BUCKET_BUFF_SIZE;
- }
- if (space_left < bred->length) {
- apr_bucket_split(bred, space_left);
- }
- status = apr_bucket_read(bred, &data, &len, APR_BLOCK_READ);
+ status = apr_bucket_read(b, &data, &len, APR_BLOCK_READ);
if (status == APR_SUCCESS) {
- status = apr_bucket_setaside(bred, pool);
+ status = apr_bucket_setaside(b, beam->send_pool);
}
}
@@ -691,44 +871,65 @@ static apr_status_t append_bucket(h2_buc
return status;
}
- APR_BUCKET_REMOVE(bred);
- H2_BLIST_INSERT_TAIL(&beam->red, bred);
- beam->sent_bytes += bred->length;
-
+ APR_BUCKET_REMOVE(b);
+ H2_BLIST_INSERT_TAIL(&beam->send_list, b);
+ beam->sent_bytes += b->length;
+
return APR_SUCCESS;
}
+void h2_beam_send_from(h2_bucket_beam *beam, apr_pool_t *p)
+{
+ h2_beam_lock bl;
+ /* Called from the sender thread to add buckets to the beam */
+ if (enter_yellow(beam, &bl) == APR_SUCCESS) {
+ r_purge_sent(beam);
+ beam_set_send_pool(beam, p);
+ leave_yellow(beam, &bl);
+ }
+}
+
apr_status_t h2_beam_send(h2_bucket_beam *beam,
- apr_bucket_brigade *red_brigade,
+ apr_bucket_brigade *sender_bb,
apr_read_type_e block)
{
- apr_bucket *bred;
- apr_status_t status = APR_SUCCESS;
+ apr_bucket *b;
+ apr_status_t rv = APR_SUCCESS;
+ apr_size_t space_left = 0;
h2_beam_lock bl;
- /* Called from the red thread to add buckets to the beam */
+ /* Called from the sender thread to add buckets to the beam */
if (enter_yellow(beam, &bl) == APR_SUCCESS) {
- r_purge_reds(beam);
+ ap_assert(beam->send_pool);
+ r_purge_sent(beam);
if (beam->aborted) {
- status = APR_ECONNABORTED;
+ move_to_hold(beam, sender_bb);
+ rv = APR_ECONNABORTED;
}
- else if (red_brigade) {
- int force_report = !APR_BRIGADE_EMPTY(red_brigade);
- while (!APR_BRIGADE_EMPTY(red_brigade)
- && status == APR_SUCCESS) {
- bred = APR_BRIGADE_FIRST(red_brigade);
- status = append_bucket(beam, bred, block, beam->red_pool, &bl);
- }
- report_production(beam, force_report);
- if (beam->m_cond) {
- apr_thread_cond_broadcast(beam->m_cond);
+ else if (sender_bb) {
+ int force_report = !APR_BRIGADE_EMPTY(sender_bb);
+
+ space_left = calc_space_left(beam);
+ while (!APR_BRIGADE_EMPTY(sender_bb) && APR_SUCCESS == rv) {
+ if (space_left <= 0) {
+ report_prod_io(beam, force_report, &bl);
+ rv = wait_not_full(beam, block, &space_left, &bl);
+ if (APR_SUCCESS != rv) {
+ break;
+ }
+ }
+ b = APR_BRIGADE_FIRST(sender_bb);
+ rv = append_bucket(beam, b, block, &space_left, &bl);
}
+
+ report_prod_io(beam, force_report, &bl);
+ apr_thread_cond_broadcast(beam->change);
}
- report_consumption(beam, 0);
+ report_consumption(beam, &bl);
leave_yellow(beam, &bl);
}
- return status;
+ return rv;
}
apr_status_t h2_beam_receive(h2_bucket_beam *beam,
@@ -737,63 +938,75 @@ apr_status_t h2_beam_receive(h2_bucket_b
apr_off_t readbytes)
{
h2_beam_lock bl;
- apr_bucket *bred, *bgreen, *ng;
+ apr_bucket *bsender, *brecv, *ng;
int transferred = 0;
apr_status_t status = APR_SUCCESS;
- apr_off_t remain = readbytes;
+ apr_off_t remain;
+ int transferred_buckets = 0;
- /* Called from the green thread to take buckets from the beam */
+ /* Called from the receiver thread to take buckets from the beam */
if (enter_yellow(beam, &bl) == APR_SUCCESS) {
+ if (readbytes <= 0) {
+ readbytes = APR_SIZE_MAX;
+ }
+ remain = readbytes;
+
transfer:
if (beam->aborted) {
- if (beam->green && !APR_BRIGADE_EMPTY(beam->green)) {
- apr_brigade_cleanup(beam->green);
- }
+ recv_buffer_cleanup(beam, &bl);
status = APR_ECONNABORTED;
goto leave;
}
- /* transfer enough buckets from our green brigade, if we have one */
- while (beam->green
- && !APR_BRIGADE_EMPTY(beam->green)
- && (readbytes <= 0 || remain >= 0)) {
- bgreen = APR_BRIGADE_FIRST(beam->green);
- if (readbytes > 0 && bgreen->length > 0 && remain <= 0) {
+ /* transfer enough buckets from our receiver brigade, if we have one */
+ while (remain >= 0
+ && beam->recv_buffer
+ && !APR_BRIGADE_EMPTY(beam->recv_buffer)) {
+
+ brecv = APR_BRIGADE_FIRST(beam->recv_buffer);
+ if (brecv->length > 0 && remain <= 0) {
break;
}
- APR_BUCKET_REMOVE(bgreen);
- APR_BRIGADE_INSERT_TAIL(bb, bgreen);
- remain -= bgreen->length;
+ APR_BUCKET_REMOVE(brecv);
+ APR_BRIGADE_INSERT_TAIL(bb, brecv);
+ remain -= brecv->length;
++transferred;
}
- /* transfer from our red brigade, transforming red buckets to
- * green ones until we have enough */
- while (!H2_BLIST_EMPTY(&beam->red) && (readbytes <= 0 || remain >= 0)) {
- bred = H2_BLIST_FIRST(&beam->red);
- bgreen = NULL;
-
- if (readbytes > 0 && bred->length > 0 && remain <= 0) {
+ /* transfer from our sender brigade, transforming sender buckets to
+ * receiver ones until we have enough */
+ while (remain >= 0 && !H2_BLIST_EMPTY(&beam->send_list)) {
+
+ brecv = NULL;
+ bsender = H2_BLIST_FIRST(&beam->send_list);
+ if (bsender->length > 0 && remain <= 0) {
break;
}
- if (APR_BUCKET_IS_METADATA(bred)) {
- if (APR_BUCKET_IS_EOS(bred)) {
- bgreen = apr_bucket_eos_create(bb->bucket_alloc);
+ if (APR_BUCKET_IS_METADATA(bsender)) {
+ if (APR_BUCKET_IS_EOS(bsender)) {
+ brecv = apr_bucket_eos_create(bb->bucket_alloc);
beam->close_sent = 1;
}
- else if (APR_BUCKET_IS_FLUSH(bred)) {
- bgreen = apr_bucket_flush_create(bb->bucket_alloc);
+ else if (APR_BUCKET_IS_FLUSH(bsender)) {
+ brecv = apr_bucket_flush_create(bb->bucket_alloc);
}
- else {
- /* put red into hold, no green sent out */
+ else if (AP_BUCKET_IS_ERROR(bsender)) {
+ ap_bucket_error *eb = (ap_bucket_error *)bsender;
+ brecv = ap_bucket_error_create(eb->status, eb->data,
+ bb->p, bb->bucket_alloc);
}
}
- else if (APR_BUCKET_IS_FILE(bred)) {
+ else if (bsender->length == 0) {
+ APR_BUCKET_REMOVE(bsender);
+ H2_BLIST_INSERT_TAIL(&beam->hold_list, bsender);
+ continue;
+ }
+ else if (APR_BUCKET_IS_FILE(bsender)) {
/* This is set aside into the target brigade pool so that
* any read operation messes with that pool and not
- * the red one. */
- apr_bucket_file *f = (apr_bucket_file *)bred->data;
+ * the sender one. */
+ apr_bucket_file *f = (apr_bucket_file *)bsender->data;
apr_file_t *fd = f->fd;
int setaside = (f->readpool != bb->p);
@@ -804,7 +1017,7 @@ transfer:
}
++beam->files_beamed;
}
- ng = apr_brigade_insert_file(bb, fd, bred->start, bred->length,
+ ng = apr_brigade_insert_file(bb, fd, bsender->start, bsender->length,
bb->p);
#if APR_HAS_MMAP
/* disable mmap handling as this leads to segfaults when
@@ -812,55 +1025,70 @@ transfer:
* been handed out. See also PR 59348 */
apr_bucket_file_enable_mmap(ng, 0);
#endif
- remain -= bred->length;
- ++transferred;
- APR_BUCKET_REMOVE(bred);
- H2_BLIST_INSERT_TAIL(&beam->hold, bred);
+ APR_BUCKET_REMOVE(bsender);
+ H2_BLIST_INSERT_TAIL(&beam->hold_list, bsender);
+
+ remain -= bsender->length;
++transferred;
+ ++transferred_buckets;
continue;
}
else {
- /* create a "green" standin bucket. we took care about the
- * underlying red bucket and its data when we placed it into
- * the red brigade.
- * the beam bucket will notify us on destruction that bred is
+ /* create a "receiver" standin bucket. we took care about the
+ * underlying sender bucket and its data when we placed it into
+ * the sender brigade.
+ * the beam bucket will notify us on destruction that bsender is
* no longer needed. */
- bgreen = h2_beam_bucket_create(beam, bred, bb->bucket_alloc,
+ brecv = h2_beam_bucket_create(beam, bsender, bb->bucket_alloc,
beam->buckets_sent++);
}
- /* Place the red bucket into our hold, to be destroyed when no
- * green bucket references it any more. */
- APR_BUCKET_REMOVE(bred);
- H2_BLIST_INSERT_TAIL(&beam->hold, bred);
- beam->received_bytes += bred->length;
- if (bgreen) {
- APR_BRIGADE_INSERT_TAIL(bb, bgreen);
- remain -= bgreen->length;
+ /* Place the sender bucket into our hold, to be destroyed when no
+ * receiver bucket references it any more. */
+ APR_BUCKET_REMOVE(bsender);
+ H2_BLIST_INSERT_TAIL(&beam->hold_list, bsender);
+
+ beam->received_bytes += bsender->length;
+ ++transferred_buckets;
+
+ if (brecv) {
+ APR_BRIGADE_INSERT_TAIL(bb, brecv);
+ remain -= brecv->length;
++transferred;
}
+ else {
+ /* let outside hook determine how bucket is beamed */
+ leave_yellow(beam, &bl);
+ brecv = h2_beam_bucket(beam, bb, bsender);
+ enter_yellow(beam, &bl);
+
+ while (brecv && brecv != APR_BRIGADE_SENTINEL(bb)) {
+ ++transferred;
+ remain -= brecv->length;
+ brecv = APR_BUCKET_NEXT(brecv);
+ }
+ }
}
- if (readbytes > 0 && remain < 0) {
- /* too much, put some back */
+ if (remain < 0) {
+ /* too much, put some back into our recv_buffer */
remain = readbytes;
- for (bgreen = APR_BRIGADE_FIRST(bb);
- bgreen != APR_BRIGADE_SENTINEL(bb);
- bgreen = APR_BUCKET_NEXT(bgreen)) {
- remain -= bgreen->length;
- if (remain < 0) {
- apr_bucket_split(bgreen, bgreen->length+remain);
- beam->green = apr_brigade_split_ex(bb,
- APR_BUCKET_NEXT(bgreen),
- beam->green);
- break;
- }
+ for (brecv = APR_BRIGADE_FIRST(bb);
+ brecv != APR_BRIGADE_SENTINEL(bb);
+ brecv = APR_BUCKET_NEXT(brecv)) {
+ remain -= (beam->tx_mem_limits? bucket_mem_used(brecv)
+ : brecv->length);
+ if (remain < 0) {
+ apr_bucket_split(brecv, brecv->length+remain);
+ beam->recv_buffer = apr_brigade_split_ex(bb,
+ APR_BUCKET_NEXT(brecv),
+ beam->recv_buffer);
+ break;
+ }
}
}
- if (beam->closed
- && (!beam->green || APR_BRIGADE_EMPTY(beam->green))
- && H2_BLIST_EMPTY(&beam->red)) {
+ if (beam->closed && buffer_is_empty(beam)) {
/* beam is closed and we have nothing more to receive */
if (!beam->close_sent) {
apr_bucket *b = apr_bucket_eos_create(bb->bucket_alloc);
@@ -871,22 +1099,23 @@ transfer:
}
}
+ if (transferred_buckets > 0) {
+ if (beam->cons_ev_cb) {
+ beam->cons_ev_cb(beam->cons_ctx, beam);
+ }
+ }
+
if (transferred) {
+ apr_thread_cond_broadcast(beam->change);
status = APR_SUCCESS;
}
- else if (beam->closed) {
- status = APR_EOF;
- }
- else if (block == APR_BLOCK_READ && bl.mutex && beam->m_cond) {
- status = wait_cond(beam, bl.mutex);
+ else {
+ status = wait_not_empty(beam, block, bl.mutex);
if (status != APR_SUCCESS) {
goto leave;
}
goto transfer;
}
- else {
- status = APR_EAGAIN;
- }
leave:
leave_yellow(beam, &bl);
}
@@ -894,25 +1123,25 @@ leave:
}
void h2_beam_on_consumed(h2_bucket_beam *beam,
- h2_beam_io_callback *cb, void *ctx)
+ h2_beam_ev_callback *ev_cb,
+ h2_beam_io_callback *io_cb, void *ctx)
{
h2_beam_lock bl;
-
if (enter_yellow(beam, &bl) == APR_SUCCESS) {
- beam->consumed_fn = cb;
- beam->consumed_ctx = ctx;
+ beam->cons_ev_cb = ev_cb;
+ beam->cons_io_cb = io_cb;
+ beam->cons_ctx = ctx;
leave_yellow(beam, &bl);
}
}
void h2_beam_on_produced(h2_bucket_beam *beam,
- h2_beam_io_callback *cb, void *ctx)
+ h2_beam_io_callback *io_cb, void *ctx)
{
h2_beam_lock bl;
-
if (enter_yellow(beam, &bl) == APR_SUCCESS) {
- beam->produced_fn = cb;
- beam->produced_ctx = ctx;
+ beam->prod_io_cb = io_cb;
+ beam->prod_ctx = ctx;
leave_yellow(beam, &bl);
}
}
@@ -937,8 +1166,8 @@ apr_off_t h2_beam_get_buffered(h2_bucket
h2_beam_lock bl;
if (enter_yellow(beam, &bl) == APR_SUCCESS) {
- for (b = H2_BLIST_FIRST(&beam->red);
- b != H2_BLIST_SENTINEL(&beam->red);
+ for (b = H2_BLIST_FIRST(&beam->send_list);
+ b != H2_BLIST_SENTINEL(&beam->send_list);
b = APR_BUCKET_NEXT(b)) {
/* should all have determinate length */
l += b->length;
@@ -955,16 +1184,10 @@ apr_off_t h2_beam_get_mem_used(h2_bucket
h2_beam_lock bl;
if (enter_yellow(beam, &bl) == APR_SUCCESS) {
- for (b = H2_BLIST_FIRST(&beam->red);
- b != H2_BLIST_SENTINEL(&beam->red);
+ for (b = H2_BLIST_FIRST(&beam->send_list);
+ b != H2_BLIST_SENTINEL(&beam->send_list);
b = APR_BUCKET_NEXT(b)) {
- if (APR_BUCKET_IS_FILE(b)) {
- /* do not count */
- }
- else {
- /* should all have determinate length */
- l += b->length;
- }
+ l += bucket_mem_used(b);
}
leave_yellow(beam, &bl);
}
@@ -977,16 +1200,23 @@ int h2_beam_empty(h2_bucket_beam *beam)
h2_beam_lock bl;
if (enter_yellow(beam, &bl) == APR_SUCCESS) {
- empty = (H2_BLIST_EMPTY(&beam->red)
- && (!beam->green || APR_BRIGADE_EMPTY(beam->green)));
+ empty = (H2_BLIST_EMPTY(&beam->send_list)
+ && (!beam->recv_buffer || APR_BRIGADE_EMPTY(beam->recv_buffer)));
leave_yellow(beam, &bl);
}
return empty;
}
-int h2_beam_closed(h2_bucket_beam *beam)
+int h2_beam_holds_proxies(h2_bucket_beam *beam)
{
- return beam->closed;
+ int has_proxies = 1;
+ h2_beam_lock bl;
+
+ if (enter_yellow(beam, &bl) == APR_SUCCESS) {
+ has_proxies = !H2_BPROXY_LIST_EMPTY(&beam->proxies);
+ leave_yellow(beam, &bl);
+ }
+ return has_proxies;
}
int h2_beam_was_received(h2_bucket_beam *beam)
@@ -1013,3 +1243,31 @@ apr_size_t h2_beam_get_files_beamed(h2_b
return n;
}
+int h2_beam_no_files(void *ctx, h2_bucket_beam *beam, apr_file_t *file)
+{
+ return 0;
+}
+
+int h2_beam_report_consumption(h2_bucket_beam *beam)
+{
+ h2_beam_lock bl;
+ int rv = 0;
+ if (enter_yellow(beam, &bl) == APR_SUCCESS) {
+ rv = report_consumption(beam, &bl);
+ leave_yellow(beam, &bl);
+ }
+ return rv;
+}
+
+void h2_beam_log(h2_bucket_beam *beam, conn_rec *c, int level, const char *msg)
+{
+ if (beam && APLOG_C_IS_LEVEL(c,level)) {
+ ap_log_cerror(APLOG_MARK, level, 0, c,
+ "beam(%ld-%d,%s,closed=%d,aborted=%d,empty=%d,buf=%ld): %s",
+ (c->master? c->master->id : c->id), beam->id, beam->tag,
+ beam->closed, beam->aborted, h2_beam_empty(beam),
+ (long)h2_beam_get_buffered(beam), msg);
+ }
+}
+
+
diff -up --new-file httpd-2.4.23/modules/http2/h2_bucket_beam.h /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_bucket_beam.h
--- httpd-2.4.23/modules/http2/h2_bucket_beam.h 2016-06-09 12:38:10.000000000 +0200
+++ /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_bucket_beam.h 2017-10-13 10:37:45.000000000 +0200
@@ -51,19 +51,6 @@ typedef struct {
APR_RING_PREPEND(&(a)->list, &(b)->list, apr_bucket, link); \
} while (0)
-/**
- * Print the buckets in the list into the buffer (type and lengths).
- * @param buffer the buffer to print into
- * @param bmax max number of characters to place in buffer, incl. trailing 0
- * @param tag tag string for this bucket list
- * @param sep separator to use
- * @param bl the bucket list to print
- * @return number of characters printed
- */
-apr_size_t h2_util_bl_print(char *buffer, apr_size_t bmax,
- const char *tag, const char *sep,
- h2_blist *bl);
-
/*******************************************************************************
* h2_bucket_beam
******************************************************************************/
@@ -72,27 +59,25 @@ apr_size_t h2_util_bl_print(char *buffer
* A h2_bucket_beam solves the task of transferring buckets, esp. their data,
* across threads with zero buffer copies.
*
- * When a thread, let's call it the red thread, wants to send buckets to
+ * When a thread, let's call it the sender thread, wants to send buckets to
* another, the green thread, it creates a h2_bucket_beam and adds buckets
* via the h2_beam_send(). It gives the beam to the green thread which then
* can receive buckets into its own brigade via h2_beam_receive().
*
- * Sending and receiving can happen concurrently, if a thread mutex is set
- * for the beam, see h2_beam_mutex_set.
+ * Sending and receiving can happen concurrently.
*
* The beam can limit the amount of data it accepts via the buffer_size. This
- * can also be adjusted during its lifetime. When the beam not only gets a
- * mutex but als a condition variable (in h2_beam_mutex_set()), sends and
- * receives can be done blocking. A timeout can be set for such blocks.
+ * can also be adjusted during its lifetime. Sends and receives can be done blocking.
+ * A timeout can be set for such blocks.
*
* Care needs to be taken when terminating the beam. The beam registers at
* the pool it was created with and will cleanup after itself. However, if
* received buckets do still exist, already freed memory might be accessed.
- * The beam does a AP_DEBUG_ASSERT on this condition.
+ * The beam does an assertion on this condition.
*
* The proper way of shutting down a beam is to first make sure there are no
* more green buckets out there, then cleanup the beam to purge eventually
- * still existing red buckets and then, possibly, terminate the beam itself
+ * still existing sender buckets and then, possibly, terminate the beam itself
* (or the pool it was created with).
*
* The following restrictions apply to bucket transport:
@@ -105,32 +90,32 @@ apr_size_t h2_util_bl_print(char *buffer
* - file buckets will transfer the file itself into a new bucket, if allowed
* - all other buckets are read on send to make sure data is present
*
- * This assures that when the red thread sends its red buckets, the data
- * is made accessible while still on the red side. The red bucket then enters
+ * This assures that when the sender thread sends its sender buckets, the data
+ * is made accessible while still on the sender side. The sender bucket then enters
* the beams hold storage.
- * When the green thread calls receive, red buckets in the hold are wrapped
+ * When the green thread calls receive, sender buckets in the hold are wrapped
* into special beam buckets. Beam buckets on read present the data directly
- * from the internal red one, but otherwise live on the green side. When a
+ * from the internal sender one, but otherwise live on the green side. When a
* beam bucket gets destroyed, it notifies its beam that the corresponding
- * red bucket from the hold may be destroyed.
+ * sender bucket from the hold may be destroyed.
* Since the destruction of green buckets happens in the green thread, any
- * corresponding red bucket can not immediately be destroyed, as that would
+ * corresponding sender bucket can not immediately be destroyed, as that would
* result in race conditions.
- * Instead, the beam transfers such red buckets from the hold to the purge
- * storage. Next time there is a call from the red side, the buckets in
+ * Instead, the beam transfers such sender buckets from the hold to the purge
+ * storage. Next time there is a call from the sender side, the buckets in
* purge will be deleted.
*
- * There are callbacks that can be registered with a beam:
- * - a "consumed" callback that gets called on the red side with the
+ * There are callbacks that can be registered with a beam:
+ * - a "consumed" callback that gets called on the sender side with the
* amount of data that has been received by the green side. The amount
- * is a delta from the last callback invocation. The red side can trigger
+ * is a delta from the last callback invocation. The sender side can trigger
* these callbacks by calling h2_beam_send() with a NULL brigade.
* - a "can_beam_file" callback that can prohibit the transfer of file handles
* through the beam. This will cause file buckets to be read on send and
* its data buffer will then be transports just like a heap bucket would.
* When no callback is registered, no restrictions apply and all files are
* passed through.
- * File handles transferred to the green side will stay there until the
+ * File handles transferred to the green side will stay there until the
* receiving brigade's pool is destroyed/cleared. If the pool lives very
* long or if many different files are beamed, the process might run out
* of available file handles.
@@ -154,6 +139,7 @@ typedef apr_status_t h2_beam_mutex_enter
typedef void h2_beam_io_callback(void *ctx, h2_bucket_beam *beam,
apr_off_t bytes);
+typedef void h2_beam_ev_callback(void *ctx, h2_bucket_beam *beam);
typedef struct h2_beam_proxy h2_beam_proxy;
typedef struct {
@@ -163,15 +149,29 @@ typedef struct {
typedef int h2_beam_can_beam_callback(void *ctx, h2_bucket_beam *beam,
apr_file_t *file);
+typedef enum {
+ H2_BEAM_OWNER_SEND,
+ H2_BEAM_OWNER_RECV
+} h2_beam_owner_t;
+
+/**
+ * Will deny all transfer of apr_file_t across the beam and force
+ * a data copy instead.
+ */
+int h2_beam_no_files(void *ctx, h2_bucket_beam *beam, apr_file_t *file);
+
struct h2_bucket_beam {
int id;
const char *tag;
- h2_blist red;
- h2_blist hold;
- h2_blist purge;
- apr_bucket_brigade *green;
+ apr_pool_t *pool;
+ h2_beam_owner_t owner;
+ h2_blist send_list;
+ h2_blist hold_list;
+ h2_blist purge_list;
+ apr_bucket_brigade *recv_buffer;
h2_bproxy_list proxies;
- apr_pool_t *red_pool;
+ apr_pool_t *send_pool;
+ apr_pool_t *recv_pool;
apr_size_t max_buf_size;
apr_interval_time_t timeout;
@@ -181,22 +181,24 @@ struct h2_bucket_beam {
apr_size_t buckets_sent; /* # of beam buckets sent */
apr_size_t files_beamed; /* how many file handles have been set aside */
- apr_file_t *last_beamed; /* last file beamed */
unsigned int aborted : 1;
unsigned int closed : 1;
unsigned int close_sent : 1;
+ unsigned int tx_mem_limits : 1; /* only memory size counts on transfers */
- void *m_ctx;
- h2_beam_mutex_enter *m_enter;
- struct apr_thread_cond_t *m_cond;
+ struct apr_thread_mutex_t *lock;
+ struct apr_thread_cond_t *change;
- apr_off_t reported_consumed_bytes; /* amount of bytes reported as consumed */
- h2_beam_io_callback *consumed_fn;
- void *consumed_ctx;
- apr_off_t reported_produced_bytes; /* amount of bytes reported as produced */
- h2_beam_io_callback *produced_fn;
- void *produced_ctx;
+ apr_off_t cons_bytes_reported; /* amount of bytes reported as consumed */
+ h2_beam_ev_callback *cons_ev_cb;
+ h2_beam_io_callback *cons_io_cb;
+ void *cons_ctx;
+
+ apr_off_t prod_bytes_reported; /* amount of bytes reported as produced */
+ h2_beam_io_callback *prod_io_cb;
+ void *prod_ctx;
+
h2_beam_can_beam_callback *can_beam_fn;
void *can_beam_ctx;
};
@@ -208,22 +210,25 @@ struct h2_bucket_beam {
* mutex and will be used in multiple threads. It needs a pool allocator
* that is only used inside that same mutex.
*
- * @param pbeam will hold the created beam on return
- * @param red_pool pool usable on red side, beam lifeline
+ * @param pbeam will hold the created beam on return
+ * @param pool pool owning the beam, beam will cleanup when pool released
+ * @param id identifier of the beam
+ * @param tag tag identifying beam for logging
+ * @param owner if the beam is owned by the sender or receiver, e.g. if
+ * the pool owner is using this beam for sending or receiving
* @param buffer_size maximum memory footprint of buckets buffered in beam, or
* 0 for no limitation
- *
- * Call from the red side only.
+ * @param timeout timeout for blocking operations
*/
apr_status_t h2_beam_create(h2_bucket_beam **pbeam,
- apr_pool_t *red_pool,
- int id, const char *tag,
- apr_size_t buffer_size);
+ apr_pool_t *pool,
+ int id, const char *tag,
+ h2_beam_owner_t owner,
+ apr_size_t buffer_size,
+ apr_interval_time_t timeout);
/**
* Destroys the beam immediately without cleanup.
- *
- * Call from the red side only.
*/
apr_status_t h2_beam_destroy(h2_bucket_beam *beam);
@@ -233,19 +238,26 @@ apr_status_t h2_beam_destroy(h2_bucket_b
* All accepted buckets are removed from the given brigade. Will return with
* APR_EAGAIN on non-blocking sends when not all buckets could be accepted.
*
- * Call from the red side only.
+ * Call from the sender side only.
*/
apr_status_t h2_beam_send(h2_bucket_beam *beam,
- apr_bucket_brigade *red_buckets,
+ apr_bucket_brigade *bb,
apr_read_type_e block);
/**
+ * Register the pool from which future buckets are sent. This defines
+ * the lifetime of the buckets, e.g. the pool should not be cleared/destroyed
+ * until the data is no longer needed (or has been received).
+ */
+void h2_beam_send_from(h2_bucket_beam *beam, apr_pool_t *p);
+
+/**
* Receive buckets from the beam into the given brigade. Will return APR_EOF
* when reading past an EOS bucket. Reads can be blocking until data is
* available or the beam has been closed. Non-blocking calls return APR_EAGAIN
* if no data is available.
*
- * Call from the green side only.
+ * Call from the receiver side only.
*/
apr_status_t h2_beam_receive(h2_bucket_beam *beam,
apr_bucket_brigade *green_buckets,
@@ -253,35 +265,41 @@ apr_status_t h2_beam_receive(h2_bucket_b
apr_off_t readbytes);
/**
- * Determine if beam is closed. May still contain buffered data.
- *
- * Call from red or green side.
+ * Determine if beam is empty.
*/
-int h2_beam_closed(h2_bucket_beam *beam);
+int h2_beam_empty(h2_bucket_beam *beam);
/**
- * Determine if beam is empty.
- *
- * Call from red or green side.
+ * Determine if beam has handed out proxy buckets that are not destroyed.
*/
-int h2_beam_empty(h2_bucket_beam *beam);
+int h2_beam_holds_proxies(h2_bucket_beam *beam);
/**
* Abort the beam. Will cleanup any buffered buckets and answer all send
* and receives with APR_ECONNABORTED.
*
- * Call from the red side only.
+ * Call from the sender side only.
*/
void h2_beam_abort(h2_bucket_beam *beam);
/**
* Close the beam. Sending an EOS bucket serves the same purpose.
*
- * Call from the red side only.
+ * Call from the sender side only.
*/
apr_status_t h2_beam_close(h2_bucket_beam *beam);
/**
+ * Receiver leaves the beam, e.g. will no longer read. This will
+ * interrupt any sender blocked writing and fail future sends.
+ *
+ * Call from the receiver side only.
+ */
+apr_status_t h2_beam_leave(h2_bucket_beam *beam);
+
+int h2_beam_is_closed(h2_bucket_beam *beam);
+
+/**
* Return APR_SUCCESS when all buckets in transit have been handled.
* When called with APR_BLOCK_READ and a mutex set, will wait until the green
* side has consumed all data. Otherwise APR_EAGAIN is returned.
@@ -289,15 +307,9 @@ apr_status_t h2_beam_close(h2_bucket_bea
* If a timeout is set on the beam, waiting might also time out and
* return APR_ETIMEUP.
*
- * Call from the red side only.
+ * Call from the sender side only.
*/
-apr_status_t h2_beam_shutdown(h2_bucket_beam *beam, apr_read_type_e block,
- int clear_buffers);
-
-void h2_beam_mutex_set(h2_bucket_beam *beam,
- h2_beam_mutex_enter m_enter,
- struct apr_thread_cond_t *cond,
- void *m_ctx);
+apr_status_t h2_beam_wait_empty(h2_bucket_beam *beam, apr_read_type_e block);
/**
* Set/get the timeout for blocking read/write operations. Only works
@@ -315,31 +327,53 @@ void h2_beam_buffer_size_set(h2_bucket_b
apr_size_t h2_beam_buffer_size_get(h2_bucket_beam *beam);
/**
- * Register a callback to be invoked on the red side with the
- * amount of bytes that have been consumed by the red side, since the
+ * Register a callback to be invoked on the sender side with the
+ * amount of bytes that have been consumed by the receiver, since the
* last callback invocation or reset.
* @param beam the beam to set the callback on
- * @param cb the callback or NULL
+ * @param ev_cb the callback or NULL, called when bytes are consumed
+ * @param io_cb the callback or NULL, called on sender with bytes consumed
* @param ctx the context to use in callback invocation
*
- * Call from the red side, callbacks invoked on red side.
+ * Call from the sender side, io callbacks invoked on sender side, ev callback
+ * from any side.
*/
void h2_beam_on_consumed(h2_bucket_beam *beam,
- h2_beam_io_callback *cb, void *ctx);
+ h2_beam_ev_callback *ev_cb,
+ h2_beam_io_callback *io_cb, void *ctx);
+
+/**
+ * Call any registered consumed handler, if any changes have happened
+ * since the last invocation.
+ * @return !=0 iff a handler has been called
+ *
+ * Needs to be invoked from the sending side.
+ */
+int h2_beam_report_consumption(h2_bucket_beam *beam);
/**
- * Register a callback to be invoked on the red side with the
- * amount of bytes that have been consumed by the red side, since the
+ * Register a callback to be invoked on the receiver side with the
+ * amount of bytes that have been produced by the sender, since the
* last callback invocation or reset.
* @param beam the beam to set the callback on
- * @param cb the callback or NULL
+ * @param io_cb the callback or NULL, called on receiver with bytes produced
* @param ctx the context to use in callback invocation
*
- * Call from the red side, callbacks invoked on red side.
+ * Call from the receiver side, callbacks invoked on either side.
*/
void h2_beam_on_produced(h2_bucket_beam *beam,
- h2_beam_io_callback *cb, void *ctx);
+ h2_beam_io_callback *io_cb, void *ctx);
+/**
+ * Register a callback that may prevent a file from being beamed as a
+ * file handle, forcing the file content to be copied. When no callback
+ * is set (NULL), file handles are transferred directly.
+ * @param beam the beam to set the callback on
+ * @param cb the callback or NULL; returning 0 forces the file data to be copied
+ * @param ctx the context to use in callback invocation
+ *
+ * Call from the receiver side, callbacks invoked on either side.
+ */
void h2_beam_on_file_beam(h2_bucket_beam *beam,
h2_beam_can_beam_callback *cb, void *ctx);
@@ -360,4 +394,12 @@ int h2_beam_was_received(h2_bucket_beam
apr_size_t h2_beam_get_files_beamed(h2_bucket_beam *beam);
+typedef apr_bucket *h2_bucket_beamer(h2_bucket_beam *beam,
+ apr_bucket_brigade *dest,
+ const apr_bucket *src);
+
+void h2_register_bucket_beamer(h2_bucket_beamer *beamer);
+
+void h2_beam_log(h2_bucket_beam *beam, conn_rec *c, int level, const char *msg);
+
#endif /* h2_bucket_beam_h */
diff -up --new-file httpd-2.4.23/modules/http2/h2_bucket_eoc.c /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_bucket_eoc.c
--- httpd-2.4.23/modules/http2/h2_bucket_eoc.c 2016-05-23 12:55:29.000000000 +0200
+++ /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_bucket_eoc.c 1970-01-01 01:00:00.000000000 +0100
@@ -1,110 +0,0 @@
-/* Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <assert.h>
-#include <stddef.h>
-
-#include <httpd.h>
-#include <http_core.h>
-#include <http_connection.h>
-#include <http_log.h>
-
-#include "h2_private.h"
-#include "h2.h"
-#include "h2_mplx.h"
-#include "h2_session.h"
-#include "h2_bucket_eoc.h"
-
-typedef struct {
- apr_bucket_refcount refcount;
- h2_session *session;
-} h2_bucket_eoc;
-
-static apr_status_t bucket_cleanup(void *data)
-{
- h2_session **psession = data;
-
- if (*psession) {
- /*
- * If bucket_destroy is called after us, this prevents
- * bucket_destroy from trying to destroy the pool again.
- */
- *psession = NULL;
- }
- return APR_SUCCESS;
-}
-
-static apr_status_t bucket_read(apr_bucket *b, const char **str,
- apr_size_t *len, apr_read_type_e block)
-{
- (void)b;
- (void)block;
- *str = NULL;
- *len = 0;
- return APR_SUCCESS;
-}
-
-apr_bucket * h2_bucket_eoc_make(apr_bucket *b, h2_session *session)
-{
- h2_bucket_eoc *h;
-
- h = apr_bucket_alloc(sizeof(*h), b->list);
- h->session = session;
-
- b = apr_bucket_shared_make(b, h, 0, 0);
- b->type = &h2_bucket_type_eoc;
-
- return b;
-}
-
-apr_bucket * h2_bucket_eoc_create(apr_bucket_alloc_t *list, h2_session *session)
-{
- apr_bucket *b = apr_bucket_alloc(sizeof(*b), list);
-
- APR_BUCKET_INIT(b);
- b->free = apr_bucket_free;
- b->list = list;
- b = h2_bucket_eoc_make(b, session);
- if (session) {
- h2_bucket_eoc *h = b->data;
- apr_pool_pre_cleanup_register(session->pool, &h->session, bucket_cleanup);
- }
- return b;
-}
-
-static void bucket_destroy(void *data)
-{
- h2_bucket_eoc *h = data;
-
- if (apr_bucket_shared_destroy(h)) {
- h2_session *session = h->session;
- apr_bucket_free(h);
- if (session) {
- h2_session_eoc_callback(session);
- /* all is gone now */
- }
- }
-}
-
-const apr_bucket_type_t h2_bucket_type_eoc = {
- "H2EOC", 5, APR_BUCKET_METADATA,
- bucket_destroy,
- bucket_read,
- apr_bucket_setaside_noop,
- apr_bucket_split_notimpl,
- apr_bucket_shared_copy
-};
-
diff -up --new-file httpd-2.4.23/modules/http2/h2_bucket_eoc.h /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_bucket_eoc.h
--- httpd-2.4.23/modules/http2/h2_bucket_eoc.h 2016-05-23 12:55:29.000000000 +0200
+++ /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_bucket_eoc.h 1970-01-01 01:00:00.000000000 +0100
@@ -1,32 +0,0 @@
-/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
-
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef mod_http2_h2_bucket_eoc_h
-#define mod_http2_h2_bucket_eoc_h
-
-struct h2_session;
-
-/** End Of HTTP/2 SESSION (H2EOC) bucket */
-extern const apr_bucket_type_t h2_bucket_type_eoc;
-
-#define H2_BUCKET_IS_H2EOC(e) (e->type == &h2_bucket_type_eoc)
-
-apr_bucket * h2_bucket_eoc_make(apr_bucket *b,
- struct h2_session *session);
-
-apr_bucket * h2_bucket_eoc_create(apr_bucket_alloc_t *list,
- struct h2_session *session);
-
-#endif /* mod_http2_h2_bucket_eoc_h */
diff -up --new-file httpd-2.4.23/modules/http2/h2_bucket_eos.c /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_bucket_eos.c
--- httpd-2.4.23/modules/http2/h2_bucket_eos.c 2016-06-07 13:29:51.000000000 +0200
+++ /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_bucket_eos.c 2017-02-14 16:53:50.000000000 +0100
@@ -95,7 +95,7 @@ static void bucket_destroy(void *data)
}
apr_bucket_free(h);
if (stream) {
- h2_stream_eos_destroy(stream);
+ h2_stream_dispatch(stream, H2_SEV_EOS_SENT);
}
}
}
diff -up --new-file httpd-2.4.23/modules/http2/h2_config.c /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_config.c
--- httpd-2.4.23/modules/http2/h2_config.c 2016-03-02 12:21:28.000000000 +0100
+++ /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_config.c 2017-03-31 21:41:01.000000000 +0200
@@ -48,12 +48,11 @@ static h2_config defconf = {
-1, /* min workers */
-1, /* max workers */
10 * 60, /* max workers idle secs */
- 64 * 1024, /* stream max mem size */
+ 32 * 1024, /* stream max mem size */
NULL, /* no alt-svcs */
-1, /* alt-svc max age */
0, /* serialize headers */
-1, /* h2 direct mode */
- -1, /* # session extra files */
1, /* modern TLS only */
-1, /* HTTP/1 Upgrade support */
1024*1024, /* TLS warmup size */
@@ -61,7 +60,9 @@ static h2_config defconf = {
1, /* HTTP/2 server push enabled */
NULL, /* map of content-type to priorities */
256, /* push diary size */
-
+ 0, /* copy files across threads */
+ NULL, /* push list */
+ 0, /* early hints, http status 103 */
};
void h2_config_init(apr_pool_t *pool)
@@ -73,7 +74,6 @@ static void *h2_config_create(apr_pool_t
const char *prefix, const char *x)
{
h2_config *conf = (h2_config *)apr_pcalloc(pool, sizeof(h2_config));
-
const char *s = x? x : "unknown";
char *name = apr_pstrcat(pool, prefix, "[", s, "]", NULL);
@@ -87,7 +87,6 @@ static void *h2_config_create(apr_pool_t
conf->alt_svc_max_age = DEF_VAL;
conf->serialize_headers = DEF_VAL;
conf->h2_direct = DEF_VAL;
- conf->session_extra_files = DEF_VAL;
conf->modern_tls_only = DEF_VAL;
conf->h2_upgrade = DEF_VAL;
conf->tls_warmup_size = DEF_VAL;
@@ -95,7 +94,9 @@ static void *h2_config_create(apr_pool_t
conf->h2_push = DEF_VAL;
conf->priorities = NULL;
conf->push_diary_size = DEF_VAL;
-
+ conf->copy_files = DEF_VAL;
+ conf->push_list = NULL;
+ conf->early_hints = DEF_VAL;
return conf;
}
@@ -109,12 +110,11 @@ void *h2_config_create_dir(apr_pool_t *p
return h2_config_create(pool, "dir", x);
}
-void *h2_config_merge(apr_pool_t *pool, void *basev, void *addv)
+static void *h2_config_merge(apr_pool_t *pool, void *basev, void *addv)
{
h2_config *base = (h2_config *)basev;
h2_config *add = (h2_config *)addv;
h2_config *n = (h2_config *)apr_pcalloc(pool, sizeof(h2_config));
-
char *name = apr_pstrcat(pool, "merged[", add->name, ", ", base->name, "]", NULL);
n->name = name;
@@ -128,7 +128,6 @@ void *h2_config_merge(apr_pool_t *pool,
n->alt_svc_max_age = H2_CONFIG_GET(add, base, alt_svc_max_age);
n->serialize_headers = H2_CONFIG_GET(add, base, serialize_headers);
n->h2_direct = H2_CONFIG_GET(add, base, h2_direct);
- n->session_extra_files = H2_CONFIG_GET(add, base, session_extra_files);
n->modern_tls_only = H2_CONFIG_GET(add, base, modern_tls_only);
n->h2_upgrade = H2_CONFIG_GET(add, base, h2_upgrade);
n->tls_warmup_size = H2_CONFIG_GET(add, base, tls_warmup_size);
@@ -141,10 +140,27 @@ void *h2_config_merge(apr_pool_t *pool,
n->priorities = add->priorities? add->priorities : base->priorities;
}
n->push_diary_size = H2_CONFIG_GET(add, base, push_diary_size);
-
+ n->copy_files = H2_CONFIG_GET(add, base, copy_files);
+ if (add->push_list && base->push_list) {
+ n->push_list = apr_array_append(pool, base->push_list, add->push_list);
+ }
+ else {
+ n->push_list = add->push_list? add->push_list : base->push_list;
+ }
+ n->early_hints = H2_CONFIG_GET(add, base, early_hints);
return n;
}
+void *h2_config_merge_dir(apr_pool_t *pool, void *basev, void *addv)
+{
+ return h2_config_merge(pool, basev, addv);
+}
+
+void *h2_config_merge_svr(apr_pool_t *pool, void *basev, void *addv)
+{
+ return h2_config_merge(pool, basev, addv);
+}
+
int h2_config_geti(const h2_config *conf, h2_config_var_t var)
{
return (int)h2_config_geti64(conf, var);
@@ -175,8 +191,6 @@ apr_int64_t h2_config_geti64(const h2_co
return H2_CONFIG_GET(conf, &defconf, h2_upgrade);
case H2_CONF_DIRECT:
return H2_CONFIG_GET(conf, &defconf, h2_direct);
- case H2_CONF_SESSION_FILES:
- return H2_CONFIG_GET(conf, &defconf, session_extra_files);
case H2_CONF_TLS_WARMUP_SIZE:
return H2_CONFIG_GET(conf, &defconf, tls_warmup_size);
case H2_CONF_TLS_COOLDOWN_SECS:
@@ -185,6 +199,10 @@ apr_int64_t h2_config_geti64(const h2_co
return H2_CONFIG_GET(conf, &defconf, h2_push);
case H2_CONF_PUSH_DIARY_SIZE:
return H2_CONFIG_GET(conf, &defconf, push_diary_size);
+ case H2_CONF_COPY_FILES:
+ return H2_CONFIG_GET(conf, &defconf, copy_files);
+ case H2_CONF_EARLY_HINTS:
+ return H2_CONFIG_GET(conf, &defconf, early_hints);
default:
return DEF_VAL;
}
@@ -194,7 +212,7 @@ const h2_config *h2_config_sget(server_r
{
h2_config *cfg = (h2_config *)ap_get_module_config(s->module_config,
&http2_module);
- AP_DEBUG_ASSERT(cfg);
+ ap_assert(cfg);
return cfg;
}
@@ -286,7 +304,7 @@ static const char *h2_conf_set_stream_ma
static const char *h2_add_alt_svc(cmd_parms *parms,
void *arg, const char *value)
{
- if (value && strlen(value)) {
+ if (value && *value) {
h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
h2_alt_svc *as = h2_alt_svc_parse(value, parms->pool);
if (!as) {
@@ -313,13 +331,11 @@ static const char *h2_conf_set_alt_svc_m
static const char *h2_conf_set_session_extra_files(cmd_parms *parms,
void *arg, const char *value)
{
- h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
- apr_int64_t max = (int)apr_atoi64(value);
- if (max < 0) {
- return "value must be a non-negative number";
- }
- cfg->session_extra_files = (int)max;
+ /* deprecated, ignore */
(void)arg;
+ (void)value;
+ ap_log_perror(APLOG_MARK, APLOG_WARNING, 0, parms->pool, /* NO LOGNO */
+ "H2SessionExtraFiles is obsolete and will be ignored");
return NULL;
}
@@ -384,7 +400,7 @@ static const char *h2_conf_add_push_prio
h2_priority *priority;
int weight;
- if (!strlen(ctype)) {
+ if (!*ctype) {
return "1st argument must be a mime-type, like 'text/css' or '*'";
}
@@ -500,6 +516,93 @@ static const char *h2_conf_set_push_diar
return NULL;
}
+static const char *h2_conf_set_copy_files(cmd_parms *parms,
+ void *arg, const char *value)
+{
+ h2_config *cfg = (h2_config *)arg;
+ if (!strcasecmp(value, "On")) {
+ cfg->copy_files = 1;
+ return NULL;
+ }
+ else if (!strcasecmp(value, "Off")) {
+ cfg->copy_files = 0;
+ return NULL;
+ }
+
+ (void)arg;
+ return "value must be On or Off";
+}
+
+static void add_push(apr_pool_t *pool, h2_config *conf, h2_push_res *push)
+{
+ h2_push_res *new;
+ if (!conf->push_list) {
+ conf->push_list = apr_array_make(pool, 10, sizeof(*push));
+ }
+ new = apr_array_push(conf->push_list);
+ new->uri_ref = push->uri_ref;
+ new->critical = push->critical;
+}
+
+static const char *h2_conf_add_push_res(cmd_parms *cmd, void *dirconf,
+ const char *arg1, const char *arg2,
+ const char *arg3)
+{
+ h2_config *dconf = (h2_config*)dirconf ;
+ h2_config *sconf = (h2_config*)h2_config_sget(cmd->server);
+ h2_push_res push;
+ const char *last = arg3;
+
+ memset(&push, 0, sizeof(push));
+ if (!strcasecmp("add", arg1)) {
+ push.uri_ref = arg2;
+ }
+ else {
+ push.uri_ref = arg1;
+ last = arg2;
+ if (arg3) {
+ return "too many parameter";
+ }
+ }
+
+ if (last) {
+ if (!strcasecmp("critical", last)) {
+ push.critical = 1;
+ }
+ else {
+ return "unknown last parameter";
+ }
+ }
+
+ /* server command? set both */
+ if (cmd->path == NULL) {
+ add_push(cmd->pool, sconf, &push);
+ add_push(cmd->pool, dconf, &push);
+ }
+ else {
+ add_push(cmd->pool, dconf, &push);
+ }
+
+ return NULL;
+}
+
+static const char *h2_conf_set_early_hints(cmd_parms *parms,
+ void *arg, const char *value)
+{
+ h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
+ if (!strcasecmp(value, "On")) {
+ cfg->early_hints = 1;
+ return NULL;
+ }
+ else if (!strcasecmp(value, "Off")) {
+ cfg->early_hints = 0;
+ return NULL;
+ }
+
+ (void)arg;
+ return "value must be On or Off";
+}
+
#define AP_END_CMD AP_INIT_TAKE1(NULL, NULL, NULL, RSRC_CONF, NULL)
const command_rec h2_cmds[] = {
@@ -528,7 +631,7 @@ const command_rec h2_cmds[] = {
AP_INIT_TAKE1("H2Direct", h2_conf_set_direct, NULL,
RSRC_CONF, "on to enable direct HTTP/2 mode"),
AP_INIT_TAKE1("H2SessionExtraFiles", h2_conf_set_session_extra_files, NULL,
- RSRC_CONF, "number of extra file a session might keep open"),
+ RSRC_CONF, "number of extra file a session might keep open (obsolete)"),
AP_INIT_TAKE1("H2TLSWarmUpSize", h2_conf_set_tls_warmup_size, NULL,
RSRC_CONF, "number of bytes on TLS connection before doing max writes"),
AP_INIT_TAKE1("H2TLSCoolDownSecs", h2_conf_set_tls_cooldown_secs, NULL,
@@ -539,6 +642,12 @@ const command_rec h2_cmds[] = {
RSRC_CONF, "define priority of PUSHed resources per content type"),
AP_INIT_TAKE1("H2PushDiarySize", h2_conf_set_push_diary_size, NULL,
RSRC_CONF, "size of push diary"),
+ AP_INIT_TAKE1("H2CopyFiles", h2_conf_set_copy_files, NULL,
+ OR_FILEINFO, "on to perform copy of file data"),
+ AP_INIT_TAKE123("H2PushResource", h2_conf_add_push_res, NULL,
+ OR_FILEINFO, "add a resource to be pushed in this location/on this server."),
+ AP_INIT_TAKE1("H2EarlyHints", h2_conf_set_early_hints, NULL,
+ RSRC_CONF, "on to enable interim status 103 responses"),
AP_END_CMD
};
diff -up --new-file httpd-2.4.23/modules/http2/h2_config.h /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_config.h
--- httpd-2.4.23/modules/http2/h2_config.h 2016-03-02 12:21:28.000000000 +0100
+++ /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_config.h 2017-03-31 21:41:01.000000000 +0200
@@ -33,17 +33,24 @@ typedef enum {
H2_CONF_ALT_SVC_MAX_AGE,
H2_CONF_SER_HEADERS,
H2_CONF_DIRECT,
- H2_CONF_SESSION_FILES,
H2_CONF_MODERN_TLS_ONLY,
H2_CONF_UPGRADE,
H2_CONF_TLS_WARMUP_SIZE,
H2_CONF_TLS_COOLDOWN_SECS,
H2_CONF_PUSH,
H2_CONF_PUSH_DIARY_SIZE,
+ H2_CONF_COPY_FILES,
+ H2_CONF_EARLY_HINTS,
} h2_config_var_t;
struct apr_hash_t;
struct h2_priority;
+struct h2_push_res;
+
+typedef struct h2_push_res {
+ const char *uri_ref;
+ int critical;
+} h2_push_res;
/* Apache httpd module configuration for h2. */
typedef struct h2_config {
@@ -59,7 +66,6 @@ typedef struct h2_config {
int serialize_headers; /* Use serialized HTTP/1.1 headers for
processing, better compatibility */
int h2_direct; /* if mod_h2 is active directly */
- int session_extra_files; /* # of extra files a session may keep open */
int modern_tls_only; /* Accept only modern TLS in HTTP/2 connections */
int h2_upgrade; /* Allow HTTP/1 upgrade to h2/h2c */
apr_int64_t tls_warmup_size; /* Amount of TLS data to send before going full write size */
@@ -68,14 +74,16 @@ typedef struct h2_config {
struct apr_hash_t *priorities;/* map of content-type to h2_priority records */
int push_diary_size; /* # of entries in push diary */
+ int copy_files; /* if files shall be copied vs setaside on output */
+ apr_array_header_t *push_list;/* list of h2_push_res configurations */
+ int early_hints; /* support status code 103 */
} h2_config;
void *h2_config_create_dir(apr_pool_t *pool, char *x);
+void *h2_config_merge_dir(apr_pool_t *pool, void *basev, void *addv);
void *h2_config_create_svr(apr_pool_t *pool, server_rec *s);
-void *h2_config_merge(apr_pool_t *pool, void *basev, void *addv);
-
-apr_status_t h2_config_apply_header(const h2_config *config, request_rec *r);
+void *h2_config_merge_svr(apr_pool_t *pool, void *basev, void *addv);
extern const command_rec h2_cmds[];
diff -up --new-file httpd-2.4.23/modules/http2/h2_conn.c /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_conn.c
--- httpd-2.4.23/modules/http2/h2_conn.c 2016-04-28 14:43:02.000000000 +0200
+++ /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_conn.c 2017-07-04 14:34:15.000000000 +0200
@@ -14,6 +14,7 @@
*/
#include <assert.h>
+#include <apr_strings.h>
#include <ap_mpm.h>
@@ -25,6 +26,8 @@
#include <http_protocol.h>
#include <http_request.h>
+#include <mpm_common.h>
+
#include "h2_private.h"
#include "h2.h"
#include "h2_config.h"
@@ -35,7 +38,6 @@
#include "h2_stream.h"
#include "h2_h2.h"
#include "h2_task.h"
-#include "h2_worker.h"
#include "h2_workers.h"
#include "h2_conn.h"
#include "h2_version.h"
@@ -45,6 +47,7 @@ static struct h2_workers *workers;
static h2_mpm_type_t mpm_type = H2_MPM_UNKNOWN;
static module *mpm_module;
static int async_mpm;
+static int mpm_supported = 1;
static apr_socket_t *dummy_socket;
static void check_modules(int force)
@@ -74,11 +77,18 @@ static void check_modules(int force)
else if (!strcmp("prefork.c", m->name)) {
mpm_type = H2_MPM_PREFORK;
mpm_module = m;
+ /* While http2 can work really well on prefork, it collides with
+ * today's use case for prefork: running single-thread app engines
+ * like php. If we restrict h2_workers to 1 per process, php will
+ * work fine, but browser will be limited to 1 active request at a
+ * time. */
+ mpm_supported = 0;
break;
}
else if (!strcmp("simple_api.c", m->name)) {
mpm_type = H2_MPM_SIMPLE;
mpm_module = m;
+ mpm_supported = 0;
break;
}
else if (!strcmp("mpm_winnt.c", m->name)) {
@@ -100,12 +110,11 @@ apr_status_t h2_conn_child_init(apr_pool
{
const h2_config *config = h2_config_sget(s);
apr_status_t status = APR_SUCCESS;
- int minw, maxw, max_tx_handles, n;
+ int minw, maxw;
int max_threads_per_child = 0;
int idle_secs = 0;
check_modules(1);
-
ap_mpm_query(AP_MPMQ_MAX_THREADS, &max_threads_per_child);
status = ap_mpm_query(AP_MPMQ_IS_ASYNC, &async_mpm);
@@ -123,34 +132,18 @@ apr_status_t h2_conn_child_init(apr_pool
minw = max_threads_per_child;
}
if (maxw <= 0) {
- maxw = minw;
+ /* As a default, this seems to work quite well under mpm_event.
+ * For people enabling http2 under mpm_prefork, start 4 threads unless
+ * configured otherwise. People get unhappy if their http2 requests are
+ * blocking each other. */
+ maxw = H2MAX(3 * minw / 2, 4);
}
- /* How many file handles is it safe to use for transfer
- * to the master connection to be streamed out?
- * Is there a portable APR rlimit on NOFILES? Have not
- * found it. And if, how many of those would we set aside?
- * This leads all into a process wide handle allocation strategy
- * which ultimately would limit the number of accepted connections
- * with the assumption of implicitly reserving n handles for every
- * connection and requiring modules with excessive needs to allocate
- * from a central pool.
- */
- n = h2_config_geti(config, H2_CONF_SESSION_FILES);
- if (n < 0) {
- max_tx_handles = maxw * 2;
- }
- else {
- max_tx_handles = maxw * n;
- }
-
- ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, s,
- "h2_workers: min=%d max=%d, mthrpchild=%d, tx_files=%d",
- minw, maxw, max_threads_per_child, max_tx_handles);
- workers = h2_workers_create(s, pool, minw, maxw, max_tx_handles);
-
idle_secs = h2_config_geti(config, H2_CONF_MAX_WORKER_IDLE_SECS);
- h2_workers_set_max_idle_secs(workers, idle_secs);
+ ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, s,
+ "h2_workers: min=%d max=%d, mthrpchild=%d, idle_secs=%d",
+ minw, maxw, max_threads_per_child, idle_secs);
+ workers = h2_workers_create(s, pool, minw, maxw, idle_secs);
ap_register_input_filter("H2_IN", h2_filter_core_input,
NULL, AP_FTYPE_CONNECTION);
@@ -171,6 +164,18 @@ h2_mpm_type_t h2_conn_mpm_type(void)
return mpm_type;
}
+const char *h2_conn_mpm_name(void)
+{
+ check_modules(0);
+ return mpm_module? mpm_module->name : "unknown";
+}
+
+int h2_mpm_supported(void)
+{
+ check_modules(0);
+ return mpm_supported;
+}
+
static module *h2_conn_mpm_module(void)
{
check_modules(0);
@@ -180,6 +185,7 @@ static module *h2_conn_mpm_module(void)
apr_status_t h2_conn_setup(h2_ctx *ctx, conn_rec *c, request_rec *r)
{
h2_session *session;
+ apr_status_t status;
if (!workers) {
ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, c, APLOGNO(02911)
@@ -188,31 +194,37 @@ apr_status_t h2_conn_setup(h2_ctx *ctx,
}
if (r) {
- session = h2_session_rcreate(r, ctx, workers);
+ status = h2_session_rcreate(&session, r, ctx, workers);
}
else {
- session = h2_session_create(c, ctx, workers);
+ status = h2_session_create(&session, c, ctx, workers);
}
- h2_ctx_session_set(ctx, session);
-
- return APR_SUCCESS;
+ if (status == APR_SUCCESS) {
+ h2_ctx_session_set(ctx, session);
+ }
+ return status;
}
apr_status_t h2_conn_run(struct h2_ctx *ctx, conn_rec *c)
{
apr_status_t status;
int mpm_state = 0;
+ h2_session *session = h2_ctx_session_get(ctx);
+ ap_assert(session);
do {
if (c->cs) {
c->cs->sense = CONN_SENSE_DEFAULT;
+ c->cs->state = CONN_STATE_HANDLER;
}
- status = h2_session_process(h2_ctx_session_get(ctx), async_mpm);
+
+ status = h2_session_process(session, async_mpm);
if (APR_STATUS_IS_EOF(status)) {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, c, APLOGNO(03045)
- "h2_session(%ld): process, closing conn", c->id);
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, c,
+ H2_SSSN_LOG(APLOGNO(03045), session,
+ "process, closing conn"));
c->keepalive = AP_CONN_CLOSE;
}
else {
@@ -226,53 +238,78 @@ apr_status_t h2_conn_run(struct h2_ctx *
&& c->keepalive == AP_CONN_KEEPALIVE
&& mpm_state != AP_MPMQ_STOPPING);
+ if (c->cs) {
+ switch (session->state) {
+ case H2_SESSION_ST_INIT:
+ case H2_SESSION_ST_CLEANUP:
+ case H2_SESSION_ST_DONE:
+ case H2_SESSION_ST_IDLE:
+ c->cs->state = CONN_STATE_WRITE_COMPLETION;
+ break;
+ case H2_SESSION_ST_BUSY:
+ case H2_SESSION_ST_WAIT:
+ default:
+ c->cs->state = CONN_STATE_HANDLER;
+ break;
+
+ }
+ }
+
return DONE;
}
apr_status_t h2_conn_pre_close(struct h2_ctx *ctx, conn_rec *c)
{
- apr_status_t status;
-
- status = h2_session_pre_close(h2_ctx_session_get(ctx), async_mpm);
- if (status == APR_SUCCESS) {
- return DONE; /* This is the same, right? */
+ h2_session *session = h2_ctx_session_get(ctx);
+ if (session) {
+ apr_status_t status = h2_session_pre_close(session, async_mpm);
+ return (status == APR_SUCCESS)? DONE : status;
}
- return status;
+ return DONE;
}
-conn_rec *h2_slave_create(conn_rec *master, apr_pool_t *parent,
- apr_allocator_t *allocator)
+conn_rec *h2_slave_create(conn_rec *master, int slave_id, apr_pool_t *parent)
{
+ apr_allocator_t *allocator;
+ apr_status_t status;
apr_pool_t *pool;
conn_rec *c;
void *cfg;
+ module *mpm;
- AP_DEBUG_ASSERT(master);
+ ap_assert(master);
ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, master,
- "h2_conn(%ld): create slave", master->id);
+ "h2_stream(%ld-%d): create slave", master->id, slave_id);
/* We create a pool with its own allocator to be used for
* processing a request. This is the only way to have the processing
* independant of its parent pool in the sense that it can work in
- * another thread.
+ * another thread. Also, the new allocator needs its own mutex to
+ * synchronize sub-pools.
*/
- if (!allocator) {
- apr_allocator_create(&allocator);
+ apr_allocator_create(&allocator);
+ apr_allocator_max_free_set(allocator, ap_max_mem_free);
+ status = apr_pool_create_ex(&pool, parent, NULL, allocator);
+ if (status != APR_SUCCESS) {
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, status, master,
+ APLOGNO(10004) "h2_session(%ld-%d): create slave pool",
+ master->id, slave_id);
+ return NULL;
}
- apr_pool_create_ex(&pool, parent, NULL, allocator);
- apr_pool_tag(pool, "h2_slave_conn");
apr_allocator_owner_set(allocator, pool);
-
+ apr_pool_tag(pool, "h2_slave_conn");
+
c = (conn_rec *) apr_palloc(pool, sizeof(conn_rec));
if (c == NULL) {
ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_ENOMEM, master,
- APLOGNO(02913) "h2_task: creating conn");
+ APLOGNO(02913) "h2_session(%ld-%d): create slave",
+ master->id, slave_id);
+ apr_pool_destroy(pool);
return NULL;
}
memcpy(c, master, sizeof(conn_rec));
-
- /* Replace these */
+
c->master = master;
c->pool = pool;
c->conn_config = ap_create_conn_config(pool);
@@ -282,11 +319,15 @@ conn_rec *h2_slave_create(conn_rec *mast
c->bucket_alloc = apr_bucket_alloc_create(pool);
c->data_in_input_filters = 0;
c->data_in_output_filters = 0;
+ /* prevent mpm_event from making wrong assumptions about this connection,
+ * like e.g. using its socket for an async read check. */
c->clogging_input_filters = 1;
c->log = NULL;
- c->log_id = NULL;
+ c->log_id = apr_psprintf(pool, "%ld-%d",
+ master->id, slave_id);
/* Simulate that we had already a request on this connection. */
c->keepalives = 1;
+ c->aborted = 0;
/* We cannot install the master connection socket on the slaves, as
* modules mess with timeouts/blocking of the socket, with
* unwanted side effects to the master connection processing.
@@ -299,29 +340,22 @@ conn_rec *h2_slave_create(conn_rec *mast
/* TODO: not all mpm modules have learned about slave connections yet.
* copy their config from master to slave.
*/
- if (h2_conn_mpm_module()) {
- cfg = ap_get_module_config(master->conn_config, h2_conn_mpm_module());
- ap_set_module_config(c->conn_config, h2_conn_mpm_module(), cfg);
+ if ((mpm = h2_conn_mpm_module()) != NULL) {
+ cfg = ap_get_module_config(master->conn_config, mpm);
+ ap_set_module_config(c->conn_config, mpm, cfg);
}
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c,
+ "h2_stream(%ld-%d): created slave", master->id, slave_id);
return c;
}
-void h2_slave_destroy(conn_rec *slave, apr_allocator_t **pallocator)
+void h2_slave_destroy(conn_rec *slave)
{
- apr_pool_t *parent;
- apr_allocator_t *allocator = apr_pool_allocator_get(slave->pool);
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, slave,
- "h2_slave_conn(%ld): destroy (task=%s)", slave->id,
+ "h2_stream(%s): destroy slave",
apr_table_get(slave->notes, H2_TASK_ID_NOTE));
- /* Attache the allocator to the parent pool and return it for
- * reuse, otherwise the own is still the slave pool and it will
- * get destroyed with it. */
- parent = apr_pool_parent_get(slave->pool);
- if (pallocator && parent) {
- apr_allocator_owner_set(allocator, parent);
- *pallocator = allocator;
- }
+ slave->sbh = NULL;
apr_pool_destroy(slave->pool);
}
diff -up --new-file httpd-2.4.23/modules/http2/h2_conn.h /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_conn.h
--- httpd-2.4.23/modules/http2/h2_conn.h 2016-03-17 17:54:05.000000000 +0100
+++ /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_conn.h 2017-07-04 14:34:15.000000000 +0200
@@ -31,7 +31,7 @@ apr_status_t h2_conn_setup(struct h2_ctx
/**
* Run the HTTP/2 connection in synchronous fashion.
* Return when the HTTP/2 session is done
- * and the connection will close or a fatal error occured.
+ * and the connection will close or a fatal error occurred.
*
* @param ctx the http2 context to run
* @return APR_SUCCESS when session is done.
@@ -64,11 +64,11 @@ typedef enum {
/* Returns the type of MPM module detected */
h2_mpm_type_t h2_conn_mpm_type(void);
+const char *h2_conn_mpm_name(void);
+int h2_mpm_supported(void);
-
-conn_rec *h2_slave_create(conn_rec *master, apr_pool_t *parent,
- apr_allocator_t *allocator);
-void h2_slave_destroy(conn_rec *slave, apr_allocator_t **pallocator);
+conn_rec *h2_slave_create(conn_rec *master, int slave_id, apr_pool_t *parent);
+void h2_slave_destroy(conn_rec *slave);
apr_status_t h2_slave_run_pre_connection(conn_rec *slave, apr_socket_t *csd);
void h2_slave_run_connection(conn_rec *slave);
diff -up --new-file httpd-2.4.23/modules/http2/h2_conn_io.c /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_conn_io.c
--- httpd-2.4.23/modules/http2/h2_conn_io.c 2016-06-22 15:30:24.000000000 +0200
+++ /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_conn_io.c 2017-02-22 11:07:46.000000000 +0100
@@ -24,7 +24,6 @@
#include <http_request.h>
#include "h2_private.h"
-#include "h2_bucket_eoc.h"
#include "h2_bucket_eos.h"
#include "h2_config.h"
#include "h2_conn_io.h"
@@ -39,6 +38,7 @@
* - TLS overhead (60-100)
* ~= 1300 bytes */
#define WRITE_SIZE_INITIAL 1300
+
/* Calculated like this: max TLS record size 16*1024
* - 40 (IP) - 20 (TCP) - 40 (TCP options)
* - TLS overhead (60-100)
@@ -72,9 +72,6 @@ static void h2_conn_io_bb_log(conn_rec *
else if (AP_BUCKET_IS_EOR(b)) {
off += apr_snprintf(buffer+off, bmax-off, "eor ");
}
- else if (H2_BUCKET_IS_H2EOC(b)) {
- off += apr_snprintf(buffer+off, bmax-off, "h2eoc ");
- }
else if (H2_BUCKET_IS_H2EOS(b)) {
off += apr_snprintf(buffer+off, bmax-off, "h2eos ");
}
@@ -120,20 +117,20 @@ static void h2_conn_io_bb_log(conn_rec *
line = *buffer? buffer : "(empty)";
}
/* Intentional no APLOGNO */
- ap_log_cerror(APLOG_MARK, level, 0, c, "bb_dump(%ld-%d)-%s: %s",
- c->id, stream_id, tag, line);
+ ap_log_cerror(APLOG_MARK, level, 0, c, "h2_session(%ld)-%s: %s",
+ c->id, tag, line);
}
apr_status_t h2_conn_io_init(h2_conn_io *io, conn_rec *c,
const h2_config *cfg)
{
- io->c = c;
- io->output = apr_brigade_create(c->pool, c->bucket_alloc);
- io->is_tls = h2_h2_is_tls(c);
- io->buffer_output = io->is_tls;
- io->pass_threshold = h2_config_geti64(cfg, H2_CONF_STREAM_MAX_MEM) / 2;
-
+ io->c = c;
+ io->output = apr_brigade_create(c->pool, c->bucket_alloc);
+ io->is_tls = h2_h2_is_tls(c);
+ io->buffer_output = io->is_tls;
+ io->flush_threshold = (apr_size_t)h2_config_geti64(cfg, H2_CONF_STREAM_MAX_MEM);
+
if (io->is_tls) {
/* This is what we start with,
* see https://issues.apache.org/jira/browse/TS-2503
@@ -161,8 +158,6 @@ apr_status_t h2_conn_io_init(h2_conn_io
return APR_SUCCESS;
}
-#define LOG_SCRATCH 0
-
static void append_scratch(h2_conn_io *io)
{
if (io->scratch && io->slen > 0) {
@@ -170,11 +165,6 @@ static void append_scratch(h2_conn_io *i
apr_bucket_free,
io->c->bucket_alloc);
APR_BRIGADE_INSERT_TAIL(io->output, b);
-#if LOG_SCRATCH
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, io->c, APLOGNO(03386)
- "h2_conn_io(%ld): append_scratch(%ld)",
- io->c->id, (long)io->slen);
-#endif
io->scratch = NULL;
io->slen = io->ssize = 0;
}
@@ -206,7 +196,7 @@ static apr_status_t read_to_scratch(h2_c
return APR_SUCCESS;
}
- AP_DEBUG_ASSERT(b->length <= (io->ssize - io->slen));
+ ap_assert(b->length <= (io->ssize - io->slen));
if (APR_BUCKET_IS_FILE(b)) {
apr_bucket_file *f = (apr_bucket_file *)b->data;
apr_file_t *fd = f->fd;
@@ -222,11 +212,6 @@ static apr_status_t read_to_scratch(h2_c
return status;
}
status = apr_file_read(fd, io->scratch + io->slen, &len);
-#if LOG_SCRATCH
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, io->c, APLOGNO(03387)
- "h2_conn_io(%ld): FILE_to_scratch(%ld)",
- io->c->id, (long)len);
-#endif
if (status != APR_SUCCESS && status != APR_EOF) {
return status;
}
@@ -235,11 +220,6 @@ static apr_status_t read_to_scratch(h2_c
else {
status = apr_bucket_read(b, &data, &len, APR_BLOCK_READ);
if (status == APR_SUCCESS) {
-#if LOG_SCRATCH
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, io->c, APLOGNO(03388)
- "h2_conn_io(%ld): read_to_scratch(%ld)",
- io->c->id, (long)b->length);
-#endif
memcpy(io->scratch+io->slen, data, len);
io->slen += len;
}
@@ -255,54 +235,46 @@ static void check_write_size(h2_conn_io
/* long time not written, reset write size */
io->write_size = WRITE_SIZE_INITIAL;
io->bytes_written = 0;
- ap_log_cerror(APLOG_MARK, APLOG_TRACE4, 0, io->c,
- "h2_conn_io(%ld): timeout write size reset to %ld",
- (long)io->c->id, (long)io->write_size);
}
else if (io->write_size < WRITE_SIZE_MAX
&& io->bytes_written >= io->warmup_size) {
/* connection is hot, use max size */
io->write_size = WRITE_SIZE_MAX;
- ap_log_cerror(APLOG_MARK, APLOG_TRACE4, 0, io->c,
- "h2_conn_io(%ld): threshold reached, write size now %ld",
- (long)io->c->id, (long)io->write_size);
}
}
-static apr_status_t pass_output(h2_conn_io *io, int flush, int eoc)
+static apr_status_t pass_output(h2_conn_io *io, int flush)
{
conn_rec *c = io->c;
+ apr_bucket_brigade *bb = io->output;
apr_bucket *b;
apr_off_t bblen;
apr_status_t status;
append_scratch(io);
- if (flush) {
+ if (flush && !io->is_flushed) {
b = apr_bucket_flush_create(c->bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(io->output, b);
+ APR_BRIGADE_INSERT_TAIL(bb, b);
}
- if (APR_BRIGADE_EMPTY(io->output)) {
+ if (APR_BRIGADE_EMPTY(bb)) {
return APR_SUCCESS;
}
- ap_log_cerror(APLOG_MARK, APLOG_TRACE4, 0, c, "h2_conn_io: pass_output");
ap_update_child_status(c->sbh, SERVER_BUSY_WRITE, NULL);
- apr_brigade_length(io->output, 0, &bblen);
+ apr_brigade_length(bb, 0, &bblen);
+ h2_conn_io_bb_log(c, 0, APLOG_TRACE2, "out", bb);
- h2_conn_io_bb_log(c, 0, APLOG_TRACE2, "master conn pass", io->output);
- status = ap_pass_brigade(c->output_filters, io->output);
-
- /* careful with access after this, as we might have flushed an EOC bucket
- * that de-allocated us all. */
- if (!eoc) {
- apr_brigade_cleanup(io->output);
- if (status == APR_SUCCESS) {
- io->bytes_written += (apr_size_t)bblen;
- io->last_write = apr_time_now();
+ status = ap_pass_brigade(c->output_filters, bb);
+ if (status == APR_SUCCESS) {
+ io->bytes_written += (apr_size_t)bblen;
+ io->last_write = apr_time_now();
+ if (flush) {
+ io->is_flushed = 1;
}
}
-
+ apr_brigade_cleanup(bb);
+
if (status != APR_SUCCESS) {
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, c, APLOGNO(03044)
"h2_conn_io(%ld): pass_out brigade %ld bytes",
@@ -311,16 +283,27 @@ static apr_status_t pass_output(h2_conn_
return status;
}
-apr_status_t h2_conn_io_flush(h2_conn_io *io)
+int h2_conn_io_needs_flush(h2_conn_io *io)
{
- return pass_output(io, 1, 0);
+ if (!io->is_flushed) {
+ apr_off_t len = h2_brigade_mem_size(io->output);
+ if (len > io->flush_threshold) {
+ return 1;
+ }
+ /* if we do not exceed flush length due to memory limits,
+ * we want at least flush when we have that amount of data. */
+ apr_brigade_length(io->output, 0, &len);
+ return len > (4 * io->flush_threshold);
+ }
+ return 0;
}
-apr_status_t h2_conn_io_write_eoc(h2_conn_io *io, h2_session *session)
+apr_status_t h2_conn_io_flush(h2_conn_io *io)
{
- apr_bucket *b = h2_bucket_eoc_create(io->c->bucket_alloc, session);
- APR_BRIGADE_INSERT_TAIL(io->output, b);
- return pass_output(io, 1, 1);
+ apr_status_t status;
+ status = pass_output(io, 1);
+ check_write_size(io);
+ return status;
}
apr_status_t h2_conn_io_write(h2_conn_io *io, const char *data, size_t length)
@@ -328,25 +311,19 @@ apr_status_t h2_conn_io_write(h2_conn_io
apr_status_t status = APR_SUCCESS;
apr_size_t remain;
+ if (length > 0) {
+ io->is_flushed = 0;
+ }
+
if (io->buffer_output) {
while (length > 0) {
remain = assure_scratch_space(io);
if (remain >= length) {
-#if LOG_SCRATCH
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, io->c, APLOGNO(03389)
- "h2_conn_io(%ld): write_to_scratch(%ld)",
- io->c->id, (long)length);
-#endif
memcpy(io->scratch + io->slen, data, length);
io->slen += length;
length = 0;
}
else {
-#if LOG_SCRATCH
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, io->c, APLOGNO(03390)
- "h2_conn_io(%ld): write_to_scratch(%ld)",
- io->c->id, (long)remain);
-#endif
memcpy(io->scratch + io->slen, data, remain);
io->slen += remain;
data += remain;
@@ -365,7 +342,10 @@ apr_status_t h2_conn_io_pass(h2_conn_io
apr_bucket *b;
apr_status_t status = APR_SUCCESS;
- check_write_size(io);
+ if (!APR_BRIGADE_EMPTY(bb)) {
+ io->is_flushed = 0;
+ }
+
while (!APR_BRIGADE_EMPTY(bb) && status == APR_SUCCESS) {
b = APR_BRIGADE_FIRST(bb);
@@ -384,11 +364,6 @@ apr_status_t h2_conn_io_pass(h2_conn_io
/* complete write_size bucket, append unchanged */
APR_BUCKET_REMOVE(b);
APR_BRIGADE_INSERT_TAIL(io->output, b);
-#if LOG_SCRATCH
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, io->c, APLOGNO(03391)
- "h2_conn_io(%ld): pass bucket(%ld)",
- io->c->id, (long)b->length);
-#endif
continue;
}
}
@@ -408,15 +383,6 @@ apr_status_t h2_conn_io_pass(h2_conn_io
APR_BRIGADE_INSERT_TAIL(io->output, b);
}
}
-
- if (status == APR_SUCCESS) {
- if (!APR_BRIGADE_EMPTY(io->output)) {
- apr_off_t len = h2_brigade_mem_size(io->output);
- if (len >= io->pass_threshold) {
- return pass_output(io, 0, 0);
- }
- }
- }
return status;
}
diff -up --new-file httpd-2.4.23/modules/http2/h2_conn_io.h /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_conn_io.h
--- httpd-2.4.23/modules/http2/h2_conn_io.h 2016-05-04 15:58:02.000000000 +0200
+++ /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_conn_io.h 2017-01-21 16:48:17.000000000 +0100
@@ -39,7 +39,8 @@ typedef struct {
apr_int64_t bytes_written;
int buffer_output;
- apr_size_t pass_threshold;
+ apr_size_t flush_threshold;
+ unsigned int is_flushed : 1;
char *scratch;
apr_size_t ssize;
@@ -61,16 +62,15 @@ apr_status_t h2_conn_io_write(h2_conn_io
apr_status_t h2_conn_io_pass(h2_conn_io *io, apr_bucket_brigade *bb);
/**
- * Append an End-Of-Connection bucket to the output that, once destroyed,
- * will tear down the complete http2 session.
- */
-apr_status_t h2_conn_io_write_eoc(h2_conn_io *io, struct h2_session *session);
-
-/**
* Pass any buffered data on to the connection output filters.
* @param io the connection io
* @param flush if a flush bucket should be appended to any output
*/
apr_status_t h2_conn_io_flush(h2_conn_io *io);
+/**
+ * Check if the buffered amount of data needs flushing.
+ */
+int h2_conn_io_needs_flush(h2_conn_io *io);
+
#endif /* defined(__mod_h2__h2_conn_io__) */
diff -up --new-file httpd-2.4.23/modules/http2/h2_ctx.c /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_ctx.c
--- httpd-2.4.23/modules/http2/h2_ctx.c 2016-05-04 15:58:02.000000000 +0200
+++ /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_ctx.c 2016-10-27 18:53:58.000000000 +0200
@@ -27,7 +27,7 @@
static h2_ctx *h2_ctx_create(const conn_rec *c)
{
h2_ctx *ctx = apr_pcalloc(c->pool, sizeof(h2_ctx));
- AP_DEBUG_ASSERT(ctx);
+ ap_assert(ctx);
ap_set_module_config(c->conn_config, &http2_module, ctx);
h2_ctx_server_set(ctx, c->base_server);
return ctx;
@@ -35,7 +35,7 @@ static h2_ctx *h2_ctx_create(const conn_
void h2_ctx_clear(const conn_rec *c)
{
- AP_DEBUG_ASSERT(c);
+ ap_assert(c);
ap_set_module_config(c->conn_config, &http2_module, NULL);
}
diff -up --new-file httpd-2.4.23/modules/http2/h2_filter.c /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_filter.c
--- httpd-2.4.23/modules/http2/h2_filter.c 2016-05-23 12:55:29.000000000 +0200
+++ /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_filter.c 2017-04-10 17:04:55.000000000 +0200
@@ -15,14 +15,17 @@
#include <assert.h>
+#include <apr_strings.h>
#include <httpd.h>
#include <http_core.h>
+#include <http_protocol.h>
#include <http_log.h>
#include <http_connection.h>
#include <scoreboard.h>
#include "h2_private.h"
#include "h2.h"
+#include "h2_config.h"
#include "h2_conn_io.h"
#include "h2_ctx.h"
#include "h2_mplx.h"
@@ -30,7 +33,8 @@
#include "h2_task.h"
#include "h2_stream.h"
#include "h2_request.h"
-#include "h2_response.h"
+#include "h2_headers.h"
+#include "h2_stream.h"
#include "h2_session.h"
#include "h2_util.h"
#include "h2_version.h"
@@ -40,55 +44,80 @@
#define UNSET -1
#define H2MIN(x,y) ((x) < (y) ? (x) : (y))
-static apr_status_t consume_brigade(h2_filter_cin *cin,
- apr_bucket_brigade *bb,
- apr_read_type_e block)
+static apr_status_t recv_RAW_DATA(conn_rec *c, h2_filter_cin *cin,
+ apr_bucket *b, apr_read_type_e block)
{
+ h2_session *session = cin->session;
apr_status_t status = APR_SUCCESS;
- apr_size_t readlen = 0;
+ apr_size_t len;
+ const char *data;
+ ssize_t n;
- while (status == APR_SUCCESS && !APR_BRIGADE_EMPTY(bb)) {
+ status = apr_bucket_read(b, &data, &len, block);
+
+ while (status == APR_SUCCESS && len > 0) {
+ n = nghttp2_session_mem_recv(session->ngh2, (const uint8_t *)data, len);
- apr_bucket* bucket = APR_BRIGADE_FIRST(bb);
- if (APR_BUCKET_IS_METADATA(bucket)) {
- /* we do nothing regarding any meta here */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
+ H2_SSSN_MSG(session, "fed %ld bytes to nghttp2, %ld read"),
+ (long)len, (long)n);
+ if (n < 0) {
+ if (nghttp2_is_fatal((int)n)) {
+ h2_session_event(session, H2_SESSION_EV_PROTO_ERROR,
+ (int)n, nghttp2_strerror((int)n));
+ status = APR_EGENERAL;
+ }
}
else {
- const char *bucket_data = NULL;
- apr_size_t bucket_length = 0;
- status = apr_bucket_read(bucket, &bucket_data,
- &bucket_length, block);
-
- if (status == APR_SUCCESS && bucket_length > 0) {
- apr_size_t consumed = 0;
-
- status = cin->cb(cin->cb_ctx, bucket_data, bucket_length, &consumed);
- if (status == APR_SUCCESS && bucket_length > consumed) {
- /* We have data left in the bucket. Split it. */
- status = apr_bucket_split(bucket, consumed);
- }
- readlen += consumed;
- cin->start_read = apr_time_now();
+ session->io.bytes_read += n;
+ if (len <= n) {
+ break;
}
+ len -= n;
+ data += n;
+ }
+ }
+
+ return status;
+}
+
+static apr_status_t recv_RAW_brigade(conn_rec *c, h2_filter_cin *cin,
+ apr_bucket_brigade *bb,
+ apr_read_type_e block)
+{
+ apr_status_t status = APR_SUCCESS;
+ apr_bucket* b;
+ int consumed = 0;
+
+ h2_util_bb_log(c, c->id, APLOG_TRACE2, "RAW_in", bb);
+ while (status == APR_SUCCESS && !APR_BRIGADE_EMPTY(bb)) {
+ b = APR_BRIGADE_FIRST(bb);
+
+ if (APR_BUCKET_IS_METADATA(b)) {
+ /* nop */
+ }
+ else {
+ status = recv_RAW_DATA(c, cin, b, block);
}
- apr_bucket_delete(bucket);
+ consumed = 1;
+ apr_bucket_delete(b);
}
- if (readlen == 0 && status == APR_SUCCESS && block == APR_NONBLOCK_READ) {
+ if (!consumed && status == APR_SUCCESS && block == APR_NONBLOCK_READ) {
return APR_EAGAIN;
}
return status;
}
-h2_filter_cin *h2_filter_cin_create(apr_pool_t *p, h2_filter_cin_cb *cb, void *ctx)
+h2_filter_cin *h2_filter_cin_create(h2_session *session)
{
h2_filter_cin *cin;
- cin = apr_pcalloc(p, sizeof(*cin));
- cin->pool = p;
- cin->cb = cb;
- cin->cb_ctx = ctx;
- cin->start_read = UNSET;
+ cin = apr_pcalloc(session->pool, sizeof(*cin));
+ if (!cin) {
+ return NULL;
+ }
+ cin->session = session;
return cin;
}
@@ -106,11 +135,14 @@ apr_status_t h2_filter_core_input(ap_fil
h2_filter_cin *cin = f->ctx;
apr_status_t status = APR_SUCCESS;
apr_interval_time_t saved_timeout = UNSET;
+ const int trace1 = APLOGctrace1(f->c);
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c,
- "core_input(%ld): read, %s, mode=%d, readbytes=%ld",
- (long)f->c->id, (block == APR_BLOCK_READ)? "BLOCK_READ" : "NONBLOCK_READ",
- mode, (long)readbytes);
+ if (trace1) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c,
+ "h2_session(%ld): read, %s, mode=%d, readbytes=%ld",
+ (long)f->c->id, (block == APR_BLOCK_READ)?
+ "BLOCK_READ" : "NONBLOCK_READ", mode, (long)readbytes);
+ }
if (mode == AP_MODE_INIT || mode == AP_MODE_SPECULATIVE) {
return ap_get_brigade(f->next, brigade, mode, block, readbytes);
@@ -121,20 +153,16 @@ apr_status_t h2_filter_core_input(ap_fil
}
if (!cin->bb) {
- cin->bb = apr_brigade_create(cin->pool, f->c->bucket_alloc);
+ cin->bb = apr_brigade_create(cin->session->pool, f->c->bucket_alloc);
}
if (!cin->socket) {
cin->socket = ap_get_conn_socket(f->c);
}
- cin->start_read = apr_time_now();
if (APR_BRIGADE_EMPTY(cin->bb)) {
/* We only do a blocking read when we have no streams to process. So,
* in httpd scoreboard lingo, we are in a KEEPALIVE connection state.
- * When reading non-blocking, we do have streams to process and update
- * child with NULL request. That way, any current request information
- * in the scoreboard is preserved.
*/
if (block == APR_BLOCK_READ) {
if (cin->timeout > 0) {
@@ -151,17 +179,19 @@ apr_status_t h2_filter_core_input(ap_fil
switch (status) {
case APR_SUCCESS:
- status = consume_brigade(cin, cin->bb, block);
+ status = recv_RAW_brigade(f->c, cin, cin->bb, block);
break;
case APR_EOF:
case APR_EAGAIN:
case APR_TIMEUP:
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c,
- "core_input(%ld): read", (long)f->c->id);
+ if (trace1) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c,
+ "h2_session(%ld): read", f->c->id);
+ }
break;
default:
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, f->c, APLOGNO(03046)
- "h2_conn_io: error reading");
+ "h2_session(%ld): error reading", f->c->id);
break;
}
return status;
@@ -171,30 +201,92 @@ apr_status_t h2_filter_core_input(ap_fil
* http2 connection status handler + stream out source
******************************************************************************/
-static const char *H2_SOS_H2_STATUS = "http2-status";
+typedef struct {
+ apr_bucket_refcount refcount;
+ h2_bucket_event_cb *cb;
+ void *ctx;
+} h2_bucket_observer;
+
+static apr_status_t bucket_read(apr_bucket *b, const char **str,
+ apr_size_t *len, apr_read_type_e block)
+{
+ (void)b;
+ (void)block;
+ *str = NULL;
+ *len = 0;
+ return APR_SUCCESS;
+}
-int h2_filter_h2_status_handler(request_rec *r)
+static void bucket_destroy(void *data)
{
- h2_ctx *ctx = h2_ctx_rget(r);
- h2_task *task;
-
- if (strcmp(r->handler, "http2-status")) {
- return DECLINED;
+ h2_bucket_observer *h = data;
+ if (apr_bucket_shared_destroy(h)) {
+ if (h->cb) {
+ h->cb(h->ctx, H2_BUCKET_EV_BEFORE_DESTROY, NULL);
+ }
+ apr_bucket_free(h);
}
- if (r->method_number != M_GET) {
- return DECLINED;
+}
+
+apr_bucket * h2_bucket_observer_make(apr_bucket *b, h2_bucket_event_cb *cb,
+ void *ctx)
+{
+ h2_bucket_observer *br;
+
+ br = apr_bucket_alloc(sizeof(*br), b->list);
+ br->cb = cb;
+ br->ctx = ctx;
+
+ b = apr_bucket_shared_make(b, br, 0, 0);
+ b->type = &h2_bucket_type_observer;
+ return b;
+}
+
+apr_bucket * h2_bucket_observer_create(apr_bucket_alloc_t *list,
+ h2_bucket_event_cb *cb, void *ctx)
+{
+ apr_bucket *b = apr_bucket_alloc(sizeof(*b), list);
+
+ APR_BUCKET_INIT(b);
+ b->free = apr_bucket_free;
+ b->list = list;
+ b = h2_bucket_observer_make(b, cb, ctx);
+ return b;
+}
+
+apr_status_t h2_bucket_observer_fire(apr_bucket *b, h2_bucket_event event)
+{
+ if (H2_BUCKET_IS_OBSERVER(b)) {
+ h2_bucket_observer *l = (h2_bucket_observer *)b->data;
+ return l->cb(l->ctx, event, b);
}
+ return APR_EINVAL;
+}
- task = ctx? h2_ctx_get_task(ctx) : NULL;
- if (task) {
- /* We need to handle the actual output on the main thread, as
- * we need to access h2_session information. */
- apr_table_setn(r->notes, H2_RESP_SOS_NOTE, H2_SOS_H2_STATUS);
- apr_table_setn(r->headers_out, "Content-Type", "application/json");
- r->status = 200;
- return DONE;
+const apr_bucket_type_t h2_bucket_type_observer = {
+ "H2OBS", 5, APR_BUCKET_METADATA,
+ bucket_destroy,
+ bucket_read,
+ apr_bucket_setaside_noop,
+ apr_bucket_split_notimpl,
+ apr_bucket_shared_copy
+};
+
+apr_bucket *h2_bucket_observer_beam(struct h2_bucket_beam *beam,
+ apr_bucket_brigade *dest,
+ const apr_bucket *src)
+{
+ if (H2_BUCKET_IS_OBSERVER(src)) {
+ h2_bucket_observer *l = (h2_bucket_observer *)src->data;
+ apr_bucket *b = h2_bucket_observer_create(dest->bucket_alloc,
+ l->cb, l->ctx);
+ APR_BRIGADE_INSERT_TAIL(dest, b);
+ l->cb = NULL;
+ l->ctx = NULL;
+ h2_bucket_observer_fire(b, H2_BUCKET_EV_BEFORE_MASTER_SEND);
+ return b;
}
- return DECLINED;
+ return NULL;
}
static apr_status_t bbout(apr_bucket_brigade *bb, const char *fmt, ...)
@@ -209,82 +301,260 @@ static apr_status_t bbout(apr_bucket_bri
return rv;
}
-static apr_status_t h2_status_stream_filter(h2_stream *stream)
+static void add_settings(apr_bucket_brigade *bb, h2_session *s, int last)
{
- h2_session *session = stream->session;
- h2_mplx *mplx = session->mplx;
- conn_rec *c = session->c;
- h2_push_diary *diary;
- apr_bucket_brigade *bb;
- apr_status_t status;
+ h2_mplx *m = s->mplx;
- if (!stream->response) {
- return APR_EINVAL;
- }
-
- if (!stream->buffer) {
- stream->buffer = apr_brigade_create(stream->pool, c->bucket_alloc);
- }
- bb = stream->buffer;
+ bbout(bb, " \"settings\": {\n");
+ bbout(bb, " \"SETTINGS_MAX_CONCURRENT_STREAMS\": %d,\n", m->max_streams);
+ bbout(bb, " \"SETTINGS_MAX_FRAME_SIZE\": %d,\n", 16*1024);
+ bbout(bb, " \"SETTINGS_INITIAL_WINDOW_SIZE\": %d,\n",
+ h2_config_geti(s->config, H2_CONF_WIN_SIZE));
+ bbout(bb, " \"SETTINGS_ENABLE_PUSH\": %d\n", h2_session_push_enabled(s));
+ bbout(bb, " }%s\n", last? "" : ",");
+}
+
+static void add_peer_settings(apr_bucket_brigade *bb, h2_session *s, int last)
+{
+ bbout(bb, " \"peerSettings\": {\n");
+ bbout(bb, " \"SETTINGS_MAX_CONCURRENT_STREAMS\": %d,\n",
+ nghttp2_session_get_remote_settings(s->ngh2, NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS));
+ bbout(bb, " \"SETTINGS_MAX_FRAME_SIZE\": %d,\n",
+ nghttp2_session_get_remote_settings(s->ngh2, NGHTTP2_SETTINGS_MAX_FRAME_SIZE));
+ bbout(bb, " \"SETTINGS_INITIAL_WINDOW_SIZE\": %d,\n",
+ nghttp2_session_get_remote_settings(s->ngh2, NGHTTP2_SETTINGS_INITIAL_WINDOW_SIZE));
+ bbout(bb, " \"SETTINGS_ENABLE_PUSH\": %d,\n",
+ nghttp2_session_get_remote_settings(s->ngh2, NGHTTP2_SETTINGS_ENABLE_PUSH));
+ bbout(bb, " \"SETTINGS_HEADER_TABLE_SIZE\": %d,\n",
+ nghttp2_session_get_remote_settings(s->ngh2, NGHTTP2_SETTINGS_HEADER_TABLE_SIZE));
+ bbout(bb, " \"SETTINGS_MAX_HEADER_LIST_SIZE\": %d\n",
+ nghttp2_session_get_remote_settings(s->ngh2, NGHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE));
+ bbout(bb, " }%s\n", last? "" : ",");
+}
+
+typedef struct {
+ apr_bucket_brigade *bb;
+ h2_session *s;
+ int idx;
+} stream_ctx_t;
+
+static int add_stream(h2_stream *stream, void *ctx)
+{
+ stream_ctx_t *x = ctx;
+ int32_t flowIn, flowOut;
- apr_table_unset(stream->response->headers, "Content-Length");
- stream->response->content_length = -1;
+ flowIn = nghttp2_session_get_stream_effective_local_window_size(x->s->ngh2, stream->id);
+ flowOut = nghttp2_session_get_stream_remote_window_size(x->s->ngh2, stream->id);
+ bbout(x->bb, "%s\n \"%d\": {\n", (x->idx? "," : ""), stream->id);
+ bbout(x->bb, " \"state\": \"%s\",\n", h2_stream_state_str(stream));
+ bbout(x->bb, " \"created\": %f,\n", ((double)stream->created)/APR_USEC_PER_SEC);
+ bbout(x->bb, " \"flowIn\": %d,\n", flowIn);
+ bbout(x->bb, " \"flowOut\": %d,\n", flowOut);
+ bbout(x->bb, " \"dataIn\": %"APR_UINT64_T_FMT",\n", stream->in_data_octets);
+ bbout(x->bb, " \"dataOut\": %"APR_UINT64_T_FMT"\n", stream->out_data_octets);
+ bbout(x->bb, " }");
+
+ ++x->idx;
+ return 1;
+}
+
+static void add_streams(apr_bucket_brigade *bb, h2_session *s, int last)
+{
+ stream_ctx_t x;
- bbout(bb, "{\n");
- bbout(bb, " \"HTTP2\": \"on\",\n");
- bbout(bb, " \"H2PUSH\": \"%s\",\n", h2_session_push_enabled(session)? "on" : "off");
- bbout(bb, " \"mod_http2_version\": \"%s\",\n", MOD_HTTP2_VERSION);
- bbout(bb, " \"session_id\": %ld,\n", (long)session->id);
- bbout(bb, " \"streams_max\": %d,\n", (int)session->max_stream_count);
- bbout(bb, " \"this_stream\": %d,\n", stream->id);
- bbout(bb, " \"streams_open\": %d,\n", (int)h2_ihash_count(session->streams));
- bbout(bb, " \"max_stream_started\": %d,\n", mplx->max_stream_started);
- bbout(bb, " \"requests_received\": %d,\n", session->remote.emitted_count);
- bbout(bb, " \"responses_submitted\": %d,\n", session->responses_submitted);
- bbout(bb, " \"streams_reset\": %d, \n", session->streams_reset);
- bbout(bb, " \"pushes_promised\": %d,\n", session->pushes_promised);
- bbout(bb, " \"pushes_submitted\": %d,\n", session->pushes_submitted);
- bbout(bb, " \"pushes_reset\": %d,\n", session->pushes_reset);
+ x.bb = bb;
+ x.s = s;
+ x.idx = 0;
+ bbout(bb, " \"streams\": {");
+ h2_mplx_stream_do(s->mplx, add_stream, &x);
+ bbout(bb, "\n }%s\n", last? "" : ",");
+}
+
+static void add_push(apr_bucket_brigade *bb, h2_session *s,
+ h2_stream *stream, int last)
+{
+ h2_push_diary *diary;
+ apr_status_t status;
- diary = session->push_diary;
+ bbout(bb, " \"push\": {\n");
+ diary = s->push_diary;
if (diary) {
const char *data;
const char *base64_digest;
apr_size_t len;
- status = h2_push_diary_digest_get(diary, stream->pool, 256,
- stream->request->authority, &data, &len);
+ status = h2_push_diary_digest_get(diary, bb->p, 256,
+ stream->request->authority,
+ &data, &len);
if (status == APR_SUCCESS) {
- base64_digest = h2_util_base64url_encode(data, len, stream->pool);
- bbout(bb, " \"cache_digest\": \"%s\",\n", base64_digest);
- }
-
- /* try the reverse for testing purposes */
- status = h2_push_diary_digest_set(diary, stream->request->authority, data, len);
- if (status == APR_SUCCESS) {
- status = h2_push_diary_digest_get(diary, stream->pool, 256,
- stream->request->authority, &data, &len);
- if (status == APR_SUCCESS) {
- base64_digest = h2_util_base64url_encode(data, len, stream->pool);
- bbout(bb, " \"cache_digest^2\": \"%s\",\n", base64_digest);
- }
+ base64_digest = h2_util_base64url_encode(data, len, bb->p);
+ bbout(bb, " \"cacheDigest\": \"%s\",\n", base64_digest);
}
}
- bbout(bb, " \"frames_received\": %ld,\n", (long)session->frames_received);
- bbout(bb, " \"frames_sent\": %ld,\n", (long)session->frames_sent);
- bbout(bb, " \"bytes_received\": %"APR_UINT64_T_FMT",\n", session->io.bytes_read);
- bbout(bb, " \"bytes_sent\": %"APR_UINT64_T_FMT"\n", session->io.bytes_written);
+ bbout(bb, " \"promises\": %d,\n", s->pushes_promised);
+ bbout(bb, " \"submits\": %d,\n", s->pushes_submitted);
+ bbout(bb, " \"resets\": %d\n", s->pushes_reset);
+ bbout(bb, " }%s\n", last? "" : ",");
+}
+
+static void add_in(apr_bucket_brigade *bb, h2_session *s, int last)
+{
+ bbout(bb, " \"in\": {\n");
+ bbout(bb, " \"requests\": %d,\n", s->remote.emitted_count);
+ bbout(bb, " \"resets\": %d, \n", s->streams_reset);
+ bbout(bb, " \"frames\": %ld,\n", (long)s->frames_received);
+ bbout(bb, " \"octets\": %"APR_UINT64_T_FMT"\n", s->io.bytes_read);
+ bbout(bb, " }%s\n", last? "" : ",");
+}
+
+static void add_out(apr_bucket_brigade *bb, h2_session *s, int last)
+{
+ bbout(bb, " \"out\": {\n");
+ bbout(bb, " \"responses\": %d,\n", s->responses_submitted);
+ bbout(bb, " \"frames\": %ld,\n", (long)s->frames_sent);
+ bbout(bb, " \"octets\": %"APR_UINT64_T_FMT"\n", s->io.bytes_written);
+ bbout(bb, " }%s\n", last? "" : ",");
+}
+
+static void add_stats(apr_bucket_brigade *bb, h2_session *s,
+ h2_stream *stream, int last)
+{
+ bbout(bb, " \"stats\": {\n");
+ add_in(bb, s, 0);
+ add_out(bb, s, 0);
+ add_push(bb, s, stream, 1);
+ bbout(bb, " }%s\n", last? "" : ",");
+}
+
+static apr_status_t h2_status_insert(h2_task *task, apr_bucket *b)
+{
+ conn_rec *c = task->c->master;
+ h2_ctx *h2ctx = h2_ctx_get(c, 0);
+ h2_session *session;
+ h2_stream *stream;
+ apr_bucket_brigade *bb;
+ apr_bucket *e;
+ int32_t connFlowIn, connFlowOut;
+
+
+ if (!h2ctx || (session = h2_ctx_session_get(h2ctx)) == NULL) {
+ return APR_SUCCESS;
+ }
+
+ stream = h2_session_stream_get(session, task->stream_id);
+ if (!stream) {
+ /* stream already done */
+ return APR_SUCCESS;
+ }
+
+ bb = apr_brigade_create(stream->pool, c->bucket_alloc);
+
+ connFlowIn = nghttp2_session_get_effective_local_window_size(session->ngh2);
+ connFlowOut = nghttp2_session_get_remote_window_size(session->ngh2);
+
+ bbout(bb, "{\n");
+ bbout(bb, " \"version\": \"draft-01\",\n");
+ add_settings(bb, session, 0);
+ add_peer_settings(bb, session, 0);
+ bbout(bb, " \"connFlowIn\": %d,\n", connFlowIn);
+ bbout(bb, " \"connFlowOut\": %d,\n", connFlowOut);
+ bbout(bb, " \"sentGoAway\": %d,\n", session->local.shutdown);
+
+ add_streams(bb, session, 0);
+
+ add_stats(bb, session, stream, 1);
bbout(bb, "}\n");
+ while ((e = APR_BRIGADE_FIRST(bb)) != APR_BRIGADE_SENTINEL(bb)) {
+ APR_BUCKET_REMOVE(e);
+ APR_BUCKET_INSERT_AFTER(b, e);
+ b = e;
+ }
+ apr_brigade_destroy(bb);
+
return APR_SUCCESS;
}
-apr_status_t h2_stream_filter(h2_stream *stream)
+static apr_status_t status_event(void *ctx, h2_bucket_event event,
+ apr_bucket *b)
{
- const char *fname = stream->response? stream->response->sos_filter : NULL;
- if (fname && !strcmp(H2_SOS_H2_STATUS, fname)) {
- return h2_status_stream_filter(stream);
+ h2_task *task = ctx;
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, task->c->master,
+ "status_event(%s): %d", task->id, event);
+ switch (event) {
+ case H2_BUCKET_EV_BEFORE_MASTER_SEND:
+ h2_status_insert(task, b);
+ break;
+ default:
+ break;
}
return APR_SUCCESS;
}
+int h2_filter_h2_status_handler(request_rec *r)
+{
+ h2_ctx *ctx = h2_ctx_rget(r);
+ conn_rec *c = r->connection;
+ h2_task *task;
+ apr_bucket_brigade *bb;
+ apr_bucket *b;
+ apr_status_t status;
+
+ if (strcmp(r->handler, "http2-status")) {
+ return DECLINED;
+ }
+ if (r->method_number != M_GET && r->method_number != M_POST) {
+ return DECLINED;
+ }
+
+ task = ctx? h2_ctx_get_task(ctx) : NULL;
+ if (task) {
+
+ if ((status = ap_discard_request_body(r)) != OK) {
+ return status;
+ }
+
+ /* We need to handle the actual output on the main thread, as
+ * we need to access h2_session information. */
+ r->status = 200;
+ r->clength = -1;
+ r->chunked = 1;
+ apr_table_unset(r->headers_out, "Content-Length");
+ ap_set_content_type(r, "application/json");
+ apr_table_setn(r->notes, H2_FILTER_DEBUG_NOTE, "on");
+
+ bb = apr_brigade_create(r->pool, c->bucket_alloc);
+ b = h2_bucket_observer_create(c->bucket_alloc, status_event, task);
+ APR_BRIGADE_INSERT_TAIL(bb, b);
+ b = apr_bucket_eos_create(c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, b);
+
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
+ "status_handler(%s): checking for incoming trailers",
+ task->id);
+ if (r->trailers_in && !apr_is_empty_table(r->trailers_in)) {
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
+ "status_handler(%s): seeing incoming trailers",
+ task->id);
+ apr_table_setn(r->trailers_out, "h2-trailers-in",
+ apr_itoa(r->pool, 1));
+ }
+
+ status = ap_pass_brigade(r->output_filters, bb);
+ if (status == APR_SUCCESS
+ || r->status != HTTP_OK
+ || c->aborted) {
+ return OK;
+ }
+ else {
+ /* no way to know what type of error occurred */
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE1, status, r,
+ "status_handler(%s): ap_pass_brigade failed",
+ task->id);
+ return AP_FILTER_ERROR;
+ }
+ }
+ return DECLINED;
+}
+
diff -up --new-file httpd-2.4.23/modules/http2/h2_filter.h /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_filter.h
--- httpd-2.4.23/modules/http2/h2_filter.h 2016-05-23 12:55:29.000000000 +0200
+++ /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_filter.h 2017-03-31 21:41:01.000000000 +0200
@@ -16,24 +16,21 @@
#ifndef __mod_h2__h2_filter__
#define __mod_h2__h2_filter__
+struct h2_bucket_beam;
+struct h2_headers;
struct h2_stream;
struct h2_session;
-typedef apr_status_t h2_filter_cin_cb(void *ctx,
- const char *data, apr_size_t len,
- apr_size_t *readlen);
-
typedef struct h2_filter_cin {
apr_pool_t *pool;
- apr_bucket_brigade *bb;
- h2_filter_cin_cb *cb;
- void *cb_ctx;
apr_socket_t *socket;
apr_interval_time_t timeout;
- apr_time_t start_read;
+ apr_bucket_brigade *bb;
+ struct h2_session *session;
+ apr_bucket *cur;
} h2_filter_cin;
-h2_filter_cin *h2_filter_cin_create(apr_pool_t *p, h2_filter_cin_cb *cb, void *ctx);
+h2_filter_cin *h2_filter_cin_create(struct h2_session *session);
void h2_filter_cin_timeout_set(h2_filter_cin *cin, apr_interval_time_t timeout);
@@ -43,9 +40,33 @@ apr_status_t h2_filter_core_input(ap_fil
apr_read_type_e block,
apr_off_t readbytes);
-#define H2_RESP_SOS_NOTE "h2-sos-filter"
+/******* observer bucket ******************************************************/
+
+typedef enum {
+ H2_BUCKET_EV_BEFORE_DESTROY,
+ H2_BUCKET_EV_BEFORE_MASTER_SEND
+} h2_bucket_event;
+
+extern const apr_bucket_type_t h2_bucket_type_observer;
+
+typedef apr_status_t h2_bucket_event_cb(void *ctx, h2_bucket_event event, apr_bucket *b);
+
+#define H2_BUCKET_IS_OBSERVER(e) (e->type == &h2_bucket_type_observer)
+
+apr_bucket * h2_bucket_observer_make(apr_bucket *b, h2_bucket_event_cb *cb,
+ void *ctx);
+
+apr_bucket * h2_bucket_observer_create(apr_bucket_alloc_t *list,
+ h2_bucket_event_cb *cb, void *ctx);
+
+apr_status_t h2_bucket_observer_fire(apr_bucket *b, h2_bucket_event event);
+
+apr_bucket *h2_bucket_observer_beam(struct h2_bucket_beam *beam,
+ apr_bucket_brigade *dest,
+ const apr_bucket *src);
+
+/******* /.well-known/h2/state handler ****************************************/
-apr_status_t h2_stream_filter(struct h2_stream *stream);
int h2_filter_h2_status_handler(request_rec *r);
#endif /* __mod_h2__h2_filter__ */
diff -up --new-file httpd-2.4.23/modules/http2/h2_from_h1.c /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_from_h1.c
--- httpd-2.4.23/modules/http2/h2_from_h1.c 2016-05-18 17:10:20.000000000 +0200
+++ /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_from_h1.c 2017-05-05 15:23:48.000000000 +0200
@@ -16,6 +16,7 @@
#include <assert.h>
#include <stdio.h>
+#include <apr_date.h>
#include <apr_lib.h>
#include <apr_strings.h>
@@ -28,190 +29,12 @@
#include <util_time.h>
#include "h2_private.h"
-#include "h2_response.h"
+#include "h2_headers.h"
#include "h2_from_h1.h"
#include "h2_task.h"
#include "h2_util.h"
-static void set_state(h2_from_h1 *from_h1, h2_from_h1_state_t state);
-
-h2_from_h1 *h2_from_h1_create(int stream_id, apr_pool_t *pool)
-{
- h2_from_h1 *from_h1 = apr_pcalloc(pool, sizeof(h2_from_h1));
- if (from_h1) {
- from_h1->stream_id = stream_id;
- from_h1->pool = pool;
- from_h1->state = H2_RESP_ST_STATUS_LINE;
- from_h1->hlines = apr_array_make(pool, 10, sizeof(char *));
- }
- return from_h1;
-}
-
-static void set_state(h2_from_h1 *from_h1, h2_from_h1_state_t state)
-{
- if (from_h1->state != state) {
- from_h1->state = state;
- }
-}
-
-h2_response *h2_from_h1_get_response(h2_from_h1 *from_h1)
-{
- return from_h1->response;
-}
-
-static apr_status_t make_h2_headers(h2_from_h1 *from_h1, request_rec *r)
-{
- from_h1->response = h2_response_create(from_h1->stream_id, 0,
- from_h1->http_status,
- from_h1->hlines,
- r->notes,
- from_h1->pool);
- from_h1->content_length = from_h1->response->content_length;
- from_h1->chunked = r->chunked;
-
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, r->connection, APLOGNO(03197)
- "h2_from_h1(%d): converted headers, content-length: %d"
- ", chunked=%d",
- from_h1->stream_id, (int)from_h1->content_length,
- (int)from_h1->chunked);
-
- set_state(from_h1, ((from_h1->chunked || from_h1->content_length > 0)?
- H2_RESP_ST_BODY : H2_RESP_ST_DONE));
- /* We are ready to be sent to the client */
- return APR_SUCCESS;
-}
-
-static apr_status_t parse_header(h2_from_h1 *from_h1, ap_filter_t* f,
- char *line) {
- (void)f;
-
- if (line[0] == ' ' || line[0] == '\t') {
- char **plast;
- /* continuation line from the header before this */
- while (line[0] == ' ' || line[0] == '\t') {
- ++line;
- }
-
- plast = apr_array_pop(from_h1->hlines);
- if (plast == NULL) {
- /* not well formed */
- return APR_EINVAL;
- }
- APR_ARRAY_PUSH(from_h1->hlines, const char*) = apr_psprintf(from_h1->pool, "%s %s", *plast, line);
- }
- else {
- /* new header line */
- APR_ARRAY_PUSH(from_h1->hlines, const char*) = apr_pstrdup(from_h1->pool, line);
- }
- return APR_SUCCESS;
-}
-
-static apr_status_t get_line(h2_from_h1 *from_h1, apr_bucket_brigade *bb,
- ap_filter_t* f, char *line, apr_size_t len)
-{
- apr_status_t status;
- if (!from_h1->bb) {
- from_h1->bb = apr_brigade_create(from_h1->pool, f->c->bucket_alloc);
- }
- else {
- apr_brigade_cleanup(from_h1->bb);
- }
- status = apr_brigade_split_line(from_h1->bb, bb,
- APR_BLOCK_READ,
- HUGE_STRING_LEN);
- if (status == APR_SUCCESS) {
- --len;
- status = apr_brigade_flatten(from_h1->bb, line, &len);
- if (status == APR_SUCCESS) {
- /* we assume a non-0 containing line and remove
- * trailing crlf. */
- line[len] = '\0';
- if (len >= 2 && !strcmp(H2_CRLF, line + len - 2)) {
- len -= 2;
- line[len] = '\0';
- }
-
- apr_brigade_cleanup(from_h1->bb);
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c,
- "h2_from_h1(%d): read line: %s",
- from_h1->stream_id, line);
- }
- }
- return status;
-}
-
-apr_status_t h2_from_h1_read_response(h2_from_h1 *from_h1, ap_filter_t* f,
- apr_bucket_brigade* bb)
-{
- apr_status_t status = APR_SUCCESS;
- char line[HUGE_STRING_LEN];
-
- if ((from_h1->state == H2_RESP_ST_BODY)
- || (from_h1->state == H2_RESP_ST_DONE)) {
- if (from_h1->chunked) {
- /* The httpd core HTTP_HEADER filter has or will install the
- * "CHUNK" output transcode filter, which appears further down
- * the filter chain. We do not want it for HTTP/2.
- * Once we successfully deinstalled it, this filter has no
- * further function and we remove it.
- */
- status = ap_remove_output_filter_byhandle(f->r->output_filters,
- "CHUNK");
- if (status == APR_SUCCESS) {
- ap_remove_output_filter(f);
- }
- }
-
- return ap_pass_brigade(f->next, bb);
- }
-
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c,
- "h2_from_h1(%d): read_response", from_h1->stream_id);
-
- while (!APR_BRIGADE_EMPTY(bb) && status == APR_SUCCESS) {
-
- switch (from_h1->state) {
-
- case H2_RESP_ST_STATUS_LINE:
- case H2_RESP_ST_HEADERS:
- status = get_line(from_h1, bb, f, line, sizeof(line));
- if (status != APR_SUCCESS) {
- return status;
- }
- if (from_h1->state == H2_RESP_ST_STATUS_LINE) {
- /* instead of parsing, just take it directly */
- from_h1->http_status = f->r->status;
- from_h1->state = H2_RESP_ST_HEADERS;
- }
- else if (line[0] == '\0') {
- /* end of headers, create the h2_response and
- * pass the rest of the brigade down the filter
- * chain.
- */
- status = make_h2_headers(from_h1, f->r);
- if (from_h1->bb) {
- apr_brigade_destroy(from_h1->bb);
- from_h1->bb = NULL;
- }
- if (!APR_BRIGADE_EMPTY(bb)) {
- return ap_pass_brigade(f->next, bb);
- }
- }
- else {
- status = parse_header(from_h1, f, line);
- }
- break;
-
- default:
- return ap_pass_brigade(f->next, bb);
- }
-
- }
-
- return status;
-}
-
/* This routine is called by apr_table_do and merges all instances of
* the passed field values into a single array that will be further
* processed by some later routine. Originally intended to help split
@@ -280,8 +103,7 @@ static void fix_vary(request_rec *r)
* its comma-separated fieldname values, and then add them to varies
* if not already present in the array.
*/
- apr_table_do((int (*)(void *, const char *, const char *))uniq_field_values,
- (void *) varies, r->headers_out, "Vary", NULL);
+ apr_table_do(uniq_field_values, varies, r->headers_out, "Vary", NULL);
/* If we found any, replace old Vary fields with unique-ified value */
@@ -291,8 +113,8 @@ static void fix_vary(request_rec *r)
}
}
-void h2_from_h1_set_basic_http_header(apr_table_t *headers, request_rec *r,
- apr_pool_t *pool)
+static void set_basic_http_header(apr_table_t *headers, request_rec *r,
+ apr_pool_t *pool)
{
char *date = NULL;
const char *proxy_date = NULL;
@@ -345,7 +167,7 @@ static int copy_header(void *ctx, const
return 1;
}
-static h2_response *create_response(h2_from_h1 *from_h1, request_rec *r)
+static h2_headers *create_response(h2_task *task, request_rec *r)
{
const char *clheader;
const char *ctype;
@@ -450,10 +272,9 @@ static h2_response *create_response(h2_f
headers = apr_table_make(r->pool, 10);
- h2_from_h1_set_basic_http_header(headers, r, r->pool);
+ set_basic_http_header(headers, r, r->pool);
if (r->status == HTTP_NOT_MODIFIED) {
- apr_table_do((int (*)(void *, const char *, const char *)) copy_header,
- (void *) headers, r->headers_out,
+ apr_table_do(copy_header, headers, r->headers_out,
"ETag",
"Content-Location",
"Expires",
@@ -467,118 +288,570 @@ static h2_response *create_response(h2_f
NULL);
}
else {
- apr_table_do((int (*)(void *, const char *, const char *)) copy_header,
- (void *) headers, r->headers_out, NULL);
+ apr_table_do(copy_header, headers, r->headers_out, NULL);
}
- return h2_response_rcreate(from_h1->stream_id, r, headers, r->pool);
+ return h2_headers_rcreate(r, r->status, headers, r->pool);
+}
+
+typedef enum {
+ H2_RP_STATUS_LINE,
+ H2_RP_HEADER_LINE,
+ H2_RP_DONE
+} h2_rp_state_t;
+
+typedef struct h2_response_parser {
+ h2_rp_state_t state;
+ h2_task *task;
+ int http_status;
+ apr_array_header_t *hlines;
+ apr_bucket_brigade *tmp;
+} h2_response_parser;
+
+static apr_status_t parse_header(h2_response_parser *parser, char *line) {
+ const char *hline;
+ if (line[0] == ' ' || line[0] == '\t') {
+ char **plast;
+ /* continuation line from the header before this */
+ while (line[0] == ' ' || line[0] == '\t') {
+ ++line;
+ }
+
+ plast = apr_array_pop(parser->hlines);
+ if (plast == NULL) {
+ /* not well formed */
+ return APR_EINVAL;
+ }
+ hline = apr_psprintf(parser->task->pool, "%s %s", *plast, line);
+ }
+ else {
+ /* new header line */
+ hline = apr_pstrdup(parser->task->pool, line);
+ }
+ APR_ARRAY_PUSH(parser->hlines, const char*) = hline;
+ return APR_SUCCESS;
}
-apr_status_t h2_response_output_filter(ap_filter_t *f, apr_bucket_brigade *bb)
+static apr_status_t get_line(h2_response_parser *parser, apr_bucket_brigade *bb,
+ char *line, apr_size_t len)
{
- h2_task *task = f->ctx;
- h2_from_h1 *from_h1 = task->output.from_h1;
- request_rec *r = f->r;
- apr_bucket *b;
- ap_bucket_error *eb = NULL;
+ h2_task *task = parser->task;
+ apr_status_t status;
+
+ if (!parser->tmp) {
+ parser->tmp = apr_brigade_create(task->pool, task->c->bucket_alloc);
+ }
+ status = apr_brigade_split_line(parser->tmp, bb, APR_BLOCK_READ,
+ HUGE_STRING_LEN);
+ if (status == APR_SUCCESS) {
+ --len;
+ status = apr_brigade_flatten(parser->tmp, line, &len);
+ if (status == APR_SUCCESS) {
+ /* we assume a non-0 containing line and remove trailing crlf. */
+ line[len] = '\0';
+ if (len >= 2 && !strcmp(H2_CRLF, line + len - 2)) {
+ len -= 2;
+ line[len] = '\0';
+ apr_brigade_cleanup(parser->tmp);
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, task->c,
+ "h2_task(%s): read response line: %s",
+ task->id, line);
+ }
+ else {
+ /* this does not look like a complete line yet */
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, task->c,
+ "h2_task(%s): read response, incomplete line: %s",
+ task->id, line);
+ return APR_EAGAIN;
+ }
+ }
+ }
+ apr_brigade_cleanup(parser->tmp);
+ return status;
+}
- AP_DEBUG_ASSERT(from_h1 != NULL);
+static apr_table_t *make_table(h2_response_parser *parser)
+{
+ h2_task *task = parser->task;
+ apr_array_header_t *hlines = parser->hlines;
+ if (hlines) {
+ apr_table_t *headers = apr_table_make(task->pool, hlines->nelts);
+ int i;
+
+ for (i = 0; i < hlines->nelts; ++i) {
+ char *hline = ((char **)hlines->elts)[i];
+ char *sep = ap_strchr(hline, ':');
+ if (!sep) {
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, APR_EINVAL, task->c,
+ APLOGNO(02955) "h2_task(%s): invalid header[%d] '%s'",
+ task->id, i, (char*)hline);
+ /* not valid format, abort */
+ return NULL;
+ }
+ (*sep++) = '\0';
+ while (*sep == ' ' || *sep == '\t') {
+ ++sep;
+ }
+
+ if (!h2_util_ignore_header(hline)) {
+ apr_table_merge(headers, hline, sep);
+ }
+ }
+ return headers;
+ }
+ else {
+ return apr_table_make(task->pool, 0);
+ }
+}
+
+static apr_status_t pass_response(h2_task *task, ap_filter_t *f,
+ h2_response_parser *parser)
+{
+ apr_bucket *b;
+ apr_status_t status;
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c,
- "h2_from_h1(%d): output_filter called", from_h1->stream_id);
+ h2_headers *response = h2_headers_create(parser->http_status,
+ make_table(parser),
+ NULL, task->pool);
+ apr_brigade_cleanup(parser->tmp);
+ b = h2_bucket_headers_create(task->c->bucket_alloc, response);
+ APR_BRIGADE_INSERT_TAIL(parser->tmp, b);
+ b = apr_bucket_flush_create(task->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(parser->tmp, b);
+ status = ap_pass_brigade(f->next, parser->tmp);
+ apr_brigade_cleanup(parser->tmp);
+
+ /* reset parser for possible next response */
+ parser->state = H2_RP_STATUS_LINE;
+ apr_array_clear(parser->hlines);
+
+ if (response->status >= 200) {
+ task->output.sent_response = 1;
+ }
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, task->c,
+ APLOGNO(03197) "h2_task(%s): passed response %d",
+ task->id, response->status);
+ return status;
+}
+
+static apr_status_t parse_status(h2_task *task, char *line)
+{
+ h2_response_parser *parser = task->output.rparser;
+ int sindex = (apr_date_checkmask(line, "HTTP/#.# ###*")? 9 :
+ (apr_date_checkmask(line, "HTTP/# ###*")? 7 : 0));
+ if (sindex > 0) {
+ int k = sindex + 3;
+ char keepchar = line[k];
+ line[k] = '\0';
+ parser->http_status = atoi(&line[sindex]);
+ line[k] = keepchar;
+ parser->state = H2_RP_HEADER_LINE;
+
+ return APR_SUCCESS;
+ }
+ /* Seems like there is garbage on the connection. May be a leftover
+ * from a previous proxy request.
+ * This should only happen if the H2_RESPONSE filter is not yet in
+ * place (post_read_request has not been reached and the handler wants
+ * to write something). Probably just the interim response we are
+ * waiting for. But if there is other data hanging around before
+ * that, this needs to fail. */
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, task->c, APLOGNO(03467)
+ "h2_task(%s): unable to parse status line: %s",
+ task->id, line);
+ return APR_EINVAL;
+}
+
+apr_status_t h2_from_h1_parse_response(h2_task *task, ap_filter_t *f,
+ apr_bucket_brigade *bb)
+{
+ h2_response_parser *parser = task->output.rparser;
+ char line[HUGE_STRING_LEN];
+ apr_status_t status = APR_SUCCESS;
+
+ if (!parser) {
+ parser = apr_pcalloc(task->pool, sizeof(*parser));
+ parser->task = task;
+ parser->state = H2_RP_STATUS_LINE;
+ parser->hlines = apr_array_make(task->pool, 10, sizeof(char *));
+ task->output.rparser = parser;
+ }
- if (r->header_only && from_h1->response) {
- /* throw away any data after we have compiled the response */
- apr_brigade_cleanup(bb);
- return OK;
- }
-
- for (b = APR_BRIGADE_FIRST(bb);
- b != APR_BRIGADE_SENTINEL(bb);
- b = APR_BUCKET_NEXT(b))
- {
- if (AP_BUCKET_IS_ERROR(b) && !eb) {
- eb = b->data;
- continue;
- }
- /*
- * If we see an EOC bucket it is a signal that we should get out
- * of the way doing nothing.
- */
- if (AP_BUCKET_IS_EOC(b)) {
- ap_remove_output_filter(f);
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, f->c,
- "h2_from_h1(%d): eoc bucket passed",
- from_h1->stream_id);
- return ap_pass_brigade(f->next, bb);
+ while (!APR_BRIGADE_EMPTY(bb) && status == APR_SUCCESS) {
+ switch (parser->state) {
+ case H2_RP_STATUS_LINE:
+ case H2_RP_HEADER_LINE:
+ status = get_line(parser, bb, line, sizeof(line));
+ if (status == APR_EAGAIN) {
+ /* need more data */
+ return APR_SUCCESS;
+ }
+ else if (status != APR_SUCCESS) {
+ return status;
+ }
+ if (parser->state == H2_RP_STATUS_LINE) {
+ /* instead of parsing, just take it directly */
+ status = parse_status(task, line);
+ }
+ else if (line[0] == '\0') {
+ /* end of headers, pass response onward */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c,
+ "h2_task(%s): end of response", task->id);
+ return pass_response(task, f, parser);
+ }
+ else {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c,
+ "h2_task(%s): response header %s", task->id, line);
+ status = parse_header(parser, line);
+ }
+ break;
+
+ default:
+ return status;
}
}
+ return status;
+}
+
+apr_status_t h2_filter_headers_out(ap_filter_t *f, apr_bucket_brigade *bb)
+{
+ h2_task *task = f->ctx;
+ request_rec *r = f->r;
+ apr_bucket *b, *bresp, *body_bucket = NULL, *next;
+ ap_bucket_error *eb = NULL;
+ h2_headers *response = NULL;
+ int headers_passing = 0;
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c,
+ "h2_task(%s): output_filter called", task->id);
- if (eb) {
- int st = eb->status;
- apr_brigade_cleanup(bb);
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, f->c, APLOGNO(03047)
- "h2_from_h1(%d): err bucket status=%d",
- from_h1->stream_id, st);
- ap_die(st, r);
- return AP_FILTER_ERROR;
- }
-
- from_h1->response = create_response(from_h1, r);
- if (from_h1->response == NULL) {
- ap_log_cerror(APLOG_MARK, APLOG_NOTICE, 0, f->c, APLOGNO(03048)
- "h2_from_h1(%d): unable to create response",
- from_h1->stream_id);
- return APR_ENOMEM;
+ if (!task->output.sent_response && !f->c->aborted) {
+ /* check, if we need to send the response now. Until we actually
+ * see a DATA bucket or some EOS/EOR, we do not do so. */
+ for (b = APR_BRIGADE_FIRST(bb);
+ b != APR_BRIGADE_SENTINEL(bb);
+ b = APR_BUCKET_NEXT(b))
+ {
+ if (AP_BUCKET_IS_ERROR(b) && !eb) {
+ eb = b->data;
+ }
+ else if (AP_BUCKET_IS_EOC(b)) {
+ /* If we see an EOC bucket it is a signal that we should get out
+ * of the way doing nothing.
+ */
+ ap_remove_output_filter(f);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, f->c,
+ "h2_task(%s): eoc bucket passed", task->id);
+ return ap_pass_brigade(f->next, bb);
+ }
+ else if (H2_BUCKET_IS_HEADERS(b)) {
+ headers_passing = 1;
+ }
+ else if (!APR_BUCKET_IS_FLUSH(b)) {
+ body_bucket = b;
+ break;
+ }
+ }
+
+ if (eb) {
+ int st = eb->status;
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, f->c, APLOGNO(03047)
+ "h2_task(%s): err bucket status=%d", task->id, st);
+ /* throw everything away and replace it with the error response
+ * generated by ap_die() */
+ apr_brigade_cleanup(bb);
+ ap_die(st, r);
+ return AP_FILTER_ERROR;
+ }
+
+ if (body_bucket || !headers_passing) {
+ /* time to insert the response bucket before the body or if
+ * no h2_headers is passed, e.g. the response is empty */
+ response = create_response(task, r);
+ if (response == NULL) {
+ ap_log_cerror(APLOG_MARK, APLOG_NOTICE, 0, f->c, APLOGNO(03048)
+ "h2_task(%s): unable to create response", task->id);
+ return APR_ENOMEM;
+ }
+
+ bresp = h2_bucket_headers_create(f->c->bucket_alloc, response);
+ if (body_bucket) {
+ APR_BUCKET_INSERT_BEFORE(body_bucket, bresp);
+ }
+ else {
+ APR_BRIGADE_INSERT_HEAD(bb, bresp);
+ }
+ task->output.sent_response = 1;
+ r->sent_bodyct = 1;
+ }
}
if (r->header_only) {
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c,
- "h2_from_h1(%d): header_only, cleanup output brigade",
- from_h1->stream_id);
- apr_brigade_cleanup(bb);
- return OK;
+ "h2_task(%s): header_only, cleanup output brigade",
+ task->id);
+ b = body_bucket? body_bucket : APR_BRIGADE_FIRST(bb);
+ while (b != APR_BRIGADE_SENTINEL(bb)) {
+ next = APR_BUCKET_NEXT(b);
+ if (APR_BUCKET_IS_EOS(b) || AP_BUCKET_IS_EOR(b)) {
+ break;
+ }
+ APR_BUCKET_REMOVE(b);
+ apr_bucket_destroy(b);
+ b = next;
+ }
}
-
- r->sent_bodyct = 1; /* Whatever follows is real body stuff... */
-
- ap_remove_output_filter(f);
- if (APLOGctrace1(f->c)) {
- apr_off_t len = 0;
- apr_brigade_length(bb, 0, &len);
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c,
- "h2_from_h1(%d): removed header filter, passing brigade "
- "len=%ld", from_h1->stream_id, (long)len);
+ else if (task->output.sent_response) {
+ /* lets get out of the way, our task is done */
+ ap_remove_output_filter(f);
}
return ap_pass_brigade(f->next, bb);
}
-apr_status_t h2_response_trailers_filter(ap_filter_t *f, apr_bucket_brigade *bb)
+static void make_chunk(h2_task *task, apr_bucket_brigade *bb,
+ apr_bucket *first, apr_off_t chunk_len,
+ apr_bucket *tail)
+{
+ /* Surround the buckets [first, tail[ with new buckets carrying the
+ * HTTP/1.1 chunked encoding format. If tail is NULL, the chunk extends
+ * to the end of the brigade. */
+ char buffer[128];
+ apr_bucket *c;
+ int len;
+
+ len = apr_snprintf(buffer, H2_ALEN(buffer),
+ "%"APR_UINT64_T_HEX_FMT"\r\n", (apr_uint64_t)chunk_len);
+ c = apr_bucket_heap_create(buffer, len, NULL, bb->bucket_alloc);
+ APR_BUCKET_INSERT_BEFORE(first, c);
+ c = apr_bucket_heap_create("\r\n", 2, NULL, bb->bucket_alloc);
+ if (tail) {
+ APR_BUCKET_INSERT_BEFORE(tail, c);
+ }
+ else {
+ APR_BRIGADE_INSERT_TAIL(bb, c);
+ }
+ task->input.chunked_total += chunk_len;
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, task->c,
+ "h2_task(%s): added chunk %ld, total %ld",
+ task->id, (long)chunk_len, (long)task->input.chunked_total);
+}
+
+static int ser_header(void *ctx, const char *name, const char *value)
+{
+ apr_bucket_brigade *bb = ctx;
+ apr_brigade_printf(bb, NULL, NULL, "%s: %s\r\n", name, value);
+ return 1;
+}
+
+static apr_status_t read_and_chunk(ap_filter_t *f, h2_task *task,
+ apr_read_type_e block) {
+ request_rec *r = f->r;
+ apr_status_t status = APR_SUCCESS;
+ apr_bucket_brigade *bb = task->input.bbchunk;
+
+ if (!bb) {
+ bb = apr_brigade_create(r->pool, f->c->bucket_alloc);
+ task->input.bbchunk = bb;
+ }
+
+ if (APR_BRIGADE_EMPTY(bb)) {
+ apr_bucket *b, *next, *first_data = NULL;
+ apr_bucket_brigade *tmp;
+ apr_off_t bblen = 0;
+
+ /* get more data from the lower layer filters. Always do this
+ * in larger pieces, since we handle the read modes ourself. */
+ status = ap_get_brigade(f->next, bb,
+ AP_MODE_READBYTES, block, 32*1024);
+ if (status == APR_EOF) {
+ if (!task->input.eos) {
+ status = apr_brigade_puts(bb, NULL, NULL, "0\r\n\r\n");
+ task->input.eos = 1;
+ return APR_SUCCESS;
+ }
+ ap_remove_input_filter(f);
+ return status;
+
+ }
+ else if (status != APR_SUCCESS) {
+ return status;
+ }
+
+ for (b = APR_BRIGADE_FIRST(bb);
+ b != APR_BRIGADE_SENTINEL(bb) && !task->input.eos;
+ b = next) {
+ next = APR_BUCKET_NEXT(b);
+ if (APR_BUCKET_IS_METADATA(b)) {
+ if (first_data) {
+ make_chunk(task, bb, first_data, bblen, b);
+ first_data = NULL;
+ }
+
+ if (H2_BUCKET_IS_HEADERS(b)) {
+ h2_headers *headers = h2_bucket_headers_get(b);
+
+ ap_assert(headers);
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
+ "h2_task(%s): receiving trailers", task->id);
+ tmp = apr_brigade_split_ex(bb, b, NULL);
+ if (!apr_is_empty_table(headers->headers)) {
+ status = apr_brigade_puts(bb, NULL, NULL, "0\r\n");
+ apr_table_do(ser_header, bb, headers->headers, NULL);
+ status = apr_brigade_puts(bb, NULL, NULL, "\r\n");
+ }
+ else {
+ status = apr_brigade_puts(bb, NULL, NULL, "0\r\n\r\n");
+ }
+ r->trailers_in = apr_table_clone(r->pool, headers->headers);
+ APR_BUCKET_REMOVE(b);
+ apr_bucket_destroy(b);
+ APR_BRIGADE_CONCAT(bb, tmp);
+ apr_brigade_destroy(tmp);
+ task->input.eos = 1;
+ }
+ else if (APR_BUCKET_IS_EOS(b)) {
+ tmp = apr_brigade_split_ex(bb, b, NULL);
+ status = apr_brigade_puts(bb, NULL, NULL, "0\r\n\r\n");
+ APR_BRIGADE_CONCAT(bb, tmp);
+ apr_brigade_destroy(tmp);
+ task->input.eos = 1;
+ }
+ }
+ else if (b->length == 0) {
+ APR_BUCKET_REMOVE(b);
+ apr_bucket_destroy(b);
+ }
+ else {
+ if (!first_data) {
+ first_data = b;
+ bblen = 0;
+ }
+ bblen += b->length;
+ }
+ }
+
+ if (first_data) {
+ make_chunk(task, bb, first_data, bblen, NULL);
+ }
+ }
+ return status;
+}
+
+apr_status_t h2_filter_request_in(ap_filter_t* f,
+ apr_bucket_brigade* bb,
+ ap_input_mode_t mode,
+ apr_read_type_e block,
+ apr_off_t readbytes)
{
h2_task *task = f->ctx;
- h2_from_h1 *from_h1 = task->output.from_h1;
request_rec *r = f->r;
- apr_bucket *b;
+ apr_status_t status = APR_SUCCESS;
+ apr_bucket *b, *next;
+ core_server_config *conf =
+ (core_server_config *) ap_get_module_config(r->server->module_config,
+ &core_module);
+
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, f->r,
+ "h2_task(%s): request filter, exp=%d", task->id, r->expecting_100);
+ if (!task->request->chunked) {
+ status = ap_get_brigade(f->next, bb, mode, block, readbytes);
+ /* pipe data through, just take care of trailers */
+ for (b = APR_BRIGADE_FIRST(bb);
+ b != APR_BRIGADE_SENTINEL(bb); b = next) {
+ next = APR_BUCKET_NEXT(b);
+ if (H2_BUCKET_IS_HEADERS(b)) {
+ h2_headers *headers = h2_bucket_headers_get(b);
+ ap_assert(headers);
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
+ "h2_task(%s): receiving trailers", task->id);
+ r->trailers_in = headers->headers;
+ if (conf && conf->merge_trailers == AP_MERGE_TRAILERS_ENABLE) {
+ r->headers_in = apr_table_overlay(r->pool, r->headers_in,
+ r->trailers_in);
+ }
+ APR_BUCKET_REMOVE(b);
+ apr_bucket_destroy(b);
+ ap_remove_input_filter(f);
+ break;
+ }
+ }
+ return status;
+ }
+
+ /* Things are more complicated. The standard HTTP input filter, which
+ * does a lot what we do not want to duplicate, also cares about chunked
+ * transfer encoding and trailers.
+ * We need to simulate chunked encoding for it to be happy.
+ */
+ if ((status = read_and_chunk(f, task, block)) != APR_SUCCESS) {
+ return status;
+ }
+
+ if (mode == AP_MODE_EXHAUSTIVE) {
+ /* return all we have */
+ APR_BRIGADE_CONCAT(bb, task->input.bbchunk);
+ }
+ else if (mode == AP_MODE_READBYTES) {
+ status = h2_brigade_concat_length(bb, task->input.bbchunk, readbytes);
+ }
+ else if (mode == AP_MODE_SPECULATIVE) {
+ status = h2_brigade_copy_length(bb, task->input.bbchunk, readbytes);
+ }
+ else if (mode == AP_MODE_GETLINE) {
+ /* we are reading a single LF line, e.g. the HTTP headers.
+ * this has the nasty side effect to split the bucket, even
+ * though it ends with CRLF and creates a 0 length bucket */
+ status = apr_brigade_split_line(bb, task->input.bbchunk, block,
+ HUGE_STRING_LEN);
+ if (APLOGctrace1(f->c)) {
+ char buffer[1024];
+ apr_size_t len = sizeof(buffer)-1;
+ apr_brigade_flatten(bb, buffer, &len);
+ buffer[len] = 0;
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c,
+ "h2_task(%s): getline: %s",
+ task->id, buffer);
+ }
+ }
+ else {
+ /* Hmm, well. There is mode AP_MODE_EATCRLF, but we chose not
+ * to support it. Seems to work. */
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_ENOTIMPL, f->c,
+ APLOGNO(02942)
+ "h2_task, unsupported READ mode %d", mode);
+ status = APR_ENOTIMPL;
+ }
+
+ h2_util_bb_log(f->c, task->stream_id, APLOG_TRACE2, "forwarding input", bb);
+ return status;
+}
+
+apr_status_t h2_filter_trailers_out(ap_filter_t *f, apr_bucket_brigade *bb)
+{
+ h2_task *task = f->ctx;
+ request_rec *r = f->r;
+ apr_bucket *b, *e;
- if (from_h1 && from_h1->response) {
- /* Detect the EOR bucket and forward any trailers that may have
- * been set to our h2_response.
+ if (task && r) {
+ /* Detect the EOS/EOR bucket and forward any trailers that may have
+ * been set to our h2_headers.
*/
for (b = APR_BRIGADE_FIRST(bb);
b != APR_BRIGADE_SENTINEL(bb);
b = APR_BUCKET_NEXT(b))
{
- if (AP_BUCKET_IS_EOR(b)) {
- /* FIXME: need a better test case than this.
- apr_table_setn(r->trailers_out, "X", "1"); */
- if (r->trailers_out && !apr_is_empty_table(r->trailers_out)) {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, f->c, APLOGNO(03049)
- "h2_from_h1(%d): trailers filter, saving trailers",
- from_h1->stream_id);
- h2_response_set_trailers(from_h1->response,
- apr_table_clone(from_h1->pool,
- r->trailers_out));
- }
+ if ((APR_BUCKET_IS_EOS(b) || AP_BUCKET_IS_EOR(b))
+ && r->trailers_out && !apr_is_empty_table(r->trailers_out)) {
+ h2_headers *headers;
+ apr_table_t *trailers;
+
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, f->c, APLOGNO(03049)
+ "h2_task(%s): sending trailers", task->id);
+ trailers = apr_table_clone(r->pool, r->trailers_out);
+ headers = h2_headers_rcreate(r, HTTP_OK, trailers, r->pool);
+ e = h2_bucket_headers_create(bb->bucket_alloc, headers);
+ APR_BUCKET_INSERT_BEFORE(b, e);
+ apr_table_clear(r->trailers_out);
+ ap_remove_output_filter(f);
break;
}
}
diff -up --new-file httpd-2.4.23/modules/http2/h2_from_h1.h /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_from_h1.h
--- httpd-2.4.23/modules/http2/h2_from_h1.h 2016-05-18 17:10:20.000000000 +0200
+++ /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_from_h1.h 2016-10-17 18:11:12.000000000 +0200
@@ -30,46 +30,20 @@
* we need to have all handlers and filters involved in request/response
* processing, so this seems to be the way for now.
*/
+struct h2_headers;
+struct h2_task;
-typedef enum {
- H2_RESP_ST_STATUS_LINE, /* parsing http/1 status line */
- H2_RESP_ST_HEADERS, /* parsing http/1 response headers */
- H2_RESP_ST_BODY, /* transferring response body */
- H2_RESP_ST_DONE /* complete response converted */
-} h2_from_h1_state_t;
+apr_status_t h2_from_h1_parse_response(struct h2_task *task, ap_filter_t *f,
+ apr_bucket_brigade *bb);
-struct h2_response;
+apr_status_t h2_filter_headers_out(ap_filter_t *f, apr_bucket_brigade *bb);
-typedef struct h2_from_h1 h2_from_h1;
+apr_status_t h2_filter_request_in(ap_filter_t* f,
+ apr_bucket_brigade* brigade,
+ ap_input_mode_t mode,
+ apr_read_type_e block,
+ apr_off_t readbytes);
-struct h2_from_h1 {
- int stream_id;
- h2_from_h1_state_t state;
- apr_pool_t *pool;
- apr_bucket_brigade *bb;
-
- apr_off_t content_length;
- int chunked;
-
- int http_status;
- apr_array_header_t *hlines;
-
- struct h2_response *response;
-};
-
-
-h2_from_h1 *h2_from_h1_create(int stream_id, apr_pool_t *pool);
-
-apr_status_t h2_from_h1_read_response(h2_from_h1 *from_h1,
- ap_filter_t* f, apr_bucket_brigade* bb);
-
-struct h2_response *h2_from_h1_get_response(h2_from_h1 *from_h1);
-
-apr_status_t h2_response_output_filter(ap_filter_t *f, apr_bucket_brigade *bb);
-
-apr_status_t h2_response_trailers_filter(ap_filter_t *f, apr_bucket_brigade *bb);
-
-void h2_from_h1_set_basic_http_header(apr_table_t *headers, request_rec *r,
- apr_pool_t *pool);
+apr_status_t h2_filter_trailers_out(ap_filter_t *f, apr_bucket_brigade *bb);
#endif /* defined(__mod_h2__h2_from_h1__) */
diff -up --new-file httpd-2.4.23/modules/http2/h2.h /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2.h
--- httpd-2.4.23/modules/http2/h2.h 2016-05-23 12:55:29.000000000 +0200
+++ /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2.h 2017-04-18 15:12:38.000000000 +0200
@@ -47,13 +47,14 @@ extern const char *H2_MAGIC_TOKEN;
#define H2_HEADER_PATH_LEN 5
#define H2_CRLF "\r\n"
+/* Max data size to write so it fits inside a TLS record */
+#define H2_DATA_CHUNK_SIZE ((16*1024) - 100 - 9)
+
/* Maximum number of padding bytes in a frame, rfc7540 */
#define H2_MAX_PADLEN 256
/* Initial default window size, RFC 7540 ch. 6.5.2 */
#define H2_INITIAL_WINDOW_SIZE ((64*1024)-1)
-#define H2_HTTP_2XX(a) ((a) >= 200 && (a) < 300)
-
#define H2_STREAM_CLIENT_INITIATED(id) (id&0x01)
#define H2_ALEN(a) (sizeof(a)/sizeof((a)[0]))
@@ -80,34 +81,44 @@ typedef enum {
} h2_push_policy;
typedef enum {
- H2_STREAM_ST_IDLE,
- H2_STREAM_ST_OPEN,
- H2_STREAM_ST_RESV_LOCAL,
- H2_STREAM_ST_RESV_REMOTE,
- H2_STREAM_ST_CLOSED_INPUT,
- H2_STREAM_ST_CLOSED_OUTPUT,
- H2_STREAM_ST_CLOSED,
-} h2_stream_state_t;
-
-typedef enum {
H2_SESSION_ST_INIT, /* send initial SETTINGS, etc. */
H2_SESSION_ST_DONE, /* finished, connection close */
H2_SESSION_ST_IDLE, /* nothing to write, expecting data inc */
H2_SESSION_ST_BUSY, /* read/write without stop */
H2_SESSION_ST_WAIT, /* waiting for tasks reporting back */
- H2_SESSION_ST_LOCAL_SHUTDOWN, /* we announced GOAWAY */
- H2_SESSION_ST_REMOTE_SHUTDOWN, /* client announced GOAWAY */
+ H2_SESSION_ST_CLEANUP, /* pool is being cleaned up */
} h2_session_state;
typedef struct h2_session_props {
- apr_uint32_t accepted_max; /* the highest remote stream id was/will be handled */
- apr_uint32_t completed_max; /* the highest remote stream completed */
- apr_uint32_t emitted_count; /* the number of local streams sent */
- apr_uint32_t emitted_max; /* the highest local stream id sent */
- apr_uint32_t error; /* the last session error encountered */
+ int accepted_max; /* the highest remote stream id was/will be handled */
+ int completed_max; /* the highest remote stream completed */
+ int emitted_count; /* the number of local streams sent */
+ int emitted_max; /* the highest local stream id sent */
+ int error; /* the last session error encountered */
unsigned int accepting : 1; /* if the session is accepting new streams */
+ unsigned int shutdown : 1; /* if the final GOAWAY has been sent */
} h2_session_props;
+typedef enum h2_stream_state_t {
+ H2_SS_IDLE,
+ H2_SS_RSVD_R,
+ H2_SS_RSVD_L,
+ H2_SS_OPEN,
+ H2_SS_CLOSED_R,
+ H2_SS_CLOSED_L,
+ H2_SS_CLOSED,
+ H2_SS_CLEANUP,
+ H2_SS_MAX
+} h2_stream_state_t;
+
+typedef enum {
+ H2_SEV_CLOSED_L,
+ H2_SEV_CLOSED_R,
+ H2_SEV_CANCELLED,
+ H2_SEV_EOS_SENT,
+ H2_SEV_IN_DATA_PENDING,
+} h2_stream_event_t;
+
/* h2_request is the transformer of HTTP2 streams into HTTP/1.1 internal
* format that will be fed to various httpd input filters to finally
@@ -116,37 +127,23 @@ typedef struct h2_session_props {
typedef struct h2_request h2_request;
struct h2_request {
- int id; /* stream id */
- int initiated_on; /* initiating stream id (PUSH) or 0 */
-
const char *method; /* pseudo header values, see ch. 8.1.2.3 */
const char *scheme;
const char *authority;
const char *path;
-
apr_table_t *headers;
- apr_table_t *trailers;
apr_time_t request_time;
- apr_off_t content_length;
-
- unsigned int chunked : 1; /* iff requst body needs to be forwarded as chunked */
- unsigned int eoh : 1; /* iff end-of-headers has been seen and request is complete */
- unsigned int body : 1; /* iff this request has a body */
+ unsigned int chunked : 1; /* iff request body needs to be forwarded as chunked */
unsigned int serialize : 1; /* iff this request is written in HTTP/1.1 serialization */
- unsigned int push_policy; /* which push policy to use for this request */
};
-typedef struct h2_response h2_response;
+typedef struct h2_headers h2_headers;
-struct h2_response {
- int stream_id;
- int rst_error;
- int http_status;
- apr_off_t content_length;
+struct h2_headers {
+ int status;
apr_table_t *headers;
- apr_table_t *trailers;
- const char *sos_filter;
+ apr_table_t *notes;
};
typedef apr_status_t h2_io_data_cb(void *ctx, const char *data, apr_off_t len);
@@ -155,7 +152,9 @@ typedef int h2_stream_pri_cmp(int stream
/* Note key to attach connection task id to conn_rec/request_rec instances */
-#define H2_TASK_ID_NOTE "http2-task-id"
-
+#define H2_TASK_ID_NOTE "http2-task-id"
+#define H2_FILTER_DEBUG_NOTE "http2-debug"
+#define H2_HDR_CONFORMANCE "http2-hdr-conformance"
+#define H2_HDR_CONFORMANCE_UNSAFE "unsafe"
#endif /* defined(__mod_h2__h2__) */
diff -up --new-file httpd-2.4.23/modules/http2/h2_h2.c /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_h2.c
--- httpd-2.4.23/modules/http2/h2_h2.c 2016-05-14 13:49:01.000000000 +0200
+++ /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_h2.c 2017-03-12 15:20:29.000000000 +0100
@@ -32,12 +32,15 @@
#include "mod_http2.h"
#include "h2_private.h"
+#include "h2_bucket_beam.h"
#include "h2_stream.h"
#include "h2_task.h"
#include "h2_config.h"
#include "h2_ctx.h"
#include "h2_conn.h"
+#include "h2_filter.h"
#include "h2_request.h"
+#include "h2_headers.h"
#include "h2_session.h"
#include "h2_util.h"
#include "h2_h2.h"
@@ -433,6 +436,7 @@ static int cipher_is_blacklisted(const c
static int h2_h2_process_conn(conn_rec* c);
static int h2_h2_pre_close_conn(conn_rec* c);
static int h2_h2_post_read_req(request_rec *r);
+static int h2_h2_late_fixups(request_rec *r);
/*******************************************************************************
* Once per lifetime init, retrieve optional functions
@@ -567,6 +571,11 @@ void h2_h2_register_hooks(void)
* never see the response.
*/
ap_hook_post_read_request(h2_h2_post_read_req, NULL, NULL, APR_HOOK_REALLY_FIRST);
+ ap_hook_fixups(h2_h2_late_fixups, NULL, NULL, APR_HOOK_LAST);
+
+ /* special bucket type transfer through a h2_bucket_beam */
+ h2_register_bucket_beamer(h2_bucket_headers_beam);
+ h2_register_bucket_beamer(h2_bucket_observer_beam);
}
int h2_h2_process_conn(conn_rec* c)
@@ -643,6 +652,7 @@ int h2_h2_process_conn(conn_rec* c)
status = h2_conn_setup(ctx, c, NULL);
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, c, "conn_setup");
if (status != APR_SUCCESS) {
+ h2_ctx_clear(c);
return status;
}
}
@@ -665,13 +675,39 @@ static int h2_h2_pre_close_conn(conn_rec
ctx = h2_ctx_get(c, 0);
if (ctx) {
/* If the session has been closed correctly already, we will not
- * fiond a h2_ctx here. The presence indicates that the session
+ * find a h2_ctx here. The presence indicates that the session
* is still ongoing. */
return h2_conn_pre_close(ctx, c);
}
return DECLINED;
}
+static void check_push(request_rec *r, const char *tag)
+{
+ const h2_config *conf = h2_config_rget(r);
+ if (!r->expecting_100
+ && conf && conf->push_list && conf->push_list->nelts > 0) {
+ int i, old_status;
+ const char *old_line;
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r,
+ "%s, early announcing %d resources for push",
+ tag, conf->push_list->nelts);
+ for (i = 0; i < conf->push_list->nelts; ++i) {
+ h2_push_res *push = &APR_ARRAY_IDX(conf->push_list, i, h2_push_res);
+ apr_table_addn(r->headers_out, "Link",
+ apr_psprintf(r->pool, "<%s>; rel=preload%s",
+ push->uri_ref, push->critical? "; critical" : ""));
+ }
+ old_status = r->status;
+ old_line = r->status_line;
+ r->status = 103;
+ r->status_line = "103 Early Hints";
+ ap_send_interim_response(r, 1);
+ r->status = old_status;
+ r->status_line = old_line;
+ }
+}
+
static int h2_h2_post_read_req(request_rec *r)
{
/* slave connection? */
@@ -682,33 +718,48 @@ static int h2_h2_post_read_req(request_r
* that we manipulate filters only once. */
if (task && !task->filters_set) {
ap_filter_t *f;
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE3, 0, r,
+ "h2_task(%s): adding request filters", task->id);
+
+ /* setup the correct filters to process the request for h2 */
+ ap_add_input_filter("H2_REQUEST", task, r, r->connection);
- /* setup the correct output filters to process the response
- * on the proper mod_http2 way. */
- ap_log_rerror(APLOG_MARK, APLOG_TRACE3, 0, r, "adding task output filter");
- if (task->ser_headers) {
- ap_add_output_filter("H1_TO_H2_RESP", task, r, r->connection);
- }
- else {
- /* replace the core http filter that formats response headers
- * in HTTP/1 with our own that collects status and headers */
- ap_remove_output_filter_byhandle(r->output_filters, "HTTP_HEADER");
- ap_add_output_filter("H2_RESPONSE", task, r, r->connection);
- }
+ /* replace the core http filter that formats response headers
+ * in HTTP/1 with our own that collects status and headers */
+ ap_remove_output_filter_byhandle(r->output_filters, "HTTP_HEADER");
+ ap_add_output_filter("H2_RESPONSE", task, r, r->connection);
- /* trailers processing. Incoming trailers are added to this
- * request via our h2 input filter, outgoing trailers
- * in a special h2 out filter. */
for (f = r->input_filters; f; f = f->next) {
- if (!strcmp("H2_TO_H1", f->frec->name)) {
+ if (!strcmp("H2_SLAVE_IN", f->frec->name)) {
f->r = r;
break;
}
}
- ap_add_output_filter("H2_TRAILERS", task, r, r->connection);
+ ap_add_output_filter("H2_TRAILERS_OUT", task, r, r->connection);
task->filters_set = 1;
}
}
return DECLINED;
+}
+
+static int h2_h2_late_fixups(request_rec *r)
+{
+ /* slave connection? */
+ if (r->connection->master) {
+ h2_ctx *ctx = h2_ctx_rget(r);
+ struct h2_task *task = h2_ctx_get_task(ctx);
+ if (task) {
+ /* check if we copy vs. setaside files in this location */
+ task->output.copy_files = h2_config_geti(h2_config_rget(r),
+ H2_CONF_COPY_FILES);
+ if (task->output.copy_files) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c,
+ "h2_slave_out(%s): copy_files on", task->id);
+ h2_beam_on_file_beam(task->output.beam, h2_beam_no_files, NULL);
+ }
+ check_push(r, "late_fixup");
+ }
+ }
+ return DECLINED;
}
diff -up --new-file httpd-2.4.23/modules/http2/h2_h2.h /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_h2.h
--- httpd-2.4.23/modules/http2/h2_h2.h 2016-03-02 12:21:28.000000000 +0100
+++ /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_h2.h 2016-08-25 14:48:18.000000000 +0200
@@ -17,7 +17,7 @@
#define __mod_h2__h2_h2__
/**
- * List of ALPN protocol identifiers that we suport in cleartext
+ * List of ALPN protocol identifiers that we support in cleartext
* negotiations. NULL terminated.
*/
extern const char *h2_clear_protos[];
@@ -36,7 +36,7 @@ extern const char *h2_tls_protos[];
const char *h2_h2_err_description(unsigned int h2_error);
/*
- * One time, post config intialization.
+ * One time, post config initialization.
*/
apr_status_t h2_h2_init(apr_pool_t *pool, server_rec *s);
diff -up --new-file httpd-2.4.23/modules/http2/h2_headers.c /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_headers.c
--- httpd-2.4.23/modules/http2/h2_headers.c 1970-01-01 01:00:00.000000000 +0100
+++ /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_headers.c 2017-04-18 15:12:38.000000000 +0200
@@ -0,0 +1,177 @@
+/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+#include <stdio.h>
+
+#include <apr_strings.h>
+
+#include <httpd.h>
+#include <http_core.h>
+#include <http_log.h>
+#include <util_time.h>
+
+#include <nghttp2/nghttp2.h>
+
+#include "h2_private.h"
+#include "h2_h2.h"
+#include "h2_util.h"
+#include "h2_request.h"
+#include "h2_headers.h"
+
+
+static int is_unsafe(server_rec *s)
+{
+ core_server_config *conf = ap_get_core_module_config(s->module_config);
+ return (conf->http_conformance == AP_HTTP_CONFORMANCE_UNSAFE);
+}
+
+typedef struct {
+ apr_bucket_refcount refcount;
+ h2_headers *headers;
+} h2_bucket_headers;
+
+static apr_status_t bucket_read(apr_bucket *b, const char **str,
+ apr_size_t *len, apr_read_type_e block)
+{
+ (void)b;
+ (void)block;
+ *str = NULL;
+ *len = 0;
+ return APR_SUCCESS;
+}
+
+apr_bucket * h2_bucket_headers_make(apr_bucket *b, h2_headers *r)
+{
+ h2_bucket_headers *br;
+
+ br = apr_bucket_alloc(sizeof(*br), b->list);
+ br->headers = r;
+
+ b = apr_bucket_shared_make(b, br, 0, 0);
+ b->type = &h2_bucket_type_headers;
+
+ return b;
+}
+
+apr_bucket * h2_bucket_headers_create(apr_bucket_alloc_t *list,
+ h2_headers *r)
+{
+ apr_bucket *b = apr_bucket_alloc(sizeof(*b), list);
+
+ APR_BUCKET_INIT(b);
+ b->free = apr_bucket_free;
+ b->list = list;
+ b = h2_bucket_headers_make(b, r);
+ return b;
+}
+
+h2_headers *h2_bucket_headers_get(apr_bucket *b)
+{
+ if (H2_BUCKET_IS_HEADERS(b)) {
+ return ((h2_bucket_headers *)b->data)->headers;
+ }
+ return NULL;
+}
+
+const apr_bucket_type_t h2_bucket_type_headers = {
+ "H2HEADERS", 5, APR_BUCKET_METADATA,
+ apr_bucket_destroy_noop,
+ bucket_read,
+ apr_bucket_setaside_noop,
+ apr_bucket_split_notimpl,
+ apr_bucket_shared_copy
+};
+
+apr_bucket *h2_bucket_headers_beam(struct h2_bucket_beam *beam,
+ apr_bucket_brigade *dest,
+ const apr_bucket *src)
+{
+ if (H2_BUCKET_IS_HEADERS(src)) {
+ h2_headers *r = ((h2_bucket_headers *)src->data)->headers;
+ apr_bucket *b = h2_bucket_headers_create(dest->bucket_alloc, r);
+ APR_BRIGADE_INSERT_TAIL(dest, b);
+ return b;
+ }
+ return NULL;
+}
+
+
+h2_headers *h2_headers_create(int status, apr_table_t *headers_in,
+ apr_table_t *notes, apr_pool_t *pool)
+{
+ h2_headers *headers = apr_pcalloc(pool, sizeof(h2_headers));
+ headers->status = status;
+ headers->headers = (headers_in? apr_table_copy(pool, headers_in)
+ : apr_table_make(pool, 5));
+ headers->notes = (notes? apr_table_copy(pool, notes)
+ : apr_table_make(pool, 5));
+ return headers;
+}
+
+h2_headers *h2_headers_rcreate(request_rec *r, int status,
+ apr_table_t *header, apr_pool_t *pool)
+{
+ h2_headers *headers = h2_headers_create(status, header, r->notes, pool);
+ if (headers->status == HTTP_FORBIDDEN) {
+ const char *cause = apr_table_get(r->notes, "ssl-renegotiate-forbidden");
+ if (cause) {
+ /* This request triggered a TLS renegotiation that is now allowed
+ * in HTTP/2. Tell the client that it should use HTTP/1.1 for this.
+ */
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, headers->status, r,
+ APLOGNO(03061)
+ "h2_headers(%ld): renegotiate forbidden, cause: %s",
+ (long)r->connection->id, cause);
+ headers->status = H2_ERR_HTTP_1_1_REQUIRED;
+ }
+ }
+ if (is_unsafe(r->server)) {
+ apr_table_setn(headers->notes, H2_HDR_CONFORMANCE,
+ H2_HDR_CONFORMANCE_UNSAFE);
+ }
+ return headers;
+}
+
+h2_headers *h2_headers_copy(apr_pool_t *pool, h2_headers *h)
+{
+ return h2_headers_create(h->status, apr_table_copy(pool, h->headers),
+ apr_table_copy(pool, h->notes), pool);
+}
+
+h2_headers *h2_headers_die(apr_status_t type,
+ const h2_request *req, apr_pool_t *pool)
+{
+ h2_headers *headers;
+ char *date;
+
+ headers = apr_pcalloc(pool, sizeof(h2_headers));
+ headers->status = (type >= 200 && type < 600)? type : 500;
+ headers->headers = apr_table_make(pool, 5);
+ headers->notes = apr_table_make(pool, 5);
+
+ date = apr_palloc(pool, APR_RFC822_DATE_LEN);
+ ap_recent_rfc822_date(date, req? req->request_time : apr_time_now());
+ apr_table_setn(headers->headers, "Date", date);
+ apr_table_setn(headers->headers, "Server", ap_get_server_banner());
+
+ return headers;
+}
+
+int h2_headers_are_response(h2_headers *headers)
+{
+ return headers->status >= 200;
+}
+
diff -up --new-file httpd-2.4.23/modules/http2/h2_headers.h /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_headers.h
--- httpd-2.4.23/modules/http2/h2_headers.h 1970-01-01 01:00:00.000000000 +0100
+++ /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_headers.h 2017-04-18 15:12:38.000000000 +0200
@@ -0,0 +1,76 @@
+/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __mod_h2__h2_headers__
+#define __mod_h2__h2_headers__
+
+#include "h2.h"
+
+struct h2_bucket_beam;
+
+extern const apr_bucket_type_t h2_bucket_type_headers;
+
+#define H2_BUCKET_IS_HEADERS(e) (e->type == &h2_bucket_type_headers)
+
+apr_bucket * h2_bucket_headers_make(apr_bucket *b, h2_headers *r);
+
+apr_bucket * h2_bucket_headers_create(apr_bucket_alloc_t *list,
+ h2_headers *r);
+
+h2_headers *h2_bucket_headers_get(apr_bucket *b);
+
+apr_bucket *h2_bucket_headers_beam(struct h2_bucket_beam *beam,
+ apr_bucket_brigade *dest,
+ const apr_bucket *src);
+
+/**
+ * Create the headers from the given status and headers
+ * @param status the headers status
+ * @param header the headers of the headers
+ * @param notes the notes carried by the headers
+ * @param pool the memory pool to use
+ */
+h2_headers *h2_headers_create(int status, apr_table_t *header,
+ apr_table_t *notes, apr_pool_t *pool);
+
+/**
+ * Create the headers from the given request_rec.
+ * @param r the request record which was processed
+ * @param status the headers status
+ * @param header the headers of the headers
+ * @param pool the memory pool to use
+ */
+h2_headers *h2_headers_rcreate(request_rec *r, int status,
+ apr_table_t *header, apr_pool_t *pool);
+
+/**
+ * Clone the headers into another pool. This will not copy any
+ * header strings.
+ */
+h2_headers *h2_headers_copy(apr_pool_t *pool, h2_headers *h);
+
+/**
+ * Create the headers for the given error.
+ * @param stream_id id of the stream to create the headers for
+ * @param type the error code
+ * @param req the original h2_request
+ * @param pool the memory pool to use
+ */
+h2_headers *h2_headers_die(apr_status_t type,
+ const struct h2_request *req, apr_pool_t *pool);
+
+int h2_headers_are_response(h2_headers *headers);
+
+#endif /* defined(__mod_h2__h2_headers__) */
diff -up --new-file httpd-2.4.23/modules/http2/h2_mplx.c /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_mplx.c
--- httpd-2.4.23/modules/http2/h2_mplx.c 2016-06-22 15:30:24.000000000 +0200
+++ /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_mplx.c 2017-10-13 10:37:45.000000000 +0200
@@ -17,6 +17,7 @@
#include <stddef.h>
#include <stdlib.h>
+#include <apr_atomic.h>
#include <apr_thread_mutex.h>
#include <apr_thread_cond.h>
#include <apr_strings.h>
@@ -26,204 +27,115 @@
#include <http_core.h>
#include <http_log.h>
+#include <mpm_common.h>
+
#include "mod_http2.h"
+#include "h2.h"
#include "h2_private.h"
#include "h2_bucket_beam.h"
#include "h2_config.h"
#include "h2_conn.h"
#include "h2_ctx.h"
#include "h2_h2.h"
-#include "h2_response.h"
#include "h2_mplx.h"
#include "h2_ngn_shed.h"
#include "h2_request.h"
#include "h2_stream.h"
+#include "h2_session.h"
#include "h2_task.h"
-#include "h2_worker.h"
#include "h2_workers.h"
#include "h2_util.h"
-static void h2_beam_log(h2_bucket_beam *beam, int id, const char *msg,
- conn_rec *c, int level)
-{
- if (beam && APLOG_C_IS_LEVEL(c,level)) {
- char buffer[2048];
- apr_size_t off = 0;
-
- off += apr_snprintf(buffer+off, H2_ALEN(buffer)-off, "cl=%d, ", beam->closed);
- off += h2_util_bl_print(buffer+off, H2_ALEN(buffer)-off, "red", ", ", &beam->red);
- off += h2_util_bb_print(buffer+off, H2_ALEN(buffer)-off, "green", ", ", beam->green);
- off += h2_util_bl_print(buffer+off, H2_ALEN(buffer)-off, "hold", ", ", &beam->hold);
- off += h2_util_bl_print(buffer+off, H2_ALEN(buffer)-off, "purge", "", &beam->purge);
-
- ap_log_cerror(APLOG_MARK, level, 0, c, "beam(%ld-%d): %s %s",
- c->id, id, msg, buffer);
- }
-}
-
-/* utility for iterating over ihash task sets */
+/* utility for iterating over ihash stream sets */
typedef struct {
h2_mplx *m;
- h2_task *task;
+ h2_stream *stream;
apr_time_t now;
-} task_iter_ctx;
-
-/* NULL or the mutex hold by this thread, used for recursive calls
- */
-static apr_threadkey_t *thread_lock;
+} stream_iter_ctx;
apr_status_t h2_mplx_child_init(apr_pool_t *pool, server_rec *s)
{
- return apr_threadkey_private_create(&thread_lock, NULL, pool);
+ return APR_SUCCESS;
}
-static apr_status_t enter_mutex(h2_mplx *m, int *pacquired)
-{
- apr_status_t status;
- void *mutex = NULL;
-
- /* Enter the mutex if this thread already holds the lock or
- * if we can acquire it. Only on the later case do we unlock
- * onleaving the mutex.
- * This allow recursive entering of the mutex from the saem thread,
- * which is what we need in certain situations involving callbacks
- */
- AP_DEBUG_ASSERT(m);
- apr_threadkey_private_get(&mutex, thread_lock);
- if (mutex == m->lock) {
- *pacquired = 0;
- return APR_SUCCESS;
- }
+#define H2_MPLX_ENTER(m) \
+ do { apr_status_t rv; if ((rv = apr_thread_mutex_lock(m->lock)) != APR_SUCCESS) {\
+ return rv;\
+ } } while(0)
- AP_DEBUG_ASSERT(m->lock);
- status = apr_thread_mutex_lock(m->lock);
- *pacquired = (status == APR_SUCCESS);
- if (*pacquired) {
- apr_threadkey_private_set(m->lock, thread_lock);
- }
- return status;
-}
+#define H2_MPLX_LEAVE(m) \
+ apr_thread_mutex_unlock(m->lock)
+
+#define H2_MPLX_ENTER_ALWAYS(m) \
+ apr_thread_mutex_lock(m->lock)
-static void leave_mutex(h2_mplx *m, int acquired)
-{
- if (acquired) {
- apr_threadkey_private_set(NULL, thread_lock);
- apr_thread_mutex_unlock(m->lock);
- }
-}
+#define H2_MPLX_ENTER_MAYBE(m, lock) \
+ if (lock) apr_thread_mutex_lock(m->lock)
-static void beam_leave(void *ctx, apr_thread_mutex_t *lock)
-{
- leave_mutex(ctx, 1);
-}
+#define H2_MPLX_LEAVE_MAYBE(m, lock) \
+ if (lock) apr_thread_mutex_unlock(m->lock)
-static apr_status_t beam_enter(void *ctx, h2_beam_lock *pbl)
-{
- h2_mplx *m = ctx;
- int acquired;
- apr_status_t status;
-
- status = enter_mutex(m, &acquired);
- if (status == APR_SUCCESS) {
- pbl->mutex = m->lock;
- pbl->leave = acquired? beam_leave : NULL;
- pbl->leave_ctx = m;
- }
- return status;
-}
+static void check_data_for(h2_mplx *m, h2_stream *stream, int lock);
static void stream_output_consumed(void *ctx,
h2_bucket_beam *beam, apr_off_t length)
{
- h2_task *task = ctx;
+ h2_stream *stream = ctx;
+ h2_task *task = stream->task;
+
if (length > 0 && task && task->assigned) {
h2_req_engine_out_consumed(task->assigned, task->c, length);
}
}
-static void stream_input_consumed(void *ctx,
- h2_bucket_beam *beam, apr_off_t length)
+static void stream_input_ev(void *ctx, h2_bucket_beam *beam)
{
- h2_mplx *m = ctx;
- if (m->input_consumed && length) {
- m->input_consumed(m->input_consumed_ctx, beam->id, length);
- }
+ h2_stream *stream = ctx;
+ h2_mplx *m = stream->session->mplx;
+ apr_atomic_set32(&m->event_pending, 1);
}
-static int can_beam_file(void *ctx, h2_bucket_beam *beam, apr_file_t *file)
+static void stream_input_consumed(void *ctx, h2_bucket_beam *beam, apr_off_t length)
{
- h2_mplx *m = ctx;
- if (m->tx_handles_reserved > 0) {
- --m->tx_handles_reserved;
- ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, m->c,
- "h2_mplx(%ld-%d): beaming file %s, tx_avail %d",
- m->id, beam->id, beam->tag, m->tx_handles_reserved);
- return 1;
- }
- ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, m->c,
- "h2_mplx(%ld-%d): can_beam_file denied on %s",
- m->id, beam->id, beam->tag);
- return 0;
+ h2_stream_in_consumed(ctx, length);
}
-static void have_out_data_for(h2_mplx *m, int stream_id);
-static void task_destroy(h2_mplx *m, h2_task *task, int called_from_master);
-
-static void check_tx_reservation(h2_mplx *m)
+static void stream_joined(h2_mplx *m, h2_stream *stream)
{
- if (m->tx_handles_reserved <= 0) {
- m->tx_handles_reserved += h2_workers_tx_reserve(m->workers,
- H2MIN(m->tx_chunk_size, h2_ihash_count(m->tasks)));
- }
+ ap_assert(!stream->task || stream->task->worker_done);
+
+ h2_ihash_remove(m->shold, stream->id);
+ h2_ihash_add(m->spurge, stream);
}
-static void check_tx_free(h2_mplx *m)
+static void stream_cleanup(h2_mplx *m, h2_stream *stream)
{
- if (m->tx_handles_reserved > m->tx_chunk_size) {
- apr_size_t count = m->tx_handles_reserved - m->tx_chunk_size;
- m->tx_handles_reserved = m->tx_chunk_size;
- h2_workers_tx_free(m->workers, count);
- }
- else if (m->tx_handles_reserved && h2_ihash_empty(m->tasks)) {
- h2_workers_tx_free(m->workers, m->tx_handles_reserved);
- m->tx_handles_reserved = 0;
- }
-}
+ ap_assert(stream->state == H2_SS_CLEANUP);
-static int purge_stream(void *ctx, void *val)
-{
- h2_mplx *m = ctx;
- h2_stream *stream = val;
- h2_task *task = h2_ihash_get(m->tasks, stream->id);
- h2_ihash_remove(m->spurge, stream->id);
- h2_stream_destroy(stream);
- if (task) {
- task_destroy(m, task, 1);
+ if (stream->input) {
+ h2_beam_on_consumed(stream->input, NULL, NULL, NULL);
+ h2_beam_abort(stream->input);
}
- return 0;
-}
-
-static void purge_streams(h2_mplx *m)
-{
- if (!h2_ihash_empty(m->spurge)) {
- while(!h2_ihash_iter(m->spurge, purge_stream, m)) {
- /* repeat until empty */
- }
- h2_ihash_clear(m->spurge);
+ if (stream->output) {
+ h2_beam_on_produced(stream->output, NULL, NULL);
+ h2_beam_leave(stream->output);
}
-}
+
+ h2_stream_cleanup(stream);
-static void h2_mplx_destroy(h2_mplx *m)
-{
- AP_DEBUG_ASSERT(m);
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
- "h2_mplx(%ld): destroy, tasks=%d",
- m->id, (int)h2_ihash_count(m->tasks));
- check_tx_free(m);
- if (m->pool) {
- apr_pool_destroy(m->pool);
+ h2_ihash_remove(m->streams, stream->id);
+ h2_iq_remove(m->q, stream->id);
+ h2_ififo_remove(m->readyq, stream->id);
+ h2_ihash_add(m->shold, stream);
+
+ if (!stream->task || stream->task->worker_done) {
+ stream_joined(m, stream);
+ }
+ else if (stream->task) {
+ stream->task->c->aborted = 1;
+ apr_thread_cond_broadcast(m->task_thawed);
}
}
@@ -240,66 +152,83 @@ static void h2_mplx_destroy(h2_mplx *m)
*/
h2_mplx *h2_mplx_create(conn_rec *c, apr_pool_t *parent,
const h2_config *conf,
- apr_interval_time_t stream_timeout,
h2_workers *workers)
{
apr_status_t status = APR_SUCCESS;
- apr_allocator_t *allocator = NULL;
+ apr_allocator_t *allocator;
+ apr_thread_mutex_t *mutex;
h2_mplx *m;
- AP_DEBUG_ASSERT(conf);
+ h2_ctx *ctx = h2_ctx_get(c, 0);
+ ap_assert(conf);
- status = apr_allocator_create(&allocator);
- if (status != APR_SUCCESS) {
- return NULL;
- }
-
m = apr_pcalloc(parent, sizeof(h2_mplx));
if (m) {
m->id = c->id;
- APR_RING_ELEM_INIT(m, link);
m->c = c;
+ m->s = (ctx? h2_ctx_server_get(ctx) : NULL);
+ if (!m->s) {
+ m->s = c->base_server;
+ }
+
+ /* We create a pool with its own allocator to be used for
+ * processing slave connections. This is the only way to have the
+ * processing independent of its parent pool in the sense that it
+ * can work in another thread. Also, the new allocator needs its own
+ * mutex to synchronize sub-pools.
+ */
+ status = apr_allocator_create(&allocator);
+ if (status != APR_SUCCESS) {
+ return NULL;
+ }
+ apr_allocator_max_free_set(allocator, ap_max_mem_free);
apr_pool_create_ex(&m->pool, parent, NULL, allocator);
if (!m->pool) {
+ apr_allocator_destroy(allocator);
return NULL;
}
apr_pool_tag(m->pool, "h2_mplx");
apr_allocator_owner_set(allocator, m->pool);
-
+ status = apr_thread_mutex_create(&mutex, APR_THREAD_MUTEX_DEFAULT,
+ m->pool);
+ if (status != APR_SUCCESS) {
+ apr_pool_destroy(m->pool);
+ return NULL;
+ }
+ apr_allocator_mutex_set(allocator, mutex);
+
status = apr_thread_mutex_create(&m->lock, APR_THREAD_MUTEX_DEFAULT,
m->pool);
if (status != APR_SUCCESS) {
- h2_mplx_destroy(m);
+ apr_pool_destroy(m->pool);
return NULL;
}
status = apr_thread_cond_create(&m->task_thawed, m->pool);
if (status != APR_SUCCESS) {
- h2_mplx_destroy(m);
+ apr_pool_destroy(m->pool);
return NULL;
}
- m->bucket_alloc = apr_bucket_alloc_create(m->pool);
m->max_streams = h2_config_geti(conf, H2_CONF_MAX_STREAMS);
m->stream_max_mem = h2_config_geti(conf, H2_CONF_STREAM_MAX_MEM);
m->streams = h2_ihash_create(m->pool, offsetof(h2_stream,id));
+ m->sredo = h2_ihash_create(m->pool, offsetof(h2_stream,id));
m->shold = h2_ihash_create(m->pool, offsetof(h2_stream,id));
m->spurge = h2_ihash_create(m->pool, offsetof(h2_stream,id));
m->q = h2_iq_create(m->pool, m->max_streams);
- m->sready = h2_ihash_create(m->pool, offsetof(h2_stream,id));
- m->sresume = h2_ihash_create(m->pool, offsetof(h2_stream,id));
- m->tasks = h2_ihash_create(m->pool, offsetof(h2_task,stream_id));
- m->stream_timeout = stream_timeout;
+ status = h2_ififo_set_create(&m->readyq, m->pool, m->max_streams);
+ if (status != APR_SUCCESS) {
+ apr_pool_destroy(m->pool);
+ return NULL;
+ }
+
m->workers = workers;
- m->workers_max = workers->max_workers;
- m->workers_def_limit = 4;
- m->workers_limit = m->workers_def_limit;
+ m->max_active = workers->max_workers;
+ m->limit_active = 6; /* the original h1 max parallel connections */
m->last_limit_change = m->last_idle_block = apr_time_now();
- m->limit_change_interval = apr_time_from_msec(200);
-
- m->tx_handles_reserved = 0;
- m->tx_chunk_size = 4;
+ m->limit_change_interval = apr_time_from_msec(100);
m->spare_slaves = apr_array_make(m->pool, 10, sizeof(conn_rec*));
@@ -310,411 +239,330 @@ h2_mplx *h2_mplx_create(conn_rec *c, apr
return m;
}
-apr_uint32_t h2_mplx_shutdown(h2_mplx *m)
+int h2_mplx_shutdown(h2_mplx *m)
{
- int acquired, max_stream_started = 0;
+ int max_stream_started = 0;
- if (enter_mutex(m, &acquired) == APR_SUCCESS) {
- max_stream_started = m->max_stream_started;
- /* Clear schedule queue, disabling existing streams from starting */
- h2_iq_clear(m->q);
- leave_mutex(m, acquired);
- }
+ H2_MPLX_ENTER(m);
+
+ max_stream_started = m->max_stream_started;
+ /* Clear schedule queue, disabling existing streams from starting */
+ h2_iq_clear(m->q);
+
+ H2_MPLX_LEAVE(m);
return max_stream_started;
}
-static void input_consumed_signal(h2_mplx *m, h2_stream *stream)
+static int input_consumed_signal(h2_mplx *m, h2_stream *stream)
{
- if (stream->input && stream->started) {
- h2_beam_send(stream->input, NULL, 0); /* trigger updates */
+ if (stream->input) {
+ return h2_beam_report_consumption(stream->input);
}
+ return 0;
+}
+
+static int report_consumption_iter(void *ctx, void *val)
+{
+ h2_stream *stream = val;
+ h2_mplx *m = ctx;
+
+ input_consumed_signal(m, stream);
+ if (stream->state == H2_SS_CLOSED_L
+ && (!stream->task || stream->task->worker_done)) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c,
+ H2_STRM_LOG(APLOGNO(10026), stream, "remote close missing"));
+ nghttp2_submit_rst_stream(stream->session->ngh2, NGHTTP2_FLAG_NONE,
+ stream->id, NGHTTP2_NO_ERROR);
+ }
+ return 1;
}
static int output_consumed_signal(h2_mplx *m, h2_task *task)
{
- if (task->output.beam && task->worker_started && task->assigned) {
- /* trigger updates */
- h2_beam_send(task->output.beam, NULL, APR_NONBLOCK_READ);
+ if (task->output.beam) {
+ return h2_beam_report_consumption(task->output.beam);
}
return 0;
}
-
-static void task_destroy(h2_mplx *m, h2_task *task, int called_from_master)
+static void task_destroy(h2_mplx *m, h2_task *task)
{
conn_rec *slave = NULL;
int reuse_slave = 0;
- apr_status_t status;
-
- ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, m->c,
- "h2_task(%s): destroy", task->id);
- if (called_from_master) {
- /* Process outstanding events before destruction */
- h2_stream *stream = h2_ihash_get(m->streams, task->stream_id);
- if (stream) {
- input_consumed_signal(m, stream);
- }
- }
-
- /* The pool is cleared/destroyed which also closes all
- * allocated file handles. Give this count back to our
- * file handle pool. */
- if (task->output.beam) {
- m->tx_handles_reserved +=
- h2_beam_get_files_beamed(task->output.beam);
- h2_beam_on_produced(task->output.beam, NULL, NULL);
- status = h2_beam_shutdown(task->output.beam, APR_NONBLOCK_READ, 1);
- if (status != APR_SUCCESS){
- ap_log_cerror(APLOG_MARK, APLOG_WARNING, status, m->c,
- APLOGNO(03385) "h2_task(%s): output shutdown "
- "incomplete", task->id);
- }
- }
slave = task->c;
- reuse_slave = ((m->spare_slaves->nelts < m->spare_slaves->nalloc)
- && !task->rst_error);
-
- h2_ihash_remove(m->tasks, task->stream_id);
- if (m->redo_tasks) {
- h2_ihash_remove(m->redo_tasks, task->stream_id);
- }
- h2_task_destroy(task);
+ if (m->s->keep_alive_max == 0 || slave->keepalives < m->s->keep_alive_max) {
+ reuse_slave = ((m->spare_slaves->nelts < (m->limit_active * 3 / 2))
+ && !task->rst_error);
+ }
+
if (slave) {
if (reuse_slave && slave->keepalive == AP_CONN_KEEPALIVE) {
+ h2_beam_log(task->output.beam, m->c, APLOG_DEBUG,
+ APLOGNO(03385) "h2_task_destroy, reuse slave");
+ h2_task_destroy(task);
APR_ARRAY_PUSH(m->spare_slaves, conn_rec*) = slave;
}
else {
+ h2_beam_log(task->output.beam, m->c, APLOG_TRACE1,
+ "h2_task_destroy, destroy slave");
slave->sbh = NULL;
- h2_slave_destroy(slave, NULL);
+ h2_slave_destroy(slave);
}
}
-
- check_tx_free(m);
}
-static void stream_done(h2_mplx *m, h2_stream *stream, int rst_error)
-{
- h2_task *task;
+static int stream_destroy_iter(void *ctx, void *val)
+{
+ h2_mplx *m = ctx;
+ h2_stream *stream = val;
+
+ h2_ihash_remove(m->spurge, stream->id);
+ ap_assert(stream->state == H2_SS_CLEANUP);
- ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, m->c,
- "h2_stream(%ld-%d): done", m->c->id, stream->id);
- /* Situation: we are, on the master connection, done with processing
- * the stream. Either we have handled it successfully, or the stream
- * was reset by the client or the connection is gone and we are
- * shutting down the whole session.
- *
- * We possibly have created a task for this stream to be processed
- * on a slave connection. The processing might actually be ongoing
- * right now or has already finished. A finished task waits for its
- * stream to be done. This is the common case.
- *
- * If the stream had input (e.g. the request had a body), a task
- * may have read, or is still reading buckets from the input beam.
- * This means that the task is referencing memory from the stream's
- * pool (or the master connection bucket alloc). Before we can free
- * the stream pool, we need to make sure that those references are
- * gone. This is what h2_beam_shutdown() on the input waits for.
- *
- * With the input handled, we can tear down that beam and care
- * about the output beam. The stream might still have buffered some
- * buckets read from the output, so we need to get rid of those. That
- * is done by h2_stream_cleanup().
- *
- * Now it is save to destroy the task (if it exists and is finished).
- *
- * FIXME: we currently destroy the stream, even if the task is still
- * ongoing. This is not ok, since task->request is coming from stream
- * memory. We should either copy it on task creation or wait with the
- * stream destruction until the task is done.
- */
- h2_iq_remove(m->q, stream->id);
- h2_ihash_remove(m->sready, stream->id);
- h2_ihash_remove(m->sresume, stream->id);
- h2_ihash_remove(m->streams, stream->id);
if (stream->input) {
- m->tx_handles_reserved += h2_beam_get_files_beamed(stream->input);
- h2_beam_on_consumed(stream->input, NULL, NULL);
- /* Let anyone blocked reading know that there is no more to come */
- h2_beam_abort(stream->input);
- /* Remove mutex after, so that abort still finds cond to signal */
- h2_beam_mutex_set(stream->input, NULL, NULL, NULL);
+ /* Process outstanding events before destruction */
+ input_consumed_signal(m, stream);
+ h2_beam_log(stream->input, m->c, APLOG_TRACE2, "stream_destroy");
+ h2_beam_destroy(stream->input);
+ stream->input = NULL;
}
- h2_stream_cleanup(stream);
- task = h2_ihash_get(m->tasks, stream->id);
- if (task) {
- if (!task->worker_done) {
- /* task still running, cleanup once it is done */
- if (rst_error) {
- h2_task_rst(task, rst_error);
- }
- h2_ihash_add(m->shold, stream);
- return;
- }
- else {
- /* already finished */
- task_destroy(m, task, 0);
- }
+ if (stream->task) {
+ task_destroy(m, stream->task);
+ stream->task = NULL;
}
h2_stream_destroy(stream);
+ return 0;
}
-static int stream_done_iter(void *ctx, void *val)
+static void purge_streams(h2_mplx *m, int lock)
{
- stream_done((h2_mplx*)ctx, val, 0);
- return 0;
+ if (!h2_ihash_empty(m->spurge)) {
+ H2_MPLX_ENTER_MAYBE(m, lock);
+ while (!h2_ihash_iter(m->spurge, stream_destroy_iter, m)) {
+ /* repeat until empty */
+ }
+ H2_MPLX_LEAVE_MAYBE(m, lock);
+ }
}
-static int task_print(void *ctx, void *val)
+typedef struct {
+ h2_mplx_stream_cb *cb;
+ void *ctx;
+} stream_iter_ctx_t;
+
+static int stream_iter_wrap(void *ctx, void *stream)
{
- h2_mplx *m = ctx;
- h2_task *task = val;
+ stream_iter_ctx_t *x = ctx;
+ return x->cb(stream, x->ctx);
+}
+
+apr_status_t h2_mplx_stream_do(h2_mplx *m, h2_mplx_stream_cb *cb, void *ctx)
+{
+ stream_iter_ctx_t x;
+
+ H2_MPLX_ENTER(m);
- if (task && task->request) {
- h2_stream *stream = h2_ihash_get(m->streams, task->stream_id);
+ x.cb = cb;
+ x.ctx = ctx;
+ h2_ihash_iter(m->streams, stream_iter_wrap, &x);
+
+ H2_MPLX_LEAVE(m);
+ return APR_SUCCESS;
+}
- ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c, /* NO APLOGNO */
- "->03198: h2_stream(%s): %s %s %s -> %s %d"
- "[orph=%d/started=%d/done=%d]",
- task->id, task->request->method,
- task->request->authority, task->request->path,
- task->response? "http" : (task->rst_error? "reset" : "?"),
- task->response? task->response->http_status : task->rst_error,
- (stream? 0 : 1), task->worker_started,
- task->worker_done);
- }
- else if (task) {
- ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c, /* NO APLOGNO */
- "->03198: h2_stream(%ld-%d): NULL", m->id, task->stream_id);
+static int report_stream_iter(void *ctx, void *val) {
+ h2_mplx *m = ctx;
+ h2_stream *stream = val;
+ h2_task *task = stream->task;
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
+ H2_STRM_MSG(stream, "started=%d, scheduled=%d, ready=%d, "
+ "out_buffer=%ld"),
+ !!stream->task, stream->scheduled, h2_stream_is_ready(stream),
+ (long)h2_beam_get_buffered(stream->output));
+ if (task) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c, /* NO APLOGNO */
+ H2_STRM_MSG(stream, "->03198: %s %s %s"
+ "[started=%d/done=%d/frozen=%d]"),
+ task->request->method, task->request->authority,
+ task->request->path, task->worker_started,
+ task->worker_done, task->frozen);
}
else {
- ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c, /* NO APLOGNO */
- "->03198: h2_stream(%ld-NULL): NULL", m->id);
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c, /* NO APLOGNO */
+ H2_STRM_MSG(stream, "->03198: no task"));
}
return 1;
}
-static int task_abort_connection(void *ctx, void *val)
-{
- h2_task *task = val;
- if (task->c) {
- task->c->aborted = 1;
- }
- if (task->input.beam) {
- h2_beam_abort(task->input.beam);
- }
- if (task->output.beam) {
- h2_beam_abort(task->output.beam);
- }
+static int unexpected_stream_iter(void *ctx, void *val) {
+ h2_mplx *m = ctx;
+ h2_stream *stream = val;
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c, /* NO APLOGNO */
+ H2_STRM_MSG(stream, "unexpected, started=%d, scheduled=%d, ready=%d"),
+ !!stream->task, stream->scheduled, h2_stream_is_ready(stream));
return 1;
}
-static int report_stream_iter(void *ctx, void *val) {
+static int stream_cancel_iter(void *ctx, void *val) {
h2_mplx *m = ctx;
h2_stream *stream = val;
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
- "h2_mplx(%ld-%d): exists, started=%d, scheduled=%d, "
- "submitted=%d, suspended=%d",
- m->id, stream->id, stream->started, stream->scheduled,
- stream->submitted, stream->suspended);
- return 1;
+
+ /* disabled input consumed reporting */
+ if (stream->input) {
+ h2_beam_on_consumed(stream->input, NULL, NULL, NULL);
+ }
+ /* take over event monitoring */
+ h2_stream_set_monitor(stream, NULL);
+ /* Reset, should transit to CLOSED state */
+ h2_stream_rst(stream, H2_ERR_NO_ERROR);
+ /* All connection data has been sent, simulate cleanup */
+ h2_stream_dispatch(stream, H2_SEV_EOS_SENT);
+ stream_cleanup(m, stream);
+ return 0;
}
-apr_status_t h2_mplx_release_and_join(h2_mplx *m, apr_thread_cond_t *wait)
+void h2_mplx_release_and_join(h2_mplx *m, apr_thread_cond_t *wait)
{
apr_status_t status;
- int acquired;
+ int i, wait_secs = 60;
+ /* How to shut down a h2 connection:
+ * 0. abort and tell the workers that no more tasks will come from us */
+ m->aborted = 1;
h2_workers_unregister(m->workers, m);
- if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) {
- int i, wait_secs = 5;
+ H2_MPLX_ENTER_ALWAYS(m);
- if (!h2_ihash_empty(m->streams) && APLOGctrace1(m->c)) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
- "h2_mplx(%ld): release_join with %d streams open, "
- "%d streams resume, %d streams ready, %d tasks",
- m->id, (int)h2_ihash_count(m->streams),
- (int)h2_ihash_count(m->sresume),
- (int)h2_ihash_count(m->sready),
- (int)h2_ihash_count(m->tasks));
- h2_ihash_iter(m->streams, report_stream_iter, m);
- }
-
- /* disable WINDOW_UPDATE callbacks */
- h2_mplx_set_consumed_cb(m, NULL, NULL);
-
- if (!h2_ihash_empty(m->shold)) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
- "h2_mplx(%ld): start release_join with %d streams in hold",
- m->id, (int)h2_ihash_count(m->shold));
- }
- if (!h2_ihash_empty(m->spurge)) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
- "h2_mplx(%ld): start release_join with %d streams to purge",
- m->id, (int)h2_ihash_count(m->spurge));
- }
-
- h2_iq_clear(m->q);
- apr_thread_cond_broadcast(m->task_thawed);
- while (!h2_ihash_iter(m->streams, stream_done_iter, m)) {
- /* iterate until all streams have been removed */
- }
- AP_DEBUG_ASSERT(h2_ihash_empty(m->streams));
-
- if (!h2_ihash_empty(m->shold)) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
- "h2_mplx(%ld): 2. release_join with %d streams in hold",
- m->id, (int)h2_ihash_count(m->shold));
- }
- if (!h2_ihash_empty(m->spurge)) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
- "h2_mplx(%ld): 2. release_join with %d streams to purge",
- m->id, (int)h2_ihash_count(m->spurge));
- }
-
- /* If we still have busy workers, we cannot release our memory
- * pool yet, as tasks have references to us.
- * Any operation on the task slave connection will from now on
- * be errored ECONNRESET/ABORTED, so processing them should fail
- * and workers *should* return in a timely fashion.
- */
- for (i = 0; m->workers_busy > 0; ++i) {
- h2_ihash_iter(m->tasks, task_abort_connection, m);
-
- m->join_wait = wait;
- status = apr_thread_cond_timedwait(wait, m->lock, apr_time_from_sec(wait_secs));
-
- if (APR_STATUS_IS_TIMEUP(status)) {
- if (i > 0) {
- /* Oh, oh. Still we wait for assigned workers to report that
- * they are done. Unless we have a bug, a worker seems to be hanging.
- * If we exit now, all will be deallocated and the worker, once
- * it does return, will walk all over freed memory...
- */
- ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c, APLOGNO(03198)
- "h2_mplx(%ld): release, waiting for %d seconds now for "
- "%d h2_workers to return, have still %d tasks outstanding",
- m->id, i*wait_secs, m->workers_busy,
- (int)h2_ihash_count(m->tasks));
- if (i == 1) {
- h2_ihash_iter(m->tasks, task_print, m);
- }
- }
- h2_mplx_abort(m);
- apr_thread_cond_broadcast(m->task_thawed);
- }
- }
-
- AP_DEBUG_ASSERT(h2_ihash_empty(m->shold));
- if (!h2_ihash_empty(m->spurge)) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
- "h2_mplx(%ld): 3. release_join %d streams to purge",
- m->id, (int)h2_ihash_count(m->spurge));
- purge_streams(m);
- }
- AP_DEBUG_ASSERT(h2_ihash_empty(m->spurge));
-
- if (!h2_ihash_empty(m->tasks)) {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c, APLOGNO(03056)
- "h2_mplx(%ld): release_join -> destroy, "
- "%d tasks still present",
- m->id, (int)h2_ihash_count(m->tasks));
- }
- leave_mutex(m, acquired);
- h2_mplx_destroy(m);
- /* all gone */
+ /* How to shut down a h2 connection:
+ * 1. cancel all streams still active */
+ while (!h2_ihash_iter(m->streams, stream_cancel_iter, m)) {
+ /* until empty */
+ }
+
+ /* 2. terminate ngn_shed, no more streams
+ * should be scheduled or in the active set */
+ h2_ngn_shed_abort(m->ngn_shed);
+ ap_assert(h2_ihash_empty(m->streams));
+ ap_assert(h2_iq_empty(m->q));
+
+ /* 3. while workers are busy on this connection, meaning they
+ * are processing tasks from this connection, wait on them finishing
+ * in order to wake us and let us check again.
+ * Eventually, this has to succeed. */
+ m->join_wait = wait;
+ for (i = 0; h2_ihash_count(m->shold) > 0; ++i) {
+ status = apr_thread_cond_timedwait(wait, m->lock, apr_time_from_sec(wait_secs));
+
+ if (APR_STATUS_IS_TIMEUP(status)) {
+ /* This can happen if we have very long running requests
+ * that do not time out on IO. */
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c, APLOGNO(03198)
+ "h2_mplx(%ld): waited %d sec for %d tasks",
+ m->id, i*wait_secs, (int)h2_ihash_count(m->shold));
+ h2_ihash_iter(m->shold, report_stream_iter, m);
+ }
+ }
+ m->join_wait = NULL;
+
+ /* 4. close the h2_req_enginge shed */
+ h2_ngn_shed_destroy(m->ngn_shed);
+ m->ngn_shed = NULL;
+
+ /* 4. With all workers done, all streams should be in spurge */
+ if (!h2_ihash_empty(m->shold)) {
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c, APLOGNO(03516)
+ "h2_mplx(%ld): unexpected %d streams in hold",
+ m->id, (int)h2_ihash_count(m->shold));
+ h2_ihash_iter(m->shold, unexpected_stream_iter, m);
}
- return status;
+
+ H2_MPLX_LEAVE(m);
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
+ "h2_mplx(%ld): released", m->id);
}
-void h2_mplx_abort(h2_mplx *m)
+apr_status_t h2_mplx_stream_cleanup(h2_mplx *m, h2_stream *stream)
{
- int acquired;
+ H2_MPLX_ENTER(m);
- AP_DEBUG_ASSERT(m);
- if (!m->aborted && enter_mutex(m, &acquired) == APR_SUCCESS) {
- m->aborted = 1;
- h2_ngn_shed_abort(m->ngn_shed);
- leave_mutex(m, acquired);
- }
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
+ H2_STRM_MSG(stream, "cleanup"));
+ stream_cleanup(m, stream);
+
+ H2_MPLX_LEAVE(m);
+ return APR_SUCCESS;
}
-apr_status_t h2_mplx_stream_done(h2_mplx *m, h2_stream *stream)
+h2_stream *h2_mplx_stream_get(h2_mplx *m, int id)
{
- apr_status_t status = APR_SUCCESS;
- int acquired;
+ h2_stream *s = NULL;
- AP_DEBUG_ASSERT(m);
- if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
- "h2_mplx(%ld-%d): marking stream as done.",
- m->id, stream->id);
- stream_done(m, stream, stream->rst_error);
- purge_streams(m);
- leave_mutex(m, acquired);
- }
- return status;
+ H2_MPLX_ENTER_ALWAYS(m);
+
+ s = h2_ihash_get(m->streams, id);
+
+ H2_MPLX_LEAVE(m);
+ return s;
}
-void h2_mplx_set_consumed_cb(h2_mplx *m, h2_mplx_consumed_cb *cb, void *ctx)
+static void output_produced(void *ctx, h2_bucket_beam *beam, apr_off_t bytes)
{
- m->input_consumed = cb;
- m->input_consumed_ctx = ctx;
+ h2_stream *stream = ctx;
+ h2_mplx *m = stream->session->mplx;
+
+ check_data_for(m, stream, 1);
}
-static apr_status_t out_open(h2_mplx *m, int stream_id, h2_response *response)
+static apr_status_t out_open(h2_mplx *m, int stream_id, h2_bucket_beam *beam)
{
apr_status_t status = APR_SUCCESS;
- h2_task *task = h2_ihash_get(m->tasks, stream_id);
h2_stream *stream = h2_ihash_get(m->streams, stream_id);
- if (!task || !stream) {
+ if (!stream || !stream->task || m->aborted) {
return APR_ECONNABORTED;
}
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
- "h2_mplx(%s): open response: %d, rst=%d",
- task->id, response->http_status, response->rst_error);
-
- h2_task_set_response(task, response);
+ ap_assert(stream->output == NULL);
+ stream->output = beam;
- if (task->output.beam) {
- h2_beam_buffer_size_set(task->output.beam, m->stream_max_mem);
- h2_beam_timeout_set(task->output.beam, m->stream_timeout);
- h2_beam_on_consumed(task->output.beam, stream_output_consumed, task);
- m->tx_handles_reserved -= h2_beam_get_files_beamed(task->output.beam);
- h2_beam_on_file_beam(task->output.beam, can_beam_file, m);
- h2_beam_mutex_set(task->output.beam, beam_enter, task->cond, m);
+ if (APLOGctrace2(m->c)) {
+ h2_beam_log(beam, m->c, APLOG_TRACE2, "out_open");
+ }
+ else {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, m->c,
+ "h2_mplx(%s): out open", stream->task->id);
}
- h2_ihash_add(m->sready, stream);
- if (response && response->http_status < 300) {
- /* we might see some file buckets in the output, see
- * if we have enough handles reserved. */
- check_tx_reservation(m);
+ h2_beam_on_consumed(stream->output, NULL, stream_output_consumed, stream);
+ h2_beam_on_produced(stream->output, output_produced, stream);
+ if (stream->task->output.copy_files) {
+ h2_beam_on_file_beam(stream->output, h2_beam_no_files, NULL);
}
- have_out_data_for(m, stream_id);
+
+ /* we might see some file buckets in the output, see
+ * if we have enough handles reserved. */
+ check_data_for(m, stream, 0);
return status;
}
-apr_status_t h2_mplx_out_open(h2_mplx *m, int stream_id, h2_response *response)
+apr_status_t h2_mplx_out_open(h2_mplx *m, int stream_id, h2_bucket_beam *beam)
{
apr_status_t status;
- int acquired;
- AP_DEBUG_ASSERT(m);
- if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) {
- if (m->aborted) {
- status = APR_ECONNABORTED;
- }
- else {
- status = out_open(m, stream_id, response);
- }
- leave_mutex(m, acquired);
+ H2_MPLX_ENTER(m);
+
+ if (m->aborted) {
+ status = APR_ECONNABORTED;
}
+ else {
+ status = out_open(m, stream_id, beam);
+ }
+
+ H2_MPLX_LEAVE(m);
return status;
}
@@ -726,31 +574,21 @@ static apr_status_t out_close(h2_mplx *m
if (!task) {
return APR_ECONNABORTED;
}
-
+ if (task->c) {
+ ++task->c->keepalives;
+ }
+
stream = h2_ihash_get(m->streams, task->stream_id);
if (!stream) {
return APR_ECONNABORTED;
}
- if (!task->response && !task->rst_error) {
- /* In case a close comes before a response was created,
- * insert an error one so that our streams can properly reset.
- */
- h2_response *r = h2_response_die(task->stream_id, 500,
- task->request, m->pool);
- status = out_open(m, task->stream_id, r);
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, m->c, APLOGNO(03393)
- "h2_mplx(%s): close, no response, no rst", task->id);
- }
ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, m->c,
"h2_mplx(%s): close", task->id);
- if (task->output.beam) {
- status = h2_beam_close(task->output.beam);
- h2_beam_log(task->output.beam, task->stream_id, "out_close", m->c,
- APLOG_TRACE2);
- }
+ status = h2_beam_close(task->output.beam);
+ h2_beam_log(task->output.beam, m->c, APLOG_TRACE2, "out_close");
output_consumed_signal(m, task);
- have_out_data_for(m, task->stream_id);
+ check_data_for(m, stream, 0);
return status;
}
@@ -758,313 +596,305 @@ apr_status_t h2_mplx_out_trywait(h2_mplx
apr_thread_cond_t *iowait)
{
apr_status_t status;
- int acquired;
- AP_DEBUG_ASSERT(m);
- if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) {
- if (m->aborted) {
- status = APR_ECONNABORTED;
- }
- else if (!h2_ihash_empty(m->sready) || !h2_ihash_empty(m->sresume)) {
- status = APR_SUCCESS;
- }
- else {
- purge_streams(m);
- m->added_output = iowait;
- status = apr_thread_cond_timedwait(m->added_output, m->lock, timeout);
- if (APLOGctrace2(m->c)) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
- "h2_mplx(%ld): trywait on data for %f ms)",
- m->id, timeout/1000.0);
- }
- m->added_output = NULL;
+ H2_MPLX_ENTER(m);
+
+ if (m->aborted) {
+ status = APR_ECONNABORTED;
+ }
+ else if (h2_mplx_has_master_events(m)) {
+ status = APR_SUCCESS;
+ }
+ else {
+ purge_streams(m, 0);
+ h2_ihash_iter(m->streams, report_consumption_iter, m);
+ m->added_output = iowait;
+ status = apr_thread_cond_timedwait(m->added_output, m->lock, timeout);
+ if (APLOGctrace2(m->c)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
+ "h2_mplx(%ld): trywait on data for %f ms)",
+ m->id, timeout/1000.0);
}
- leave_mutex(m, acquired);
+ m->added_output = NULL;
}
+
+ H2_MPLX_LEAVE(m);
return status;
}
-static void have_out_data_for(h2_mplx *m, int stream_id)
+static void check_data_for(h2_mplx *m, h2_stream *stream, int lock)
{
- (void)stream_id;
- AP_DEBUG_ASSERT(m);
- if (m->added_output) {
- apr_thread_cond_signal(m->added_output);
+ if (h2_ififo_push(m->readyq, stream->id) == APR_SUCCESS) {
+ apr_atomic_set32(&m->event_pending, 1);
+ H2_MPLX_ENTER_MAYBE(m, lock);
+ if (m->added_output) {
+ apr_thread_cond_signal(m->added_output);
+ }
+ H2_MPLX_LEAVE_MAYBE(m, lock);
}
}
apr_status_t h2_mplx_reprioritize(h2_mplx *m, h2_stream_pri_cmp *cmp, void *ctx)
{
apr_status_t status;
- int acquired;
- AP_DEBUG_ASSERT(m);
- if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) {
- if (m->aborted) {
- status = APR_ECONNABORTED;
+ H2_MPLX_ENTER(m);
+
+ if (m->aborted) {
+ status = APR_ECONNABORTED;
+ }
+ else {
+ h2_iq_sort(m->q, cmp, ctx);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
+ "h2_mplx(%ld): reprioritize tasks", m->id);
+ status = APR_SUCCESS;
+ }
+
+ H2_MPLX_LEAVE(m);
+ return status;
+}
+
+static void register_if_needed(h2_mplx *m)
+{
+ if (!m->aborted && !m->is_registered && !h2_iq_empty(m->q)) {
+ apr_status_t status = h2_workers_register(m->workers, m);
+ if (status == APR_SUCCESS) {
+ m->is_registered = 1;
}
else {
- h2_iq_sort(m->q, cmp, ctx);
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
- "h2_mplx(%ld): reprioritize tasks", m->id);
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, status, m->c, APLOGNO(10021)
+ "h2_mplx(%ld): register at workers", m->id);
}
- leave_mutex(m, acquired);
}
- return status;
}
apr_status_t h2_mplx_process(h2_mplx *m, struct h2_stream *stream,
h2_stream_pri_cmp *cmp, void *ctx)
{
apr_status_t status;
- int do_registration = 0;
- int acquired;
- AP_DEBUG_ASSERT(m);
- if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) {
- if (m->aborted) {
- status = APR_ECONNABORTED;
+ H2_MPLX_ENTER(m);
+
+ if (m->aborted) {
+ status = APR_ECONNABORTED;
+ }
+ else {
+ status = APR_SUCCESS;
+ h2_ihash_add(m->streams, stream);
+ if (h2_stream_is_ready(stream)) {
+ /* already have a response */
+ check_data_for(m, stream, 0);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
+ H2_STRM_MSG(stream, "process, add to readyq"));
}
else {
- h2_ihash_add(m->streams, stream);
- if (stream->response) {
- /* already have a respone, schedule for submit */
- h2_ihash_add(m->sready, stream);
- }
- else {
- h2_beam_create(&stream->input, stream->pool, stream->id,
- "input", 0);
- if (!m->need_registration) {
- m->need_registration = h2_iq_empty(m->q);
- }
- if (m->workers_busy < m->workers_max) {
- do_registration = m->need_registration;
- }
- h2_iq_add(m->q, stream->id, cmp, ctx);
-
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, m->c,
- "h2_mplx(%ld-%d): process, body=%d",
- m->c->id, stream->id, stream->request->body);
- }
+ h2_iq_add(m->q, stream->id, cmp, ctx);
+ register_if_needed(m);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
+ H2_STRM_MSG(stream, "process, added to q"));
}
- leave_mutex(m, acquired);
- }
- if (do_registration) {
- m->need_registration = 0;
- h2_workers_register(m->workers, m);
}
+
+ H2_MPLX_LEAVE(m);
return status;
}
-static h2_task *pop_task(h2_mplx *m)
+static h2_task *next_stream_task(h2_mplx *m)
{
- h2_task *task = NULL;
h2_stream *stream;
int sid;
- while (!m->aborted && !task && (m->workers_busy < m->workers_limit)
+ while (!m->aborted && (m->tasks_active < m->limit_active)
&& (sid = h2_iq_shift(m->q)) > 0) {
stream = h2_ihash_get(m->streams, sid);
if (stream) {
conn_rec *slave, **pslave;
- int new_conn = 0;
pslave = (conn_rec **)apr_array_pop(m->spare_slaves);
if (pslave) {
slave = *pslave;
+ slave->aborted = 0;
}
else {
- slave = h2_slave_create(m->c, m->pool, NULL);
- new_conn = 1;
+ slave = h2_slave_create(m->c, stream->id, m->pool);
}
- slave->sbh = m->c->sbh;
- slave->aborted = 0;
- task = h2_task_create(slave, stream->request, stream->input, m);
- h2_ihash_add(m->tasks, task);
-
- m->c->keepalives++;
- apr_table_setn(slave->notes, H2_TASK_ID_NOTE, task->id);
- if (new_conn) {
- h2_slave_run_pre_connection(slave, ap_get_conn_socket(slave));
- }
- stream->started = 1;
- task->worker_started = 1;
- task->started_at = apr_time_now();
- if (sid > m->max_stream_started) {
- m->max_stream_started = sid;
- }
+ if (!stream->task) {
- if (stream->input) {
- h2_beam_timeout_set(stream->input, m->stream_timeout);
- h2_beam_on_consumed(stream->input, stream_input_consumed, m);
- h2_beam_on_file_beam(stream->input, can_beam_file, m);
- h2_beam_mutex_set(stream->input, beam_enter, task->cond, m);
+ if (sid > m->max_stream_started) {
+ m->max_stream_started = sid;
+ }
+ if (stream->input) {
+ h2_beam_on_consumed(stream->input, stream_input_ev,
+ stream_input_consumed, stream);
+ }
+
+ stream->task = h2_task_create(slave, stream->id,
+ stream->request, m, stream->input,
+ stream->session->s->timeout,
+ m->stream_max_mem);
+ if (!stream->task) {
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_ENOMEM, slave,
+ H2_STRM_LOG(APLOGNO(02941), stream,
+ "create task"));
+ return NULL;
+ }
+
}
-
- ++m->workers_busy;
+
+ ++m->tasks_active;
+ return stream->task;
}
}
- return task;
+ return NULL;
}
-h2_task *h2_mplx_pop_task(h2_mplx *m, int *has_more)
+apr_status_t h2_mplx_pop_task(h2_mplx *m, h2_task **ptask)
{
- h2_task *task = NULL;
- apr_status_t status;
- int acquired;
+ apr_status_t rv = APR_EOF;
- AP_DEBUG_ASSERT(m);
- if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) {
- if (m->aborted) {
- *has_more = 0;
- }
- else {
- task = pop_task(m);
- *has_more = !h2_iq_empty(m->q);
- }
-
- if (has_more && !task) {
- m->need_registration = 1;
- }
- leave_mutex(m, acquired);
+ *ptask = NULL;
+ if (APR_SUCCESS != (rv = apr_thread_mutex_lock(m->lock))) {
+ return rv;
+ }
+
+ if (m->aborted) {
+ rv = APR_EOF;
}
- return task;
+ else {
+ *ptask = next_stream_task(m);
+ rv = (*ptask != NULL && !h2_iq_empty(m->q))? APR_EAGAIN : APR_SUCCESS;
+ }
+ if (APR_EAGAIN != rv) {
+ m->is_registered = 0; /* h2_workers will discard this mplx */
+ }
+ H2_MPLX_LEAVE(m);
+ return rv;
}
static void task_done(h2_mplx *m, h2_task *task, h2_req_engine *ngn)
{
+ h2_stream *stream;
+
if (task->frozen) {
/* this task was handed over to an engine for processing
* and the original worker has finished. That means the
* engine may start processing now. */
h2_task_thaw(task);
- /* we do not want the task to block on writing response
- * bodies into the mplx. */
- h2_task_set_io_blocking(task, 0);
apr_thread_cond_broadcast(m->task_thawed);
return;
}
- else {
- h2_stream *stream;
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
- "h2_mplx(%ld): task(%s) done", m->id, task->id);
- out_close(m, task);
- stream = h2_ihash_get(m->streams, task->stream_id);
-
- if (ngn) {
- apr_off_t bytes = 0;
- if (task->output.beam) {
- h2_beam_send(task->output.beam, NULL, APR_NONBLOCK_READ);
- bytes += h2_beam_get_buffered(task->output.beam);
- }
- if (bytes > 0) {
- /* we need to report consumed and current buffered output
- * to the engine. The request will be streamed out or cancelled,
- * no more data is coming from it and the engine should update
- * its calculations before we destroy this information. */
- h2_req_engine_out_consumed(ngn, task->c, bytes);
- }
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
+ "h2_mplx(%ld): task(%s) done", m->id, task->id);
+ out_close(m, task);
+
+ if (ngn) {
+ apr_off_t bytes = 0;
+ h2_beam_send(task->output.beam, NULL, APR_NONBLOCK_READ);
+ bytes += h2_beam_get_buffered(task->output.beam);
+ if (bytes > 0) {
+ /* we need to report consumed and current buffered output
+ * to the engine. The request will be streamed out or cancelled,
+ * no more data is coming from it and the engine should update
+ * its calculations before we destroy this information. */
+ h2_req_engine_out_consumed(ngn, task->c, bytes);
}
-
- if (task->engine) {
- if (!h2_req_engine_is_shutdown(task->engine)) {
- ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c,
- "h2_mplx(%ld): task(%s) has not-shutdown "
- "engine(%s)", m->id, task->id,
- h2_req_engine_get_id(task->engine));
- }
- h2_ngn_shed_done_ngn(m->ngn_shed, task->engine);
+ }
+
+ if (task->engine) {
+ if (!m->aborted && !task->c->aborted
+ && !h2_req_engine_is_shutdown(task->engine)) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c, APLOGNO(10022)
+ "h2_mplx(%ld): task(%s) has not-shutdown "
+ "engine(%s)", m->id, task->id,
+ h2_req_engine_get_id(task->engine));
+ }
+ h2_ngn_shed_done_ngn(m->ngn_shed, task->engine);
+ }
+
+ task->worker_done = 1;
+ task->done_at = apr_time_now();
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
+ "h2_mplx(%s): request done, %f ms elapsed", task->id,
+ (task->done_at - task->started_at) / 1000.0);
+
+ if (task->started_at > m->last_idle_block) {
+ /* this task finished without causing an 'idle block', e.g.
+ * a block by flow control.
+ */
+ if (task->done_at- m->last_limit_change >= m->limit_change_interval
+ && m->limit_active < m->max_active) {
+ /* Well behaving stream, allow it more workers */
+ m->limit_active = H2MIN(m->limit_active * 2,
+ m->max_active);
+ m->last_limit_change = task->done_at;
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
+ "h2_mplx(%ld): increase worker limit to %d",
+ m->id, m->limit_active);
}
-
- if (!m->aborted && stream && m->redo_tasks
- && h2_ihash_get(m->redo_tasks, task->stream_id)) {
+ }
+
+ stream = h2_ihash_get(m->streams, task->stream_id);
+ if (stream) {
+ /* stream not done yet. */
+ if (!m->aborted && h2_ihash_get(m->sredo, stream->id)) {
/* reset and schedule again */
h2_task_redo(task);
- h2_ihash_remove(m->redo_tasks, task->stream_id);
- h2_iq_add(m->q, task->stream_id, NULL, NULL);
- return;
- }
-
- task->worker_done = 1;
- task->done_at = apr_time_now();
- if (task->output.beam) {
- h2_beam_on_consumed(task->output.beam, NULL, NULL);
- h2_beam_mutex_set(task->output.beam, NULL, NULL, NULL);
- }
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
- "h2_mplx(%s): request done, %f ms elapsed", task->id,
- (task->done_at - task->started_at) / 1000.0);
- if (task->started_at > m->last_idle_block) {
- /* this task finished without causing an 'idle block', e.g.
- * a block by flow control.
- */
- if (task->done_at- m->last_limit_change >= m->limit_change_interval
- && m->workers_limit < m->workers_max) {
- /* Well behaving stream, allow it more workers */
- m->workers_limit = H2MIN(m->workers_limit * 2,
- m->workers_max);
- m->last_limit_change = task->done_at;
- m->need_registration = 1;
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
- "h2_mplx(%ld): increase worker limit to %d",
- m->id, m->workers_limit);
- }
- }
-
- if (stream) {
- /* hang around until the stream deregisters */
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
- "h2_mplx(%s): task_done, stream still open",
- task->id);
- if (h2_stream_is_suspended(stream)) {
- /* more data will not arrive, resume the stream */
- h2_ihash_add(m->sresume, stream);
- have_out_data_for(m, stream->id);
- }
+ h2_ihash_remove(m->sredo, stream->id);
+ h2_iq_add(m->q, stream->id, NULL, NULL);
}
else {
- /* stream no longer active, was it placed in hold? */
- stream = h2_ihash_get(m->shold, task->stream_id);
- if (stream) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
- "h2_mplx(%s): task_done, stream in hold",
- task->id);
- /* We cannot destroy the stream here since this is
- * called from a worker thread and freeing memory pools
- * is only safe in the only thread using it (and its
- * parent pool / allocator) */
- h2_ihash_remove(m->shold, stream->id);
- h2_ihash_add(m->spurge, stream);
- }
- else {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
- "h2_mplx(%s): task_done, stream not found",
- task->id);
- task_destroy(m, task, 0);
- }
-
- if (m->join_wait) {
- apr_thread_cond_signal(m->join_wait);
+ /* stream not cleaned up, stay around */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
+ H2_STRM_MSG(stream, "task_done, stream open"));
+ if (stream->input) {
+ h2_beam_leave(stream->input);
}
+
+ /* more data will not arrive, resume the stream */
+ check_data_for(m, stream, 0);
}
}
+ else if ((stream = h2_ihash_get(m->shold, task->stream_id)) != NULL) {
+ /* stream is done, was just waiting for this. */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
+ H2_STRM_MSG(stream, "task_done, in hold"));
+ if (stream->input) {
+ h2_beam_leave(stream->input);
+ }
+ stream_joined(m, stream);
+ }
+ else if ((stream = h2_ihash_get(m->spurge, task->stream_id)) != NULL) {
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c,
+ H2_STRM_LOG(APLOGNO(03517), stream, "already in spurge"));
+ ap_assert("stream should not be in spurge" == NULL);
+ }
+ else {
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c, APLOGNO(03518)
+ "h2_mplx(%s): task_done, stream not found",
+ task->id);
+ ap_assert("stream should still be available" == NULL);
+ }
}
void h2_mplx_task_done(h2_mplx *m, h2_task *task, h2_task **ptask)
{
- int acquired;
+ H2_MPLX_ENTER_ALWAYS(m);
+
+ task_done(m, task, NULL);
+ --m->tasks_active;
- if (enter_mutex(m, &acquired) == APR_SUCCESS) {
- task_done(m, task, NULL);
- --m->workers_busy;
- if (ptask) {
- /* caller wants another task */
- *ptask = pop_task(m);
- }
- leave_mutex(m, acquired);
+ if (m->join_wait) {
+ apr_thread_cond_signal(m->join_wait);
+ }
+ if (ptask) {
+ /* caller wants another task */
+ *ptask = next_stream_task(m);
}
+ register_if_needed(m);
+
+ H2_MPLX_LEAVE(m);
}
/*******************************************************************************
@@ -1073,74 +903,76 @@ void h2_mplx_task_done(h2_mplx *m, h2_ta
static int latest_repeatable_unsubmitted_iter(void *data, void *val)
{
- task_iter_ctx *ctx = data;
- h2_task *task = val;
- if (!task->worker_done && h2_task_can_redo(task)
- && !h2_ihash_get(ctx->m->redo_tasks, task->stream_id)) {
- /* this task occupies a worker, the response has not been submitted yet,
- * not been cancelled and it is a repeatable request
- * -> it can be re-scheduled later */
- if (!ctx->task || ctx->task->started_at < task->started_at) {
- /* we did not have one or this one was started later */
- ctx->task = task;
+ stream_iter_ctx *ctx = data;
+ h2_stream *stream = val;
+
+ if (stream->task && !stream->task->worker_done
+ && h2_task_can_redo(stream->task)
+ && !h2_ihash_get(ctx->m->sredo, stream->id)) {
+ if (!h2_stream_is_ready(stream)) {
+ /* this task occupies a worker, the response has not been submitted
+ * yet, not been cancelled and it is a repeatable request
+ * -> it can be re-scheduled later */
+ if (!ctx->stream
+ || (ctx->stream->task->started_at < stream->task->started_at)) {
+ /* we did not have one or this one was started later */
+ ctx->stream = stream;
+ }
}
}
return 1;
}
-static h2_task *get_latest_repeatable_unsubmitted_task(h2_mplx *m)
+static h2_stream *get_latest_repeatable_unsubmitted_stream(h2_mplx *m)
{
- task_iter_ctx ctx;
+ stream_iter_ctx ctx;
ctx.m = m;
- ctx.task = NULL;
- h2_ihash_iter(m->tasks, latest_repeatable_unsubmitted_iter, &ctx);
- return ctx.task;
+ ctx.stream = NULL;
+ h2_ihash_iter(m->streams, latest_repeatable_unsubmitted_iter, &ctx);
+ return ctx.stream;
}
static int timed_out_busy_iter(void *data, void *val)
{
- task_iter_ctx *ctx = data;
- h2_task *task = val;
- if (!task->worker_done
- && (ctx->now - task->started_at) > ctx->m->stream_timeout) {
+ stream_iter_ctx *ctx = data;
+ h2_stream *stream = val;
+ if (stream->task && !stream->task->worker_done
+ && (ctx->now - stream->task->started_at) > stream->task->timeout) {
/* timed out stream occupying a worker, found */
- ctx->task = task;
+ ctx->stream = stream;
return 0;
}
return 1;
}
-static h2_task *get_timed_out_busy_task(h2_mplx *m)
+static h2_stream *get_timed_out_busy_stream(h2_mplx *m)
{
- task_iter_ctx ctx;
+ stream_iter_ctx ctx;
ctx.m = m;
- ctx.task = NULL;
+ ctx.stream = NULL;
ctx.now = apr_time_now();
- h2_ihash_iter(m->tasks, timed_out_busy_iter, &ctx);
- return ctx.task;
+ h2_ihash_iter(m->streams, timed_out_busy_iter, &ctx);
+ return ctx.stream;
}
static apr_status_t unschedule_slow_tasks(h2_mplx *m)
{
- h2_task *task;
+ h2_stream *stream;
int n;
- if (!m->redo_tasks) {
- m->redo_tasks = h2_ihash_create(m->pool, offsetof(h2_task, stream_id));
- }
/* Try to get rid of streams that occupy workers. Look for safe requests
* that are repeatable. If none found, fail the connection.
*/
- n = (m->workers_busy - m->workers_limit - h2_ihash_count(m->redo_tasks));
- while (n > 0 && (task = get_latest_repeatable_unsubmitted_task(m))) {
- h2_task_rst(task, H2_ERR_CANCEL);
- h2_ihash_add(m->redo_tasks, task);
+ n = (m->tasks_active - m->limit_active - (int)h2_ihash_count(m->sredo));
+ while (n > 0 && (stream = get_latest_repeatable_unsubmitted_stream(m))) {
+ h2_task_rst(stream->task, H2_ERR_CANCEL);
+ h2_ihash_add(m->sredo, stream);
--n;
}
- if ((m->workers_busy - h2_ihash_count(m->redo_tasks)) > m->workers_limit) {
- task = get_timed_out_busy_task(m);
- if (task) {
+ if ((m->tasks_active - h2_ihash_count(m->sredo)) > m->limit_active) {
+ h2_stream *stream = get_timed_out_busy_stream(m);
+ if (stream) {
/* Too many busy workers, unable to cancel enough streams
* and with a busy, timed out stream, we tell the client
* to go away... */
@@ -1154,11 +986,13 @@ apr_status_t h2_mplx_idle(h2_mplx *m)
{
apr_status_t status = APR_SUCCESS;
apr_time_t now;
- int acquired;
+ apr_size_t scount;
- if (enter_mutex(m, &acquired) == APR_SUCCESS) {
- apr_size_t scount = h2_ihash_count(m->streams);
- if (scount > 0 && m->workers_busy) {
+ H2_MPLX_ENTER(m);
+
+ scount = h2_ihash_count(m->streams);
+ if (scount > 0) {
+ if (m->tasks_active) {
/* If we have streams in connection state 'IDLE', meaning
* all streams are ready to sent data out, but lack
* WINDOW_UPDATEs.
@@ -1173,32 +1007,68 @@ apr_status_t h2_mplx_idle(h2_mplx *m)
*/
now = apr_time_now();
m->last_idle_block = now;
- if (m->workers_limit > 2
+ if (m->limit_active > 2
&& now - m->last_limit_change >= m->limit_change_interval) {
- if (m->workers_limit > 16) {
- m->workers_limit = 16;
+ if (m->limit_active > 16) {
+ m->limit_active = 16;
}
- else if (m->workers_limit > 8) {
- m->workers_limit = 8;
+ else if (m->limit_active > 8) {
+ m->limit_active = 8;
}
- else if (m->workers_limit > 4) {
- m->workers_limit = 4;
+ else if (m->limit_active > 4) {
+ m->limit_active = 4;
}
- else if (m->workers_limit > 2) {
- m->workers_limit = 2;
+ else if (m->limit_active > 2) {
+ m->limit_active = 2;
}
m->last_limit_change = now;
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
"h2_mplx(%ld): decrease worker limit to %d",
- m->id, m->workers_limit);
+ m->id, m->limit_active);
}
- if (m->workers_busy > m->workers_limit) {
+ if (m->tasks_active > m->limit_active) {
status = unschedule_slow_tasks(m);
}
}
- leave_mutex(m, acquired);
+ else if (!h2_iq_empty(m->q)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
+ "h2_mplx(%ld): idle, but %d streams to process",
+ m->id, (int)h2_iq_count(m->q));
+ status = APR_EAGAIN;
+ }
+ else {
+ /* idle, have streams, but no tasks active. what are we waiting for?
+ * WINDOW_UPDATEs from client? */
+ h2_stream *stream = NULL;
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
+ "h2_mplx(%ld): idle, no tasks ongoing, %d streams",
+ m->id, (int)h2_ihash_count(m->streams));
+ h2_ihash_shift(m->streams, (void**)&stream, 1);
+ if (stream) {
+ h2_ihash_add(m->streams, stream);
+ if (stream->output && !stream->out_checked) {
+ /* FIXME: this looks like a race between the session thinking
+ * it is idle and the EOF on a stream not being sent.
+ * Signal to caller to leave IDLE state.
+ */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
+ H2_STRM_MSG(stream, "output closed=%d, mplx idle"
+ ", out has %ld bytes buffered"),
+ h2_beam_is_closed(stream->output),
+ (long)h2_beam_get_buffered(stream->output));
+ h2_ihash_add(m->streams, stream);
+ check_data_for(m, stream, 0);
+ stream->out_checked = 1;
+ status = APR_EAGAIN;
+ }
+ }
+ }
}
+ register_if_needed(m);
+
+ H2_MPLX_LEAVE(m);
return status;
}
@@ -1215,9 +1085,9 @@ typedef struct {
static int ngn_update_window(void *ctx, void *val)
{
ngn_update_ctx *uctx = ctx;
- h2_task *task = val;
- if (task && task->assigned == uctx->ngn
- && output_consumed_signal(uctx->m, task)) {
+ h2_stream *stream = val;
+ if (stream->task && stream->task->assigned == uctx->ngn
+ && output_consumed_signal(uctx->m, stream->task)) {
++uctx->streams_updated;
}
return 1;
@@ -1230,7 +1100,7 @@ static apr_status_t ngn_out_update_windo
ctx.m = m;
ctx.ngn = ngn;
ctx.streams_updated = 0;
- h2_ihash_iter(m->tasks, ngn_update_window, &ctx);
+ h2_ihash_iter(m->streams, ngn_update_window, &ctx);
return ctx.streams_updated? APR_SUCCESS : APR_EAGAIN;
}
@@ -1242,92 +1112,99 @@ apr_status_t h2_mplx_req_engine_push(con
apr_status_t status;
h2_mplx *m;
h2_task *task;
- int acquired;
+ h2_stream *stream;
task = h2_ctx_rget_task(r);
if (!task) {
return APR_ECONNABORTED;
}
m = task->mplx;
- task->r = r;
- if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) {
- h2_stream *stream = h2_ihash_get(m->streams, task->stream_id);
-
- if (stream) {
- status = h2_ngn_shed_push_task(m->ngn_shed, ngn_type, task, einit);
- }
- else {
- status = APR_ECONNABORTED;
- }
- leave_mutex(m, acquired);
+ H2_MPLX_ENTER(m);
+
+ stream = h2_ihash_get(m->streams, task->stream_id);
+ if (stream) {
+ status = h2_ngn_shed_push_request(m->ngn_shed, ngn_type, r, einit);
}
+ else {
+ status = APR_ECONNABORTED;
+ }
+
+ H2_MPLX_LEAVE(m);
return status;
}
apr_status_t h2_mplx_req_engine_pull(h2_req_engine *ngn,
apr_read_type_e block,
- apr_uint32_t capacity,
+ int capacity,
request_rec **pr)
{
h2_ngn_shed *shed = h2_ngn_shed_get_shed(ngn);
h2_mplx *m = h2_ngn_shed_get_ctx(shed);
apr_status_t status;
- h2_task *task = NULL;
- int acquired;
+ int want_shutdown;
- if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) {
- int want_shutdown = (block == APR_BLOCK_READ);
+ H2_MPLX_ENTER(m);
- /* Take this opportunity to update output consummation
- * for this engine */
- ngn_out_update_windows(m, ngn);
-
- if (want_shutdown && !h2_iq_empty(m->q)) {
- /* For a blocking read, check first if requests are to be
- * had and, if not, wait a short while before doing the
- * blocking, and if unsuccessful, terminating read.
- */
- status = h2_ngn_shed_pull_task(shed, ngn, capacity, 1, &task);
- if (APR_STATUS_IS_EAGAIN(status)) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
- "h2_mplx(%ld): start block engine pull", m->id);
- apr_thread_cond_timedwait(m->task_thawed, m->lock,
- apr_time_from_msec(20));
- status = h2_ngn_shed_pull_task(shed, ngn, capacity, 1, &task);
- }
- }
- else {
- status = h2_ngn_shed_pull_task(shed, ngn, capacity,
- want_shutdown, &task);
+ want_shutdown = (block == APR_BLOCK_READ);
+
+ /* Take this opportunity to update output consummation
+ * for this engine */
+ ngn_out_update_windows(m, ngn);
+
+ if (want_shutdown && !h2_iq_empty(m->q)) {
+ /* For a blocking read, check first if requests are to be
+ * had and, if not, wait a short while before doing the
+ * blocking, and if unsuccessful, terminating read.
+ */
+ status = h2_ngn_shed_pull_request(shed, ngn, capacity, 1, pr);
+ if (APR_STATUS_IS_EAGAIN(status)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
+ "h2_mplx(%ld): start block engine pull", m->id);
+ apr_thread_cond_timedwait(m->task_thawed, m->lock,
+ apr_time_from_msec(20));
+ status = h2_ngn_shed_pull_request(shed, ngn, capacity, 1, pr);
}
- leave_mutex(m, acquired);
}
- *pr = task? task->r : NULL;
+ else {
+ status = h2_ngn_shed_pull_request(shed, ngn, capacity,
+ want_shutdown, pr);
+ }
+
+ H2_MPLX_LEAVE(m);
return status;
}
-void h2_mplx_req_engine_done(h2_req_engine *ngn, conn_rec *r_conn)
+void h2_mplx_req_engine_done(h2_req_engine *ngn, conn_rec *r_conn,
+ apr_status_t status)
{
h2_task *task = h2_ctx_cget_task(r_conn);
if (task) {
h2_mplx *m = task->mplx;
- int acquired;
+ h2_stream *stream;
- if (enter_mutex(m, &acquired) == APR_SUCCESS) {
- ngn_out_update_windows(m, ngn);
- h2_ngn_shed_done_task(m->ngn_shed, ngn, task);
- if (task->engine) {
- /* cannot report that as done until engine returns */
- }
- else {
- task_done(m, task, ngn);
- }
- /* Take this opportunity to update output consummation
- * for this engine */
- leave_mutex(m, acquired);
+ H2_MPLX_ENTER_ALWAYS(m);
+
+ stream = h2_ihash_get(m->streams, task->stream_id);
+
+ ngn_out_update_windows(m, ngn);
+ h2_ngn_shed_done_task(m->ngn_shed, ngn, task);
+
+ if (status != APR_SUCCESS && stream
+ && h2_task_can_redo(task)
+ && !h2_ihash_get(m->sredo, stream->id)) {
+ h2_ihash_add(m->sredo, stream);
+ }
+
+ if (task->engine) {
+ /* cannot report that as done until engine returns */
}
+ else {
+ task_done(m, task, ngn);
+ }
+
+ H2_MPLX_LEAVE(m);
}
}
@@ -1335,124 +1212,59 @@ void h2_mplx_req_engine_done(h2_req_engi
* mplx master events dispatching
******************************************************************************/
-static int update_window(void *ctx, void *val)
+int h2_mplx_has_master_events(h2_mplx *m)
{
- input_consumed_signal(ctx, val);
- return 1;
+ return apr_atomic_read32(&m->event_pending) > 0;
}
apr_status_t h2_mplx_dispatch_master_events(h2_mplx *m,
stream_ev_callback *on_resume,
- stream_ev_callback *on_response,
void *on_ctx)
{
- apr_status_t status;
- int acquired;
- int streams[32];
h2_stream *stream;
- h2_task *task;
- size_t i, n;
+ int n, id;
- AP_DEBUG_ASSERT(m);
- if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, m->c,
- "h2_mplx(%ld): dispatch events", m->id);
-
- /* update input windows for streams */
- h2_ihash_iter(m->streams, update_window, m);
-
- if (on_response && !h2_ihash_empty(m->sready)) {
- n = h2_ihash_ishift(m->sready, streams, H2_ALEN(streams));
- for (i = 0; i < n; ++i) {
- stream = h2_ihash_get(m->streams, streams[i]);
- if (!stream) {
- continue;
- }
- ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, m->c,
- "h2_mplx(%ld-%d): on_response",
- m->id, stream->id);
- task = h2_ihash_get(m->tasks, stream->id);
- if (task) {
- task->submitted = 1;
- if (task->rst_error) {
- h2_stream_rst(stream, task->rst_error);
- }
- else {
- AP_DEBUG_ASSERT(task->response);
- h2_stream_set_response(stream, task->response, task->output.beam);
- }
- }
- else {
- /* We have the stream ready without a task. This happens
- * when we fail streams early. A response should already
- * be present. */
- AP_DEBUG_ASSERT(stream->response || stream->rst_error);
- }
- status = on_response(on_ctx, stream->id);
- }
- }
-
- if (on_resume && !h2_ihash_empty(m->sresume)) {
- n = h2_ihash_ishift(m->sresume, streams, H2_ALEN(streams));
- for (i = 0; i < n; ++i) {
- stream = h2_ihash_get(m->streams, streams[i]);
- if (!stream) {
- continue;
- }
- ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, m->c,
- "h2_mplx(%ld-%d): on_resume",
- m->id, stream->id);
- h2_stream_set_suspended(stream, 0);
- status = on_resume(on_ctx, stream->id);
- }
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
+ "h2_mplx(%ld): dispatch events", m->id);
+ apr_atomic_set32(&m->event_pending, 0);
+
+ /* update input windows for streams */
+ h2_ihash_iter(m->streams, report_consumption_iter, m);
+ purge_streams(m, 1);
+
+ n = h2_ififo_count(m->readyq);
+ while (n > 0
+ && (h2_ififo_try_pull(m->readyq, &id) == APR_SUCCESS)) {
+ --n;
+ stream = h2_ihash_get(m->streams, id);
+ if (stream) {
+ on_resume(on_ctx, stream);
}
-
- leave_mutex(m, acquired);
}
- return status;
+
+ return APR_SUCCESS;
}
-static void output_produced(void *ctx, h2_bucket_beam *beam, apr_off_t bytes)
+apr_status_t h2_mplx_keep_active(h2_mplx *m, h2_stream *stream)
{
- h2_mplx *m = ctx;
- apr_status_t status;
- h2_stream *stream;
- int acquired;
-
- AP_DEBUG_ASSERT(m);
- if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) {
- stream = h2_ihash_get(m->streams, beam->id);
- if (stream && h2_stream_is_suspended(stream)) {
- h2_ihash_add(m->sresume, stream);
- h2_beam_on_produced(beam, NULL, NULL);
- have_out_data_for(m, beam->id);
- }
- leave_mutex(m, acquired);
- }
+ check_data_for(m, stream, 1);
+ return APR_SUCCESS;
}
-apr_status_t h2_mplx_suspend_stream(h2_mplx *m, int stream_id)
+int h2_mplx_awaits_data(h2_mplx *m)
{
- apr_status_t status;
- h2_stream *stream;
- h2_task *task;
- int acquired;
-
- AP_DEBUG_ASSERT(m);
- if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) {
- stream = h2_ihash_get(m->streams, stream_id);
- if (stream) {
- h2_stream_set_suspended(stream, 1);
- task = h2_ihash_get(m->tasks, stream->id);
- if (stream->started && (!task || task->worker_done)) {
- h2_ihash_add(m->sresume, stream);
- }
- else {
- /* register callback so that we can resume on new output */
- h2_beam_on_produced(task->output.beam, output_produced, m);
- }
- }
- leave_mutex(m, acquired);
+ int waiting = 1;
+
+ H2_MPLX_ENTER_ALWAYS(m);
+
+ if (h2_ihash_empty(m->streams)) {
+ waiting = 0;
}
- return status;
+ else if (!m->tasks_active && !h2_ififo_count(m->readyq)
+ && h2_iq_empty(m->q)) {
+ waiting = 0;
+ }
+
+ H2_MPLX_LEAVE(m);
+ return waiting;
}
diff -up --new-file httpd-2.4.23/modules/http2/h2_mplx.h /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_mplx.h
--- httpd-2.4.23/modules/http2/h2_mplx.h 2016-06-14 10:51:31.000000000 +0200
+++ /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_mplx.h 2017-07-06 14:58:22.000000000 +0200
@@ -40,7 +40,6 @@ struct apr_thread_cond_t;
struct h2_bucket_beam;
struct h2_config;
struct h2_ihash_t;
-struct h2_response;
struct h2_task;
struct h2_stream;
struct h2_request;
@@ -54,40 +53,31 @@ struct h2_req_engine;
typedef struct h2_mplx h2_mplx;
-/**
- * Callback invoked for every stream that had input data read since
- * the last invocation.
- */
-typedef void h2_mplx_consumed_cb(void *ctx, int stream_id, apr_off_t consumed);
-
struct h2_mplx {
long id;
conn_rec *c;
apr_pool_t *pool;
- apr_bucket_alloc_t *bucket_alloc;
+ server_rec *s; /* server for master conn */
- APR_RING_ENTRY(h2_mplx) link;
-
- unsigned int aborted : 1;
- unsigned int need_registration : 1;
+ unsigned int event_pending;
+ unsigned int aborted;
+ unsigned int is_registered; /* is registered at h2_workers */
struct h2_ihash_t *streams; /* all streams currently processing */
+ struct h2_ihash_t *sredo; /* all streams that need to be re-started */
struct h2_ihash_t *shold; /* all streams done with task ongoing */
struct h2_ihash_t *spurge; /* all streams done, ready for destroy */
-
- struct h2_iqueue *q; /* all stream ids that need to be started */
- struct h2_ihash_t *sready; /* all streams ready for response */
- struct h2_ihash_t *sresume; /* all streams that can be resumed */
- struct h2_ihash_t *tasks; /* all tasks started and not destroyed */
+ struct h2_iqueue *q; /* all stream ids that need to be started */
+ struct h2_ififo *readyq; /* all stream ids ready for output */
+
struct h2_ihash_t *redo_tasks; /* all tasks that need to be redone */
- apr_uint32_t max_streams; /* max # of concurrent streams */
- apr_uint32_t max_stream_started; /* highest stream id that started processing */
- apr_uint32_t workers_busy; /* # of workers processing on this mplx */
- apr_uint32_t workers_limit; /* current # of workers limit, dynamic */
- apr_uint32_t workers_def_limit; /* default # of workers limit */
- apr_uint32_t workers_max; /* max, hard limit # of workers in a process */
+ int max_streams; /* max # of concurrent streams */
+ int max_stream_started; /* highest stream id that started processing */
+ int tasks_active; /* # of tasks being processed from this mplx */
+ int limit_active; /* current limit on active tasks, dynamic */
+ int max_active; /* max, hard limit # of active tasks in a process */
apr_time_t last_idle_block; /* last time, this mplx entered IDLE while
* streams were ready */
apr_time_t last_limit_change; /* last time, worker limit changed */
@@ -99,18 +89,12 @@ struct h2_mplx {
struct apr_thread_cond_t *join_wait;
apr_size_t stream_max_mem;
- apr_interval_time_t stream_timeout;
apr_pool_t *spare_io_pool;
apr_array_header_t *spare_slaves; /* spare slave connections */
struct h2_workers *workers;
- int tx_handles_reserved;
- apr_size_t tx_chunk_size;
- h2_mplx_consumed_cb *input_consumed;
- void *input_consumed_ctx;
-
struct h2_ngn_shed *ngn_shed;
};
@@ -128,7 +112,6 @@ apr_status_t h2_mplx_child_init(apr_pool
*/
h2_mplx *h2_mplx_create(conn_rec *c, apr_pool_t *master,
const struct h2_config *conf,
- apr_interval_time_t stream_timeout,
struct h2_workers *workers);
/**
@@ -139,15 +122,9 @@ h2_mplx *h2_mplx_create(conn_rec *c, apr
* @param m the mplx to be released and destroyed
* @param wait condition var to wait on for ref counter == 0
*/
-apr_status_t h2_mplx_release_and_join(h2_mplx *m, struct apr_thread_cond_t *wait);
-
-/**
- * Aborts the multiplexer. It will answer all future invocation with
- * APR_ECONNABORTED, leading to early termination of ongoing streams.
- */
-void h2_mplx_abort(h2_mplx *mplx);
+void h2_mplx_release_and_join(h2_mplx *m, struct apr_thread_cond_t *wait);
-struct h2_task *h2_mplx_pop_task(h2_mplx *mplx, int *has_more);
+apr_status_t h2_mplx_pop_task(h2_mplx *m, struct h2_task **ptask);
void h2_mplx_task_done(h2_mplx *m, struct h2_task *task, struct h2_task **ptask);
@@ -156,21 +133,24 @@ void h2_mplx_task_done(h2_mplx *m, struc
* but let the ongoing ones finish normally.
* @return the highest stream id being/been processed
*/
-apr_uint32_t h2_mplx_shutdown(h2_mplx *m);
+int h2_mplx_shutdown(h2_mplx *m);
+
+int h2_mplx_is_busy(h2_mplx *m);
/*******************************************************************************
* IO lifetime of streams.
******************************************************************************/
+struct h2_stream *h2_mplx_stream_get(h2_mplx *m, int id);
+
/**
- * Notifies mplx that a stream has finished processing.
+ * Notifies mplx that a stream has been completely handled on the main
+ * connection and is ready for cleanup.
*
* @param m the mplx itself
- * @param stream the id of the stream being done
- * @param rst_error if != 0, the stream was reset with the error given
- *
+ * @param stream the stream ready for cleanup
*/
-apr_status_t h2_mplx_stream_done(h2_mplx *m, struct h2_stream *stream);
+apr_status_t h2_mplx_stream_cleanup(h2_mplx *m, struct h2_stream *stream);
/**
* Waits on output data from any stream in this session to become available.
@@ -179,6 +159,8 @@ apr_status_t h2_mplx_stream_done(h2_mplx
apr_status_t h2_mplx_out_trywait(h2_mplx *m, apr_interval_time_t timeout,
struct apr_thread_cond_t *iowait);
+apr_status_t h2_mplx_keep_active(h2_mplx *m, struct h2_stream *stream);
+
/*******************************************************************************
* Stream processing.
******************************************************************************/
@@ -204,31 +186,29 @@ apr_status_t h2_mplx_process(h2_mplx *m,
*/
apr_status_t h2_mplx_reprioritize(h2_mplx *m, h2_stream_pri_cmp *cmp, void *ctx);
+typedef apr_status_t stream_ev_callback(void *ctx, struct h2_stream *stream);
+
/**
- * Register a callback for the amount of input data consumed per stream. The
- * will only ever be invoked from the thread creating this h2_mplx, e.g. when
- * calls from that thread into this h2_mplx are made.
- *
- * @param m the multiplexer to register the callback at
- * @param cb the function to invoke
- * @param ctx user supplied argument to invocation.
+ * Check if the multiplexer has events for the master connection pending.
+ * @return != 0 iff there are events pending
*/
-void h2_mplx_set_consumed_cb(h2_mplx *m, h2_mplx_consumed_cb *cb, void *ctx);
-
-
-typedef apr_status_t stream_ev_callback(void *ctx, int stream_id);
+int h2_mplx_has_master_events(h2_mplx *m);
/**
* Dispatch events for the master connection, such as
- * - resume: new output data has arrived for a suspended stream
- * - response: the response for a stream is ready
+ * @param m the multiplexer
+ * @param on_resume new output data has arrived for a suspended stream
+ * @param ctx user supplied argument to invocation.
*/
apr_status_t h2_mplx_dispatch_master_events(h2_mplx *m,
stream_ev_callback *on_resume,
- stream_ev_callback *on_response,
void *ctx);
-apr_status_t h2_mplx_suspend_stream(h2_mplx *m, int stream_id);
+int h2_mplx_awaits_data(h2_mplx *m);
+
+typedef int h2_mplx_stream_cb(struct h2_stream *s, void *ctx);
+
+apr_status_t h2_mplx_stream_do(h2_mplx *m, h2_mplx_stream_cb *cb, void *ctx);
/*******************************************************************************
* Output handling of streams.
@@ -238,7 +218,7 @@ apr_status_t h2_mplx_suspend_stream(h2_m
* Opens the output for the given stream with the specified response.
*/
apr_status_t h2_mplx_out_open(h2_mplx *mplx, int stream_id,
- struct h2_response *response);
+ struct h2_bucket_beam *beam);
/*******************************************************************************
* h2_mplx list Manipulation.
@@ -331,7 +311,7 @@ typedef apr_status_t h2_mplx_req_engine_
const char *id,
const char *type,
apr_pool_t *pool,
- apr_uint32_t req_buffer_size,
+ apr_size_t req_buffer_size,
request_rec *r,
h2_output_consumed **pconsumed,
void **pbaton);
@@ -341,8 +321,9 @@ apr_status_t h2_mplx_req_engine_push(con
h2_mplx_req_engine_init *einit);
apr_status_t h2_mplx_req_engine_pull(struct h2_req_engine *ngn,
apr_read_type_e block,
- apr_uint32_t capacity,
+ int capacity,
request_rec **pr);
-void h2_mplx_req_engine_done(struct h2_req_engine *ngn, conn_rec *r_conn);
+void h2_mplx_req_engine_done(struct h2_req_engine *ngn, conn_rec *r_conn,
+ apr_status_t status);
#endif /* defined(__mod_h2__h2_mplx__) */
diff -up --new-file httpd-2.4.23/modules/http2/h2_ngn_shed.c /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_ngn_shed.c
--- httpd-2.4.23/modules/http2/h2_ngn_shed.c 2016-06-22 15:30:24.000000000 +0200
+++ /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_ngn_shed.c 2017-04-10 17:04:55.000000000 +0200
@@ -35,7 +35,6 @@
#include "h2_ctx.h"
#include "h2_h2.h"
#include "h2_mplx.h"
-#include "h2_response.h"
#include "h2_request.h"
#include "h2_task.h"
#include "h2_util.h"
@@ -46,6 +45,7 @@ typedef struct h2_ngn_entry h2_ngn_entry
struct h2_ngn_entry {
APR_RING_ENTRY(h2_ngn_entry) link;
h2_task *task;
+ request_rec *r;
};
#define H2_NGN_ENTRY_NEXT(e) APR_RING_NEXT((e), link)
@@ -72,17 +72,17 @@ struct h2_req_engine {
const char *type; /* name of the engine type */
apr_pool_t *pool; /* pool for engine specific allocations */
conn_rec *c; /* connection this engine is assigned to */
- h2_task *task; /* the task this engine is base on, running in */
+ h2_task *task; /* the task this engine is based on, running in */
h2_ngn_shed *shed;
unsigned int shutdown : 1; /* engine is being shut down */
unsigned int done : 1; /* engine has finished */
APR_RING_HEAD(h2_req_entries, h2_ngn_entry) entries;
- apr_uint32_t capacity; /* maximum concurrent requests */
- apr_uint32_t no_assigned; /* # of assigned requests */
- apr_uint32_t no_live; /* # of live */
- apr_uint32_t no_finished; /* # of finished */
+ int capacity; /* maximum concurrent requests */
+ int no_assigned; /* # of assigned requests */
+ int no_live; /* # of live */
+ int no_finished; /* # of finished */
h2_output_consumed *out_consumed;
void *out_consumed_ctx;
@@ -107,8 +107,8 @@ void h2_req_engine_out_consumed(h2_req_e
}
h2_ngn_shed *h2_ngn_shed_create(apr_pool_t *pool, conn_rec *c,
- apr_uint32_t default_capacity,
- apr_uint32_t req_buffer_size)
+ int default_capacity,
+ apr_size_t req_buffer_size)
{
h2_ngn_shed *shed;
@@ -144,30 +144,50 @@ void h2_ngn_shed_abort(h2_ngn_shed *shed
shed->aborted = 1;
}
-static void ngn_add_task(h2_req_engine *ngn, h2_task *task)
+static void ngn_add_task(h2_req_engine *ngn, h2_task *task, request_rec *r)
{
h2_ngn_entry *entry = apr_pcalloc(task->pool, sizeof(*entry));
APR_RING_ELEM_INIT(entry, link);
entry->task = task;
+ entry->r = r;
H2_REQ_ENTRIES_INSERT_TAIL(&ngn->entries, entry);
+ ngn->no_assigned++;
}
-apr_status_t h2_ngn_shed_push_task(h2_ngn_shed *shed, const char *ngn_type,
- h2_task *task, http2_req_engine_init *einit)
+apr_status_t h2_ngn_shed_push_request(h2_ngn_shed *shed, const char *ngn_type,
+ request_rec *r,
+ http2_req_engine_init *einit)
{
h2_req_engine *ngn;
+ h2_task *task = h2_ctx_rget_task(r);
- AP_DEBUG_ASSERT(shed);
-
+ ap_assert(task);
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, shed->c,
"h2_ngn_shed(%ld): PUSHing request (task=%s)", shed->c->id,
task->id);
- if (task->ser_headers) {
+ if (task->request->serialize) {
/* Max compatibility, deny processing of this */
return APR_EOF;
}
+ if (task->assigned) {
+ --task->assigned->no_assigned;
+ --task->assigned->no_live;
+ task->assigned = NULL;
+ }
+
+ if (task->engine) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c,
+ "h2_ngn_shed(%ld): push task(%s) hosting engine %s "
+ "already with %d tasks",
+ shed->c->id, task->id, task->engine->id,
+ task->engine->no_assigned);
+ task->assigned = task->engine;
+ ngn_add_task(task->engine, task, r);
+ return APR_SUCCESS;
+ }
+
ngn = apr_hash_get(shed->ngns, ngn_type, APR_HASH_KEY_STRING);
if (ngn && !ngn->shutdown) {
/* this task will be processed in another thread,
@@ -175,12 +195,10 @@ apr_status_t h2_ngn_shed_push_task(h2_ng
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c,
"h2_ngn_shed(%ld): pushing request %s to %s",
shed->c->id, task->id, ngn->id);
- if (!h2_task_is_detached(task)) {
+ if (!h2_task_has_thawed(task)) {
h2_task_freeze(task);
}
- /* FIXME: sometimes ngn is garbage, probly alread freed */
- ngn_add_task(ngn, task);
- ngn->no_assigned++;
+ ngn_add_task(ngn, task, r);
return APR_SUCCESS;
}
@@ -202,13 +220,13 @@ apr_status_t h2_ngn_shed_push_task(h2_ng
APR_RING_INIT(&newngn->entries, h2_ngn_entry, link);
status = einit(newngn, newngn->id, newngn->type, newngn->pool,
- shed->req_buffer_size, task->r,
+ shed->req_buffer_size, r,
&newngn->out_consumed, &newngn->out_consumed_ctx);
+
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, task->c, APLOGNO(03395)
"h2_ngn_shed(%ld): create engine %s (%s)",
shed->c->id, newngn->id, newngn->type);
if (status == APR_SUCCESS) {
- AP_DEBUG_ASSERT(task->engine == NULL);
newngn->task = task;
task->engine = newngn;
task->assigned = newngn;
@@ -225,7 +243,7 @@ static h2_ngn_entry *pop_detached(h2_req
for (entry = H2_REQ_ENTRIES_FIRST(&ngn->entries);
entry != H2_REQ_ENTRIES_SENTINEL(&ngn->entries);
entry = H2_NGN_ENTRY_NEXT(entry)) {
- if (h2_task_is_detached(entry->task)
+ if (h2_task_has_thawed(entry->task)
|| (entry->task->engine == ngn)) {
/* The task hosting this engine can always be pulled by it.
* For other task, they need to become detached, e.g. no longer
@@ -237,17 +255,17 @@ static h2_ngn_entry *pop_detached(h2_req
return NULL;
}
-apr_status_t h2_ngn_shed_pull_task(h2_ngn_shed *shed,
- h2_req_engine *ngn,
- apr_uint32_t capacity,
- int want_shutdown,
- h2_task **ptask)
+apr_status_t h2_ngn_shed_pull_request(h2_ngn_shed *shed,
+ h2_req_engine *ngn,
+ int capacity,
+ int want_shutdown,
+ request_rec **pr)
{
h2_ngn_entry *entry;
- AP_DEBUG_ASSERT(ngn);
- *ptask = NULL;
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, shed->c, APLOGNO(03396)
+ ap_assert(ngn);
+ *pr = NULL;
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, shed->c, APLOGNO(03396)
"h2_ngn_shed(%ld): pull task for engine %s, shutdown=%d",
shed->c->id, ngn->id, want_shutdown);
if (shed->aborted) {
@@ -274,7 +292,7 @@ apr_status_t h2_ngn_shed_pull_task(h2_ng
"h2_ngn_shed(%ld): pulled request %s for engine %s",
shed->c->id, entry->task->id, ngn->id);
ngn->no_live++;
- *ptask = entry->task;
+ *pr = entry->r;
entry->task->assigned = ngn;
/* task will now run in ngn's own thread. Modules like lua
* seem to require the correct thread set in the conn_rec.
@@ -328,7 +346,7 @@ void h2_ngn_shed_done_ngn(h2_ngn_shed *s
if (!shed->aborted && !H2_REQ_ENTRIES_EMPTY(&ngn->entries)) {
h2_ngn_entry *entry;
- ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, shed->c,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, shed->c,
"h2_ngn_shed(%ld): exit engine %s (%s), "
"has still requests queued, shutdown=%d,"
"assigned=%ld, live=%ld, finished=%ld",
@@ -340,15 +358,16 @@ void h2_ngn_shed_done_ngn(h2_ngn_shed *s
entry != H2_REQ_ENTRIES_SENTINEL(&ngn->entries);
entry = H2_NGN_ENTRY_NEXT(entry)) {
h2_task *task = entry->task;
- ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, shed->c,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, shed->c,
"h2_ngn_shed(%ld): engine %s has queued task %s, "
"frozen=%d, aborting",
shed->c->id, ngn->id, task->id, task->frozen);
ngn_done_task(shed, ngn, task, 0, 1);
+ task->engine = task->assigned = NULL;
}
}
if (!shed->aborted && (ngn->no_assigned > 1 || ngn->no_live > 1)) {
- ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, shed->c,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, shed->c,
"h2_ngn_shed(%ld): exit engine %s (%s), "
"assigned=%ld, live=%ld, finished=%ld",
shed->c->id, ngn->id, ngn->type,
@@ -364,3 +383,9 @@ void h2_ngn_shed_done_ngn(h2_ngn_shed *s
apr_hash_set(shed->ngns, ngn->type, APR_HASH_KEY_STRING, NULL);
ngn->done = 1;
}
+
+void h2_ngn_shed_destroy(h2_ngn_shed *shed)
+{
+ ap_assert(apr_hash_count(shed->ngns) == 0);
+}
+
diff -up --new-file httpd-2.4.23/modules/http2/h2_ngn_shed.h /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_ngn_shed.h
--- httpd-2.4.23/modules/http2/h2_ngn_shed.h 2016-05-23 12:55:29.000000000 +0200
+++ /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_ngn_shed.h 2016-10-27 18:53:58.000000000 +0200
@@ -28,8 +28,8 @@ struct h2_ngn_shed {
unsigned int aborted : 1;
- apr_uint32_t default_capacity;
- apr_uint32_t req_buffer_size; /* preferred buffer size for responses */
+ int default_capacity;
+ apr_size_t req_buffer_size; /* preferred buffer size for responses */
};
const char *h2_req_engine_get_id(h2_req_engine *engine);
@@ -42,14 +42,16 @@ typedef apr_status_t h2_shed_ngn_init(h2
const char *id,
const char *type,
apr_pool_t *pool,
- apr_uint32_t req_buffer_size,
+ apr_size_t req_buffer_size,
request_rec *r,
h2_output_consumed **pconsumed,
void **pbaton);
h2_ngn_shed *h2_ngn_shed_create(apr_pool_t *pool, conn_rec *c,
- apr_uint32_t default_capactiy,
- apr_uint32_t req_buffer_size);
+ int default_capactiy,
+ apr_size_t req_buffer_size);
+
+void h2_ngn_shed_destroy(h2_ngn_shed *shed);
void h2_ngn_shed_set_ctx(h2_ngn_shed *shed, void *user_ctx);
void *h2_ngn_shed_get_ctx(h2_ngn_shed *shed);
@@ -58,13 +60,13 @@ h2_ngn_shed *h2_ngn_shed_get_shed(struct
void h2_ngn_shed_abort(h2_ngn_shed *shed);
-apr_status_t h2_ngn_shed_push_task(h2_ngn_shed *shed, const char *ngn_type,
- struct h2_task *task,
- h2_shed_ngn_init *init_cb);
-
-apr_status_t h2_ngn_shed_pull_task(h2_ngn_shed *shed, h2_req_engine *pub_ngn,
- apr_uint32_t capacity,
- int want_shutdown, struct h2_task **ptask);
+apr_status_t h2_ngn_shed_push_request(h2_ngn_shed *shed, const char *ngn_type,
+ request_rec *r,
+ h2_shed_ngn_init *init_cb);
+
+apr_status_t h2_ngn_shed_pull_request(h2_ngn_shed *shed, h2_req_engine *pub_ngn,
+ int capacity,
+ int want_shutdown, request_rec **pr);
apr_status_t h2_ngn_shed_done_task(h2_ngn_shed *shed,
struct h2_req_engine *ngn,
diff -up --new-file httpd-2.4.23/modules/http2/h2_proxy_session.c /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_proxy_session.c
--- httpd-2.4.23/modules/http2/h2_proxy_session.c 2016-06-22 15:11:03.000000000 +0200
+++ /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_proxy_session.c 2017-05-02 17:29:13.000000000 +0200
@@ -35,17 +35,21 @@ typedef struct h2_proxy_stream {
const char *url;
request_rec *r;
- h2_request *req;
+ h2_proxy_request *req;
+ const char *real_server_uri;
+ const char *p_server_uri;
int standalone;
- h2_stream_state_t state;
+ h2_proxy_stream_state_t state;
unsigned int suspended : 1;
- unsigned int data_sent : 1;
- unsigned int data_received : 1;
+ unsigned int waiting_on_100 : 1;
+ unsigned int waiting_on_ping : 1;
uint32_t error_code;
apr_bucket_brigade *input;
+ apr_off_t data_sent;
apr_bucket_brigade *output;
+ apr_off_t data_received;
apr_table_t *saves;
} h2_proxy_stream;
@@ -53,6 +57,9 @@ typedef struct h2_proxy_stream {
static void dispatch_event(h2_proxy_session *session, h2_proxys_event_t ev,
int arg, const char *msg);
+static void ping_arrived(h2_proxy_session *session);
+static apr_status_t check_suspended(h2_proxy_session *session);
+static void stream_resume(h2_proxy_stream *stream);
static apr_status_t proxy_session_pre_close(void *theconn)
@@ -64,7 +71,7 @@ static apr_status_t proxy_session_pre_cl
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
"proxy_session(%s): pool cleanup, state=%d, streams=%d",
session->id, session->state,
- (int)h2_ihash_count(session->streams));
+ (int)h2_proxy_ihash_count(session->streams));
session->aborted = 1;
dispatch_event(session, H2_PROXYS_EV_PRE_CLOSE, 0, NULL);
nghttp2_session_del(session->ngh2);
@@ -94,7 +101,7 @@ static int proxy_pass_brigade(apr_bucket
* issues in case of error returned below. */
apr_brigade_cleanup(bb);
if (status != APR_SUCCESS) {
- ap_log_cerror(APLOG_MARK, APLOG_ERR, status, origin, APLOGNO(03357)
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, origin, APLOGNO(03357)
"pass output failed to %pI (%s)",
p_conn->addr, p_conn->hostname);
}
@@ -131,19 +138,64 @@ static int on_frame_recv(nghttp2_session
void *user_data)
{
h2_proxy_session *session = user_data;
+ h2_proxy_stream *stream;
+ request_rec *r;
int n;
if (APLOGcdebug(session->c)) {
char buffer[256];
- h2_util_frame_print(frame, buffer, sizeof(buffer)/sizeof(buffer[0]));
+ h2_proxy_util_frame_print(frame, buffer, sizeof(buffer)/sizeof(buffer[0]));
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03341)
"h2_proxy_session(%s): recv FRAME[%s]",
session->id, buffer);
}
+ session->last_frame_received = apr_time_now();
switch (frame->hd.type) {
case NGHTTP2_HEADERS:
+ stream = nghttp2_session_get_stream_user_data(ngh2, frame->hd.stream_id);
+ if (!stream) {
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
+ }
+ r = stream->r;
+ if (r->status >= 100 && r->status < 200) {
+ /* By default, we will forward all interim responses when
+ * we are sitting on a HTTP/2 connection to the client */
+ int forward = session->h2_front;
+ switch(r->status) {
+ case 100:
+ if (stream->waiting_on_100) {
+ stream->waiting_on_100 = 0;
+ r->status_line = ap_get_status_line(r->status);
+ forward = 1;
+ }
+ break;
+ case 103:
+ /* workaround until we get this into http protocol base
+ * parts. without this, unknown codes are converted to
+ * 500... */
+ r->status_line = "103 Early Hints";
+ break;
+ default:
+ r->status_line = ap_get_status_line(r->status);
+ break;
+ }
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(03487)
+ "h2_proxy_session(%s): got interim HEADERS, "
+ "status=%d, will forward=%d",
+ session->id, r->status, forward);
+ if (forward) {
+ ap_send_interim_response(r, 1);
+ }
+ }
+ stream_resume(stream);
+ break;
+ case NGHTTP2_PING:
+ if (session->check_ping) {
+ session->check_ping = 0;
+ ping_arrived(session);
+ }
break;
case NGHTTP2_PUSH_PROMISE:
break;
@@ -174,7 +226,7 @@ static int before_frame_send(nghttp2_ses
if (APLOGcdebug(session->c)) {
char buffer[256];
- h2_util_frame_print(frame, buffer, sizeof(buffer)/sizeof(buffer[0]));
+ h2_proxy_util_frame_print(frame, buffer, sizeof(buffer)/sizeof(buffer[0]));
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03343)
"h2_proxy_session(%s): sent FRAME[%s]",
session->id, buffer);
@@ -188,7 +240,7 @@ static int add_header(void *table, const
return 1;
}
-static void process_proxy_header(request_rec *r, const char *n, const char *v)
+static void process_proxy_header(h2_proxy_stream *stream, const char *n, const char *v)
{
static const struct {
const char *name;
@@ -201,16 +253,26 @@ static void process_proxy_header(request
{ "Set-Cookie", ap_proxy_cookie_reverse_map },
{ NULL, NULL }
};
+ request_rec *r = stream->r;
proxy_dir_conf *dconf;
int i;
- for (i = 0; transform_hdrs[i].name; ++i) {
- if (!ap_cstr_casecmp(transform_hdrs[i].name, n)) {
+ dconf = ap_get_module_config(r->per_dir_config, &proxy_module);
+ if (!dconf->preserve_host) {
+ for (i = 0; transform_hdrs[i].name; ++i) {
+ if (!ap_cstr_casecmp(transform_hdrs[i].name, n)) {
+ apr_table_add(r->headers_out, n,
+ (*transform_hdrs[i].func)(r, dconf, v));
+ return;
+ }
+ }
+ if (!ap_cstr_casecmp("Link", n)) {
dconf = ap_get_module_config(r->per_dir_config, &proxy_module);
apr_table_add(r->headers_out, n,
- (*transform_hdrs[i].func)(r, dconf, v));
+ h2_proxy_link_reverse_map(r, dconf,
+ stream->real_server_uri, stream->p_server_uri, v));
return;
- }
+ }
}
apr_table_add(r->headers_out, n, v);
}
@@ -240,13 +302,13 @@ static apr_status_t h2_proxy_stream_add_
char *hname, *hvalue;
hname = apr_pstrndup(stream->pool, n, nlen);
- h2_util_camel_case_header(hname, nlen);
+ h2_proxy_util_camel_case_header(hname, nlen);
hvalue = apr_pstrndup(stream->pool, v, vlen);
ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c,
"h2_proxy_stream(%s-%d): got header %s: %s",
stream->session->id, stream->id, hname, hvalue);
- process_proxy_header(stream->r, hname, hvalue);
+ process_proxy_header(stream, hname, hvalue);
}
return APR_SUCCESS;
}
@@ -284,7 +346,7 @@ static void h2_proxy_stream_end_headers_
/* If USE_CANONICAL_NAME_OFF was configured for the proxy virtual host,
* then the server name returned by ap_get_server_name() is the
- * origin server name (which does make too much sense with Via: headers)
+ * origin server name (which doesn't make sense with Via: headers)
* so we use the proxy vhost's name instead.
*/
if (server_name == stream->r->hostname) {
@@ -320,9 +382,9 @@ static void h2_proxy_stream_end_headers_
}
}
-static int on_data_chunk_recv(nghttp2_session *ngh2, uint8_t flags,
- int32_t stream_id, const uint8_t *data,
- size_t len, void *user_data)
+static int stream_response_data(nghttp2_session *ngh2, uint8_t flags,
+ int32_t stream_id, const uint8_t *data,
+ size_t len, void *user_data)
{
h2_proxy_session *session = user_data;
h2_proxy_stream *stream;
@@ -342,8 +404,8 @@ static int on_data_chunk_recv(nghttp2_se
/* last chance to manipulate response headers.
* after this, only trailers */
h2_proxy_stream_end_headers_out(stream);
- stream->data_received = 1;
}
+ stream->data_received += len;
b = apr_bucket_transient_create((const char*)data, len,
stream->r->connection->bucket_alloc);
@@ -353,10 +415,11 @@ static int on_data_chunk_recv(nghttp2_se
b = apr_bucket_flush_create(stream->r->connection->bucket_alloc);
APR_BRIGADE_INSERT_TAIL(stream->output, b);
- ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, stream->r, APLOGNO(03359)
- "h2_proxy_session(%s): pass response data for "
- "stream %d, %d bytes", session->id, stream_id, (int)len);
status = ap_pass_brigade(stream->r->output_filters, stream->output);
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, status, stream->r, APLOGNO(03359)
+ "h2_proxy_session(%s): stream=%d, response DATA %ld, %ld"
+ " total", session->id, stream_id, (long)len,
+ (long)stream->data_received);
if (status != APR_SUCCESS) {
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c, APLOGNO(03344)
"h2_proxy_session(%s): passing output on stream %d",
@@ -383,7 +446,7 @@ static int on_stream_close(nghttp2_sessi
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03360)
"h2_proxy_session(%s): stream=%d, closed, err=%d",
session->id, stream_id, error_code);
- stream = h2_ihash_get(session->streams, stream_id);
+ stream = h2_proxy_ihash_get(session->streams, stream_id);
if (stream) {
stream->error_code = error_code;
}
@@ -417,10 +480,10 @@ static int on_header(nghttp2_session *ng
return 0;
}
-static ssize_t stream_data_read(nghttp2_session *ngh2, int32_t stream_id,
- uint8_t *buf, size_t length,
- uint32_t *data_flags,
- nghttp2_data_source *source, void *user_data)
+static ssize_t stream_request_data(nghttp2_session *ngh2, int32_t stream_id,
+ uint8_t *buf, size_t length,
+ uint32_t *data_flags,
+ nghttp2_data_source *source, void *user_data)
{
h2_proxy_stream *stream;
apr_status_t status = APR_SUCCESS;
@@ -434,7 +497,17 @@ static ssize_t stream_data_read(nghttp2_
return NGHTTP2_ERR_CALLBACK_FAILURE;
}
- if (APR_BRIGADE_EMPTY(stream->input)) {
+ if (stream->session->check_ping) {
+ /* suspend until we hear from the other side */
+ stream->waiting_on_ping = 1;
+ status = APR_EAGAIN;
+ }
+ else if (stream->r->expecting_100) {
+ /* suspend until the answer comes */
+ stream->waiting_on_100 = 1;
+ status = APR_EAGAIN;
+ }
+ else if (APR_BRIGADE_EMPTY(stream->input)) {
status = ap_get_brigade(stream->r->input_filters, stream->input,
AP_MODE_READBYTES, APR_NONBLOCK_READ,
H2MAX(APR_BUCKET_BUFF_SIZE, length));
@@ -476,10 +549,12 @@ static ssize_t stream_data_read(nghttp2_
apr_bucket_delete(b);
}
- ap_log_rerror(APLOG_MARK, APLOG_TRACE2, status, stream->r,
- "h2_proxy_stream(%d): request body read %ld bytes, flags=%d",
- stream->id, (long)readlen, (int)*data_flags);
- stream->data_sent = 1;
+ stream->data_sent += readlen;
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, status, stream->r, APLOGNO(03468)
+ "h2_proxy_stream(%d): request DATA %ld, %ld"
+ " total, flags=%d",
+ stream->id, (long)readlen, (long)stream->data_sent,
+ (int)*data_flags);
return readlen;
}
else if (APR_STATUS_IS_EAGAIN(status)) {
@@ -488,7 +563,7 @@ static ssize_t stream_data_read(nghttp2_
"h2_proxy_stream(%s-%d): suspending",
stream->session->id, stream_id);
stream->suspended = 1;
- h2_iq_add(stream->session->suspended, stream->id, NULL, NULL);
+ h2_proxy_iq_add(stream->session->suspended, stream->id, NULL, NULL);
return NGHTTP2_ERR_DEFERRED;
}
else {
@@ -498,8 +573,30 @@ static ssize_t stream_data_read(nghttp2_
}
}
+#ifdef H2_NG2_INVALID_HEADER_CB
+static int on_invalid_header_cb(nghttp2_session *ngh2,
+ const nghttp2_frame *frame,
+ const uint8_t *name, size_t namelen,
+ const uint8_t *value, size_t valuelen,
+ uint8_t flags, void *user_data)
+{
+ h2_proxy_session *session = user_data;
+ if (APLOGcdebug(session->c)) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03469)
+ "h2_proxy_session(%s-%d): denying stream with invalid header "
+ "'%s: %s'", session->id, (int)frame->hd.stream_id,
+ apr_pstrndup(session->pool, (const char *)name, namelen),
+ apr_pstrndup(session->pool, (const char *)value, valuelen));
+ }
+ return nghttp2_submit_rst_stream(session->ngh2, NGHTTP2_FLAG_NONE,
+ frame->hd.stream_id,
+ NGHTTP2_PROTOCOL_ERROR);
+}
+#endif
+
h2_proxy_session *h2_proxy_session_setup(const char *id, proxy_conn_rec *p_conn,
proxy_server_conf *conf,
+ int h2_front,
unsigned char window_bits_connection,
unsigned char window_bits_stream,
h2_proxy_request_done *done)
@@ -520,10 +617,11 @@ h2_proxy_session *h2_proxy_session_setup
session->conf = conf;
session->pool = p_conn->scpool;
session->state = H2_PROXYS_ST_INIT;
+ session->h2_front = h2_front;
session->window_bits_stream = window_bits_stream;
session->window_bits_connection = window_bits_connection;
- session->streams = h2_ihash_create(pool, offsetof(h2_proxy_stream, id));
- session->suspended = h2_iq_create(pool, 5);
+ session->streams = h2_proxy_ihash_create(pool, offsetof(h2_proxy_stream, id));
+ session->suspended = h2_proxy_iq_create(pool, 5);
session->done = done;
session->input = apr_brigade_create(session->pool, session->c->bucket_alloc);
@@ -531,11 +629,14 @@ h2_proxy_session *h2_proxy_session_setup
nghttp2_session_callbacks_new(&cbs);
nghttp2_session_callbacks_set_on_frame_recv_callback(cbs, on_frame_recv);
- nghttp2_session_callbacks_set_on_data_chunk_recv_callback(cbs, on_data_chunk_recv);
+ nghttp2_session_callbacks_set_on_data_chunk_recv_callback(cbs, stream_response_data);
nghttp2_session_callbacks_set_on_stream_close_callback(cbs, on_stream_close);
nghttp2_session_callbacks_set_on_header_callback(cbs, on_header);
nghttp2_session_callbacks_set_before_frame_send_callback(cbs, before_frame_send);
nghttp2_session_callbacks_set_send_callback(cbs, raw_send);
+#ifdef H2_NG2_INVALID_HEADER_CB
+ nghttp2_session_callbacks_set_on_invalid_header_callback(cbs, on_invalid_header_cb);
+#endif
nghttp2_option_new(&option);
nghttp2_option_set_peer_max_concurrent_streams(option, 100);
@@ -549,6 +650,14 @@ h2_proxy_session *h2_proxy_session_setup
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03362)
"setup session for %s", p_conn->hostname);
}
+ else {
+ h2_proxy_session *session = p_conn->data;
+ apr_interval_time_t age = apr_time_now() - session->last_frame_received;
+ if (age > apr_time_from_sec(1)) {
+ session->check_ping = 1;
+ nghttp2_submit_ping(session->ngh2, 0, (const uint8_t *)"nevergonnagiveyouup");
+ }
+ }
return p_conn->data;
}
@@ -590,6 +699,7 @@ static apr_status_t open_stream(h2_proxy
apr_uri_t puri;
const char *authority, *scheme, *path;
apr_status_t status;
+ proxy_dir_conf *dconf;
stream = apr_pcalloc(r->pool, sizeof(*stream));
@@ -603,26 +713,64 @@ static apr_status_t open_stream(h2_proxy
stream->input = apr_brigade_create(stream->pool, session->c->bucket_alloc);
stream->output = apr_brigade_create(stream->pool, session->c->bucket_alloc);
- stream->req = h2_req_create(1, stream->pool, 0);
+ stream->req = h2_proxy_req_create(1, stream->pool, 0);
status = apr_uri_parse(stream->pool, url, &puri);
if (status != APR_SUCCESS)
return status;
-
+
scheme = (strcmp(puri.scheme, "h2")? "http" : "https");
- authority = puri.hostname;
- if (!ap_strchr_c(authority, ':') && puri.port
- && apr_uri_port_of_scheme(scheme) != puri.port) {
- /* port info missing and port is not default for scheme: append */
- authority = apr_psprintf(stream->pool, "%s:%d", authority, puri.port);
+
+ dconf = ap_get_module_config(r->per_dir_config, &proxy_module);
+ if (dconf->preserve_host) {
+ authority = r->hostname;
}
+ else {
+ authority = puri.hostname;
+ if (!ap_strchr_c(authority, ':') && puri.port
+ && apr_uri_port_of_scheme(scheme) != puri.port) {
+ /* port info missing and port is not default for scheme: append */
+ authority = apr_psprintf(stream->pool, "%s:%d", authority, puri.port);
+ }
+ }
+
+ /* we need this for mapping relative uris in headers ("Link") back
+ * to local uris */
+ stream->real_server_uri = apr_psprintf(stream->pool, "%s://%s", scheme, authority);
+ stream->p_server_uri = apr_psprintf(stream->pool, "%s://%s", puri.scheme, authority);
path = apr_uri_unparse(stream->pool, &puri, APR_URI_UNP_OMITSITEPART);
- h2_req_make(stream->req, stream->pool, r->method, scheme,
+ h2_proxy_req_make(stream->req, stream->pool, r->method, scheme,
authority, path, r->headers_in);
+ if (dconf->add_forwarded_headers) {
+ if (PROXYREQ_REVERSE == r->proxyreq) {
+ const char *buf;
+
+ /* Add X-Forwarded-For: so that the upstream has a chance to
+ * determine, where the original request came from.
+ */
+ apr_table_mergen(stream->req->headers, "X-Forwarded-For",
+ r->useragent_ip);
+
+ /* Add X-Forwarded-Host: so that upstream knows what the
+ * original request hostname was.
+ */
+ if ((buf = apr_table_get(r->headers_in, "Host"))) {
+ apr_table_mergen(stream->req->headers, "X-Forwarded-Host", buf);
+ }
+
+ /* Add X-Forwarded-Server: so that upstream knows what the
+ * name of this proxy server is (if there are more than one)
+ * XXX: This duplicates Via: - do we strictly need it?
+ */
+ apr_table_mergen(stream->req->headers, "X-Forwarded-Server",
+ r->server->server_hostname);
+ }
+ }
+
/* Tuck away all already existing cookies */
stream->saves = apr_table_make(r->pool, 2);
- apr_table_do(add_header, stream->saves, r->headers_out,"Set-Cookie", NULL);
+ apr_table_do(add_header, stream->saves, r->headers_out, "Set-Cookie", NULL);
*pstream = stream;
@@ -631,40 +779,45 @@ static apr_status_t open_stream(h2_proxy
static apr_status_t submit_stream(h2_proxy_session *session, h2_proxy_stream *stream)
{
- h2_ngheader *hd;
+ h2_proxy_ngheader *hd;
nghttp2_data_provider *pp = NULL;
nghttp2_data_provider provider;
- int rv;
+ int rv, may_have_request_body = 1;
apr_status_t status;
- hd = h2_util_ngheader_make_req(stream->pool, stream->req);
+ hd = h2_proxy_util_nghd_make_req(stream->pool, stream->req);
- status = ap_get_brigade(stream->r->input_filters, stream->input,
- AP_MODE_READBYTES, APR_NONBLOCK_READ,
- APR_BUCKET_BUFF_SIZE);
- if ((status == APR_SUCCESS && !APR_BUCKET_IS_EOS(APR_BRIGADE_FIRST(stream->input)))
- || APR_STATUS_IS_EAGAIN(status)) {
- /* there might be data coming */
+ /* If we expect a 100-continue response, we must refrain from reading
+ any input until we get it. Reading the input will possibly trigger
+ HTTP_IN filter to generate the 100-continue itself. */
+ if (stream->waiting_on_100 || stream->waiting_on_ping) {
+ /* make a small test if we get an EOF/EOS immediately */
+ status = ap_get_brigade(stream->r->input_filters, stream->input,
+ AP_MODE_READBYTES, APR_NONBLOCK_READ,
+ APR_BUCKET_BUFF_SIZE);
+ may_have_request_body = APR_STATUS_IS_EAGAIN(status)
+ || (status == APR_SUCCESS
+ && !APR_BUCKET_IS_EOS(APR_BRIGADE_FIRST(stream->input)));
+ }
+
+ if (may_have_request_body) {
provider.source.fd = 0;
provider.source.ptr = NULL;
- provider.read_callback = stream_data_read;
+ provider.read_callback = stream_request_data;
pp = &provider;
}
rv = nghttp2_submit_request(session->ngh2, NULL,
hd->nv, hd->nvlen, pp, stream);
- if (APLOGcdebug(session->c)) {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03363)
- "h2_proxy_session(%s): submit %s%s -> %d",
- session->id, stream->req->authority, stream->req->path,
- rv);
- }
-
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03363)
+ "h2_proxy_session(%s): submit %s%s -> %d",
+ session->id, stream->req->authority, stream->req->path,
+ rv);
if (rv > 0) {
stream->id = rv;
stream->state = H2_STREAM_ST_OPEN;
- h2_ihash_add(session->streams, stream);
+ h2_proxy_ihash_add(session->streams, stream);
dispatch_event(session, H2_PROXYS_EV_STREAM_SUBMITTED, rv, NULL);
return APR_SUCCESS;
@@ -747,7 +900,7 @@ static apr_status_t h2_proxy_session_rea
AP_MODE_READBYTES,
block? APR_BLOCK_READ : APR_NONBLOCK_READ,
64 * 1024);
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, session->c,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, session->c,
"h2_proxy_session(%s): read from conn", session->id);
if (socket && save_timeout != -1) {
apr_socket_timeout_set(socket, save_timeout);
@@ -788,6 +941,18 @@ apr_status_t h2_proxy_session_submit(h2_
return status;
}
+static void stream_resume(h2_proxy_stream *stream)
+{
+ h2_proxy_session *session = stream->session;
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
+ "h2_proxy_stream(%s-%d): resuming",
+ session->id, stream->id);
+ stream->suspended = 0;
+ h2_proxy_iq_remove(session->suspended, stream->id);
+ nghttp2_session_resume_data(session->ngh2, stream->id);
+ dispatch_event(session, H2_PROXYS_EV_STREAM_RESUMED, 0, NULL);
+}
+
static apr_status_t check_suspended(h2_proxy_session *session)
{
h2_proxy_stream *stream;
@@ -798,17 +963,16 @@ static apr_status_t check_suspended(h2_p
stream_id = session->suspended->elts[i];
stream = nghttp2_session_get_stream_user_data(session->ngh2, stream_id);
if (stream) {
- status = ap_get_brigade(stream->r->input_filters, stream->input,
- AP_MODE_READBYTES, APR_NONBLOCK_READ,
- APR_BUCKET_BUFF_SIZE);
+ if (stream->waiting_on_100 || stream->waiting_on_ping) {
+ status = APR_EAGAIN;
+ }
+ else {
+ status = ap_get_brigade(stream->r->input_filters, stream->input,
+ AP_MODE_READBYTES, APR_NONBLOCK_READ,
+ APR_BUCKET_BUFF_SIZE);
+ }
if (status == APR_SUCCESS && !APR_BRIGADE_EMPTY(stream->input)) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, session->c,
- "h2_proxy_stream(%s-%d): resuming",
- session->id, stream_id);
- stream->suspended = 0;
- h2_iq_remove(session->suspended, stream_id);
- nghttp2_session_resume_data(session->ngh2, stream_id);
- dispatch_event(session, H2_PROXYS_EV_STREAM_RESUMED, 0, NULL);
+ stream_resume(stream);
check_suspended(session);
return APR_SUCCESS;
}
@@ -816,15 +980,14 @@ static apr_status_t check_suspended(h2_p
ap_log_cerror(APLOG_MARK, APLOG_WARNING, status, session->c,
APLOGNO(03382) "h2_proxy_stream(%s-%d): check input",
session->id, stream_id);
- h2_iq_remove(session->suspended, stream_id);
- dispatch_event(session, H2_PROXYS_EV_STREAM_RESUMED, 0, NULL);
+ stream_resume(stream);
check_suspended(session);
return APR_SUCCESS;
}
}
else {
/* gone? */
- h2_iq_remove(session->suspended, stream_id);
+ h2_proxy_iq_remove(session->suspended, stream_id);
check_suspended(session);
return APR_SUCCESS;
}
@@ -838,7 +1001,7 @@ static apr_status_t session_shutdown(h2_
apr_status_t status = APR_SUCCESS;
const char *err = msg;
- AP_DEBUG_ASSERT(session);
+ ap_assert(session);
if (!err && reason) {
err = nghttp2_strerror(reason);
}
@@ -893,7 +1056,7 @@ static void ev_init(h2_proxy_session *se
{
switch (session->state) {
case H2_PROXYS_ST_INIT:
- if (h2_ihash_empty(session->streams)) {
+ if (h2_proxy_ihash_empty(session->streams)) {
transit(session, "init", H2_PROXYS_ST_IDLE);
}
else {
@@ -1000,7 +1163,7 @@ static void ev_no_io(h2_proxy_session *s
* CPU cycles. Ideally, we'd like to do a blocking read, but that
* is not possible if we have scheduled tasks and wait
* for them to produce something. */
- if (h2_ihash_empty(session->streams)) {
+ if (h2_proxy_ihash_empty(session->streams)) {
if (!is_accepting_streams(session)) {
/* We are no longer accepting new streams and have
* finished processing existing ones. Time to leave. */
@@ -1049,16 +1212,19 @@ static void ev_stream_done(h2_proxy_sess
if (stream) {
int touched = (stream->data_sent ||
stream_id <= session->last_stream_id);
- int complete = (stream->error_code == 0);
+ apr_status_t status = (stream->error_code == 0)? APR_SUCCESS : APR_EINVAL;
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03364)
"h2_proxy_sesssion(%s): stream(%d) closed "
- "(complete=%d, touched=%d)",
- session->id, stream_id, complete, touched);
+ "(touched=%d, error=%d)",
+ session->id, stream_id, touched, stream->error_code);
- if (complete && !stream->data_received) {
+ if (status != APR_SUCCESS) {
+ stream->r->status = 500;
+ }
+ else if (!stream->data_received) {
apr_bucket *b;
/* if the response had no body, this is the time to flush
- * an empty brigade which will also "write" the resonse
+ * an empty brigade which will also write the resonse
* headers */
h2_proxy_stream_end_headers_out(stream);
stream->data_received = 1;
@@ -1070,10 +1236,10 @@ static void ev_stream_done(h2_proxy_sess
}
stream->state = H2_STREAM_ST_CLOSED;
- h2_ihash_remove(session->streams, stream_id);
- h2_iq_remove(session->suspended, stream_id);
+ h2_proxy_ihash_remove(session->streams, stream_id);
+ h2_proxy_iq_remove(session->suspended, stream_id);
if (session->done) {
- session->done(session, stream->r, complete, touched);
+ session->done(session, stream->r, status, touched);
}
}
@@ -1185,6 +1351,21 @@ static void dispatch_event(h2_proxy_sess
}
}
+static int send_loop(h2_proxy_session *session)
+{
+ while (nghttp2_session_want_write(session->ngh2)) {
+ int rv = nghttp2_session_send(session->ngh2);
+ if (rv < 0 && nghttp2_is_fatal(rv)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
+ "h2_proxy_session(%s): write, rv=%d", session->id, rv);
+ dispatch_event(session, H2_PROXYS_EV_CONN_ERROR, rv, NULL);
+ break;
+ }
+ return 1;
+ }
+ return 0;
+}
+
apr_status_t h2_proxy_session_process(h2_proxy_session *session)
{
apr_status_t status;
@@ -1209,16 +1390,7 @@ run_loop:
case H2_PROXYS_ST_BUSY:
case H2_PROXYS_ST_LOCAL_SHUTDOWN:
case H2_PROXYS_ST_REMOTE_SHUTDOWN:
- while (nghttp2_session_want_write(session->ngh2)) {
- int rv = nghttp2_session_send(session->ngh2);
- if (rv < 0 && nghttp2_is_fatal(rv)) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
- "h2_proxy_session(%s): write, rv=%d", session->id, rv);
- dispatch_event(session, H2_PROXYS_EV_CONN_ERROR, rv, NULL);
- break;
- }
- have_written = 1;
- }
+ have_written = send_loop(session);
if (nghttp2_session_want_read(session->ngh2)) {
status = h2_proxy_session_read(session, 0, 0);
@@ -1247,7 +1419,7 @@ run_loop:
}
status = h2_proxy_session_read(session, 1, session->wait_timeout);
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, session->c,
APLOGNO(03365)
"h2_proxy_session(%s): WAIT read, timeout=%fms",
session->id, (float)session->wait_timeout/1000.0);
@@ -1295,28 +1467,71 @@ typedef struct {
h2_proxy_request_done *done;
} cleanup_iter_ctx;
+static int cancel_iter(void *udata, void *val)
+{
+ cleanup_iter_ctx *ctx = udata;
+ h2_proxy_stream *stream = val;
+ nghttp2_submit_rst_stream(ctx->session->ngh2, NGHTTP2_FLAG_NONE,
+ stream->id, 0);
+ return 1;
+}
+
+void h2_proxy_session_cancel_all(h2_proxy_session *session)
+{
+ if (!h2_proxy_ihash_empty(session->streams)) {
+ cleanup_iter_ctx ctx;
+ ctx.session = session;
+ ctx.done = session->done;
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03366)
+ "h2_proxy_session(%s): cancel %d streams",
+ session->id, (int)h2_proxy_ihash_count(session->streams));
+ h2_proxy_ihash_iter(session->streams, cancel_iter, &ctx);
+ session_shutdown(session, 0, NULL);
+ }
+}
+
static int done_iter(void *udata, void *val)
{
cleanup_iter_ctx *ctx = udata;
h2_proxy_stream *stream = val;
- int touched = (!ctx->session->last_stream_id ||
+ int touched = (stream->data_sent ||
stream->id <= ctx->session->last_stream_id);
- ctx->done(ctx->session, stream->r, 0, touched);
+ ctx->done(ctx->session, stream->r, APR_ECONNABORTED, touched);
return 1;
}
void h2_proxy_session_cleanup(h2_proxy_session *session,
h2_proxy_request_done *done)
{
- if (session->streams && !h2_ihash_empty(session->streams)) {
+ if (!h2_proxy_ihash_empty(session->streams)) {
cleanup_iter_ctx ctx;
ctx.session = session;
ctx.done = done;
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03366)
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03519)
"h2_proxy_session(%s): terminated, %d streams unfinished",
- session->id, (int)h2_ihash_count(session->streams));
- h2_ihash_iter(session->streams, done_iter, &ctx);
- h2_ihash_clear(session->streams);
+ session->id, (int)h2_proxy_ihash_count(session->streams));
+ h2_proxy_ihash_iter(session->streams, done_iter, &ctx);
+ h2_proxy_ihash_clear(session->streams);
+ }
+}
+
+static int ping_arrived_iter(void *udata, void *val)
+{
+ h2_proxy_stream *stream = val;
+ if (stream->waiting_on_ping) {
+ stream->waiting_on_ping = 0;
+ stream_resume(stream);
+ }
+ return 1;
+}
+
+static void ping_arrived(h2_proxy_session *session)
+{
+ if (!h2_proxy_ihash_empty(session->streams)) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03470)
+ "h2_proxy_session(%s): ping arrived, unblocking streams",
+ session->id);
+ h2_proxy_ihash_iter(session->streams, ping_arrived_iter, &session);
}
}
@@ -1347,13 +1562,13 @@ static int win_update_iter(void *udata,
void h2_proxy_session_update_window(h2_proxy_session *session,
conn_rec *c, apr_off_t bytes)
{
- if (session->streams && !h2_ihash_empty(session->streams)) {
+ if (!h2_proxy_ihash_empty(session->streams)) {
win_update_ctx ctx;
ctx.session = session;
ctx.c = c;
ctx.bytes = bytes;
ctx.updated = 0;
- h2_ihash_iter(session->streams, win_update_iter, &ctx);
+ h2_proxy_ihash_iter(session->streams, win_update_iter, &ctx);
if (!ctx.updated) {
/* could not find the stream any more, possibly closed, update
diff -up --new-file httpd-2.4.23/modules/http2/h2_proxy_session.h /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_proxy_session.h
--- httpd-2.4.23/modules/http2/h2_proxy_session.h 2016-06-13 11:58:07.000000000 +0200
+++ /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_proxy_session.h 2017-02-14 16:53:50.000000000 +0100
@@ -20,8 +20,18 @@
#include <nghttp2/nghttp2.h>
-struct h2_iqueue;
-struct h2_ihash_t;
+struct h2_proxy_iqueue;
+struct h2_proxy_ihash_t;
+
+typedef enum {
+ H2_STREAM_ST_IDLE,
+ H2_STREAM_ST_OPEN,
+ H2_STREAM_ST_RESV_LOCAL,
+ H2_STREAM_ST_RESV_REMOTE,
+ H2_STREAM_ST_CLOSED_INPUT,
+ H2_STREAM_ST_CLOSED_OUTPUT,
+ H2_STREAM_ST_CLOSED,
+} h2_proxy_stream_state_t;
typedef enum {
H2_PROXYS_ST_INIT, /* send initial SETTINGS, etc. */
@@ -52,7 +62,7 @@ typedef enum {
typedef struct h2_proxy_session h2_proxy_session;
typedef void h2_proxy_request_done(h2_proxy_session *s, request_rec *r,
- int complete, int touched);
+ apr_status_t status, int touched);
struct h2_proxy_session {
const char *id;
@@ -63,6 +73,8 @@ struct h2_proxy_session {
nghttp2_session *ngh2; /* the nghttp2 session itself */
unsigned int aborted : 1;
+ unsigned int check_ping : 1;
+ unsigned int h2_front : 1; /* if front-end connection is HTTP/2 */
h2_proxy_request_done *done;
void *user_data;
@@ -73,10 +85,11 @@ struct h2_proxy_session {
h2_proxys_state state;
apr_interval_time_t wait_timeout;
- struct h2_ihash_t *streams;
- struct h2_iqueue *suspended;
+ struct h2_proxy_ihash_t *streams;
+ struct h2_proxy_iqueue *suspended;
apr_size_t remote_max_concurrent;
int last_stream_id; /* last stream id processed by backend, or 0 */
+ apr_time_t last_frame_received;
apr_bucket_brigade *input;
apr_bucket_brigade *output;
@@ -84,6 +97,7 @@ struct h2_proxy_session {
h2_proxy_session *h2_proxy_session_setup(const char *id, proxy_conn_rec *p_conn,
proxy_server_conf *conf,
+ int h2_front,
unsigned char window_bits_connection,
unsigned char window_bits_stream,
h2_proxy_request_done *done);
@@ -101,6 +115,8 @@ apr_status_t h2_proxy_session_submit(h2_
*/
apr_status_t h2_proxy_session_process(h2_proxy_session *s);
+void h2_proxy_session_cancel_all(h2_proxy_session *s);
+
void h2_proxy_session_cleanup(h2_proxy_session *s, h2_proxy_request_done *done);
void h2_proxy_session_update_window(h2_proxy_session *s,
diff -up --new-file httpd-2.4.23/modules/http2/h2_proxy_util.c /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_proxy_util.c
--- httpd-2.4.23/modules/http2/h2_proxy_util.c 2016-06-28 21:57:30.000000000 +0200
+++ /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_proxy_util.c 2017-04-10 17:04:55.000000000 +0200
@@ -14,20 +14,26 @@
*/
#include <assert.h>
+#include <apr_lib.h>
#include <apr_strings.h>
+#include <apr_thread_mutex.h>
+#include <apr_thread_cond.h>
#include <httpd.h>
#include <http_core.h>
#include <http_log.h>
#include <http_request.h>
+#include <mod_proxy.h>
#include <nghttp2/nghttp2.h>
#include "h2.h"
#include "h2_proxy_util.h"
+APLOG_USE_MODULE(proxy_http2);
+
/* h2_log2(n) iff n is a power of 2 */
-unsigned char h2_log2(apr_uint32_t n)
+unsigned char h2_proxy_log2(int n)
{
int lz = 0;
if (!n) {
@@ -59,7 +65,7 @@ unsigned char h2_log2(apr_uint32_t n)
/*******************************************************************************
* ihash - hash for structs with int identifier
******************************************************************************/
-struct h2_ihash_t {
+struct h2_proxy_ihash_t {
apr_hash_t *hash;
size_t ioff;
};
@@ -69,31 +75,31 @@ static unsigned int ihash(const char *ke
return (unsigned int)(*((int*)key));
}
-h2_ihash_t *h2_ihash_create(apr_pool_t *pool, size_t offset_of_int)
+h2_proxy_ihash_t *h2_proxy_ihash_create(apr_pool_t *pool, size_t offset_of_int)
{
- h2_ihash_t *ih = apr_pcalloc(pool, sizeof(h2_ihash_t));
+ h2_proxy_ihash_t *ih = apr_pcalloc(pool, sizeof(h2_proxy_ihash_t));
ih->hash = apr_hash_make_custom(pool, ihash);
ih->ioff = offset_of_int;
return ih;
}
-size_t h2_ihash_count(h2_ihash_t *ih)
+size_t h2_proxy_ihash_count(h2_proxy_ihash_t *ih)
{
return apr_hash_count(ih->hash);
}
-int h2_ihash_empty(h2_ihash_t *ih)
+int h2_proxy_ihash_empty(h2_proxy_ihash_t *ih)
{
return apr_hash_count(ih->hash) == 0;
}
-void *h2_ihash_get(h2_ihash_t *ih, int id)
+void *h2_proxy_ihash_get(h2_proxy_ihash_t *ih, int id)
{
return apr_hash_get(ih->hash, &id, sizeof(id));
}
typedef struct {
- h2_ihash_iter_t *iter;
+ h2_proxy_ihash_iter_t *iter;
void *ctx;
} iter_ctx;
@@ -104,7 +110,7 @@ static int ihash_iter(void *ctx, const v
return ictx->iter(ictx->ctx, (void*)val); /* why is this passed const?*/
}
-int h2_ihash_iter(h2_ihash_t *ih, h2_ihash_iter_t *fn, void *ctx)
+int h2_proxy_ihash_iter(h2_proxy_ihash_t *ih, h2_proxy_ihash_iter_t *fn, void *ctx)
{
iter_ctx ictx;
ictx.iter = fn;
@@ -112,30 +118,30 @@ int h2_ihash_iter(h2_ihash_t *ih, h2_iha
return apr_hash_do(ihash_iter, &ictx, ih->hash);
}
-void h2_ihash_add(h2_ihash_t *ih, void *val)
+void h2_proxy_ihash_add(h2_proxy_ihash_t *ih, void *val)
{
apr_hash_set(ih->hash, ((char *)val + ih->ioff), sizeof(int), val);
}
-void h2_ihash_remove(h2_ihash_t *ih, int id)
+void h2_proxy_ihash_remove(h2_proxy_ihash_t *ih, int id)
{
apr_hash_set(ih->hash, &id, sizeof(id), NULL);
}
-void h2_ihash_remove_val(h2_ihash_t *ih, void *val)
+void h2_proxy_ihash_remove_val(h2_proxy_ihash_t *ih, void *val)
{
int id = *((int*)((char *)val + ih->ioff));
apr_hash_set(ih->hash, &id, sizeof(id), NULL);
}
-void h2_ihash_clear(h2_ihash_t *ih)
+void h2_proxy_ihash_clear(h2_proxy_ihash_t *ih)
{
apr_hash_clear(ih->hash);
}
typedef struct {
- h2_ihash_t *ih;
+ h2_proxy_ihash_t *ih;
void **buffer;
size_t max;
size_t len;
@@ -151,7 +157,7 @@ static int collect_iter(void *x, void *v
return 0;
}
-size_t h2_ihash_shift(h2_ihash_t *ih, void **buffer, size_t max)
+size_t h2_proxy_ihash_shift(h2_proxy_ihash_t *ih, void **buffer, size_t max)
{
collect_ctx ctx;
size_t i;
@@ -160,15 +166,15 @@ size_t h2_ihash_shift(h2_ihash_t *ih, vo
ctx.buffer = buffer;
ctx.max = max;
ctx.len = 0;
- h2_ihash_iter(ih, collect_iter, &ctx);
+ h2_proxy_ihash_iter(ih, collect_iter, &ctx);
for (i = 0; i < ctx.len; ++i) {
- h2_ihash_remove_val(ih, buffer[i]);
+ h2_proxy_ihash_remove_val(ih, buffer[i]);
}
return ctx.len;
}
typedef struct {
- h2_ihash_t *ih;
+ h2_proxy_ihash_t *ih;
int *buffer;
size_t max;
size_t len;
@@ -184,7 +190,7 @@ static int icollect_iter(void *x, void *
return 0;
}
-size_t h2_ihash_ishift(h2_ihash_t *ih, int *buffer, size_t max)
+size_t h2_proxy_ihash_ishift(h2_proxy_ihash_t *ih, int *buffer, size_t max)
{
icollect_ctx ctx;
size_t i;
@@ -193,9 +199,9 @@ size_t h2_ihash_ishift(h2_ihash_t *ih, i
ctx.buffer = buffer;
ctx.max = max;
ctx.len = 0;
- h2_ihash_iter(ih, icollect_iter, &ctx);
+ h2_proxy_ihash_iter(ih, icollect_iter, &ctx);
for (i = 0; i < ctx.len; ++i) {
- h2_ihash_remove(ih, buffer[i]);
+ h2_proxy_ihash_remove(ih, buffer[i]);
}
return ctx.len;
}
@@ -204,16 +210,16 @@ size_t h2_ihash_ishift(h2_ihash_t *ih, i
* iqueue - sorted list of int
******************************************************************************/
-static void iq_grow(h2_iqueue *q, int nlen);
-static void iq_swap(h2_iqueue *q, int i, int j);
-static int iq_bubble_up(h2_iqueue *q, int i, int top,
- h2_iq_cmp *cmp, void *ctx);
-static int iq_bubble_down(h2_iqueue *q, int i, int bottom,
- h2_iq_cmp *cmp, void *ctx);
+static void iq_grow(h2_proxy_iqueue *q, int nlen);
+static void iq_swap(h2_proxy_iqueue *q, int i, int j);
+static int iq_bubble_up(h2_proxy_iqueue *q, int i, int top,
+ h2_proxy_iq_cmp *cmp, void *ctx);
+static int iq_bubble_down(h2_proxy_iqueue *q, int i, int bottom,
+ h2_proxy_iq_cmp *cmp, void *ctx);
-h2_iqueue *h2_iq_create(apr_pool_t *pool, int capacity)
+h2_proxy_iqueue *h2_proxy_iq_create(apr_pool_t *pool, int capacity)
{
- h2_iqueue *q = apr_pcalloc(pool, sizeof(h2_iqueue));
+ h2_proxy_iqueue *q = apr_pcalloc(pool, sizeof(h2_proxy_iqueue));
if (q) {
q->pool = pool;
iq_grow(q, capacity);
@@ -222,18 +228,18 @@ h2_iqueue *h2_iq_create(apr_pool_t *pool
return q;
}
-int h2_iq_empty(h2_iqueue *q)
+int h2_proxy_iq_empty(h2_proxy_iqueue *q)
{
return q->nelts == 0;
}
-int h2_iq_count(h2_iqueue *q)
+int h2_proxy_iq_count(h2_proxy_iqueue *q)
{
return q->nelts;
}
-void h2_iq_add(h2_iqueue *q, int sid, h2_iq_cmp *cmp, void *ctx)
+void h2_proxy_iq_add(h2_proxy_iqueue *q, int sid, h2_proxy_iq_cmp *cmp, void *ctx)
{
int i;
@@ -251,7 +257,7 @@ void h2_iq_add(h2_iqueue *q, int sid, h2
}
}
-int h2_iq_remove(h2_iqueue *q, int sid)
+int h2_proxy_iq_remove(h2_proxy_iqueue *q, int sid)
{
int i;
for (i = 0; i < q->nelts; ++i) {
@@ -271,15 +277,15 @@ int h2_iq_remove(h2_iqueue *q, int sid)
return 0;
}
-void h2_iq_clear(h2_iqueue *q)
+void h2_proxy_iq_clear(h2_proxy_iqueue *q)
{
q->nelts = 0;
}
-void h2_iq_sort(h2_iqueue *q, h2_iq_cmp *cmp, void *ctx)
+void h2_proxy_iq_sort(h2_proxy_iqueue *q, h2_proxy_iq_cmp *cmp, void *ctx)
{
/* Assume that changes in ordering are minimal. This needs,
- * best case, q->nelts - 1 comparisions to check that nothing
+ * best case, q->nelts - 1 comparisons to check that nothing
* changed.
*/
if (q->nelts > 0) {
@@ -304,7 +310,7 @@ void h2_iq_sort(h2_iqueue *q, h2_iq_cmp
}
-int h2_iq_shift(h2_iqueue *q)
+int h2_proxy_iq_shift(h2_proxy_iqueue *q)
{
int sid;
@@ -319,7 +325,7 @@ int h2_iq_shift(h2_iqueue *q)
return sid;
}
-static void iq_grow(h2_iqueue *q, int nlen)
+static void iq_grow(h2_proxy_iqueue *q, int nlen)
{
if (nlen > q->nalloc) {
int *nq = apr_pcalloc(q->pool, sizeof(int) * nlen);
@@ -339,15 +345,15 @@ static void iq_grow(h2_iqueue *q, int nl
}
}
-static void iq_swap(h2_iqueue *q, int i, int j)
+static void iq_swap(h2_proxy_iqueue *q, int i, int j)
{
int x = q->elts[i];
q->elts[i] = q->elts[j];
q->elts[j] = x;
}
-static int iq_bubble_up(h2_iqueue *q, int i, int top,
- h2_iq_cmp *cmp, void *ctx)
+static int iq_bubble_up(h2_proxy_iqueue *q, int i, int top,
+ h2_proxy_iq_cmp *cmp, void *ctx)
{
int prev;
while (((prev = (q->nalloc + i - 1) % q->nalloc), i != top)
@@ -358,8 +364,8 @@ static int iq_bubble_up(h2_iqueue *q, in
return i;
}
-static int iq_bubble_down(h2_iqueue *q, int i, int bottom,
- h2_iq_cmp *cmp, void *ctx)
+static int iq_bubble_down(h2_proxy_iqueue *q, int i, int bottom,
+ h2_proxy_iq_cmp *cmp, void *ctx)
{
int next;
while (((next = (q->nalloc + i + 1) % q->nalloc), i != bottom)
@@ -371,7 +377,7 @@ static int iq_bubble_down(h2_iqueue *q,
}
/*******************************************************************************
- * h2_ngheader
+ * h2_proxy_ngheader
******************************************************************************/
#define H2_HD_MATCH_LIT_CS(l, name) \
((strlen(name) == sizeof(l) - 1) && !apr_strnatcasecmp(l, name))
@@ -397,7 +403,7 @@ static int count_header(void *ctx, const
#define NV_ADD_LIT_CS(nv, k, v) add_header(nv, k, sizeof(k) - 1, v, strlen(v))
#define NV_ADD_CS_CS(nv, k, v) add_header(nv, k, strlen(k), v, strlen(v))
-static int add_header(h2_ngheader *ngh,
+static int add_header(h2_proxy_ngheader *ngh,
const char *key, size_t key_len,
const char *value, size_t val_len)
{
@@ -418,23 +424,23 @@ static int add_table_header(void *ctx, c
return 1;
}
-h2_ngheader *h2_util_ngheader_make_req(apr_pool_t *p,
- const struct h2_request *req)
+h2_proxy_ngheader *h2_proxy_util_nghd_make_req(apr_pool_t *p,
+ const h2_proxy_request *req)
{
- h2_ngheader *ngh;
+ h2_proxy_ngheader *ngh;
size_t n;
- AP_DEBUG_ASSERT(req);
- AP_DEBUG_ASSERT(req->scheme);
- AP_DEBUG_ASSERT(req->authority);
- AP_DEBUG_ASSERT(req->path);
- AP_DEBUG_ASSERT(req->method);
+ ap_assert(req);
+ ap_assert(req->scheme);
+ ap_assert(req->authority);
+ ap_assert(req->path);
+ ap_assert(req->method);
n = 4;
apr_table_do(count_header, &n, req->headers, NULL);
- ngh = apr_pcalloc(p, sizeof(h2_ngheader));
+ ngh = apr_pcalloc(p, sizeof(h2_proxy_ngheader));
ngh->nv = apr_pcalloc(p, n * sizeof(nghttp2_nv));
NV_ADD_LIT_CS(ngh, ":scheme", req->scheme);
NV_ADD_LIT_CS(ngh, ":authority", req->authority);
@@ -458,7 +464,6 @@ typedef struct {
#define H2_LIT_ARGS(a) (a),H2_ALEN(a)
static literal IgnoredRequestHeaders[] = {
- H2_DEF_LITERAL("expect"),
H2_DEF_LITERAL("upgrade"),
H2_DEF_LITERAL("connection"),
H2_DEF_LITERAL("keep-alive"),
@@ -485,18 +490,18 @@ static int ignore_header(const literal *
return 0;
}
-static int h2_req_ignore_header(const char *name, size_t len)
+static int h2_proxy_req_ignore_header(const char *name, size_t len)
{
return ignore_header(H2_LIT_ARGS(IgnoredRequestHeaders), name, len);
}
int h2_proxy_res_ignore_header(const char *name, size_t len)
{
- return (h2_req_ignore_header(name, len)
+ return (h2_proxy_req_ignore_header(name, len)
|| ignore_header(H2_LIT_ARGS(IgnoredProxyRespHds), name, len));
}
-void h2_util_camel_case_header(char *s, size_t len)
+void h2_proxy_util_camel_case_header(char *s, size_t len)
{
size_t start = 1;
size_t i;
@@ -528,7 +533,7 @@ static apr_status_t h2_headers_add_h1(ap
{
char *hname, *hvalue;
- if (h2_req_ignore_header(name, nlen)) {
+ if (h2_proxy_req_ignore_header(name, nlen)) {
return APR_SUCCESS;
}
else if (H2_HD_MATCH_LIT("cookie", name, nlen)) {
@@ -553,20 +558,19 @@ static apr_status_t h2_headers_add_h1(ap
hname = apr_pstrndup(pool, name, nlen);
hvalue = apr_pstrndup(pool, value, vlen);
- h2_util_camel_case_header(hname, nlen);
+ h2_proxy_util_camel_case_header(hname, nlen);
apr_table_mergen(headers, hname, hvalue);
return APR_SUCCESS;
}
-static h2_request *h2_req_createn(int id, apr_pool_t *pool, const char *method,
+static h2_proxy_request *h2_proxy_req_createn(int id, apr_pool_t *pool, const char *method,
const char *scheme, const char *authority,
const char *path, apr_table_t *header,
int serialize)
{
- h2_request *req = apr_pcalloc(pool, sizeof(h2_request));
+ h2_proxy_request *req = apr_pcalloc(pool, sizeof(h2_proxy_request));
- req->id = id;
req->method = method;
req->scheme = scheme;
req->authority = authority;
@@ -578,9 +582,9 @@ static h2_request *h2_req_createn(int id
return req;
}
-h2_request *h2_req_create(int id, apr_pool_t *pool, int serialize)
+h2_proxy_request *h2_proxy_req_create(int id, apr_pool_t *pool, int serialize)
{
- return h2_req_createn(id, pool, NULL, NULL, NULL, NULL, NULL, serialize);
+ return h2_proxy_req_createn(id, pool, NULL, NULL, NULL, NULL, NULL, serialize);
}
typedef struct {
@@ -592,13 +596,13 @@ static int set_h1_header(void *ctx, cons
{
h1_ctx *x = ctx;
size_t klen = strlen(key);
- if (!h2_req_ignore_header(key, klen)) {
+ if (!h2_proxy_req_ignore_header(key, klen)) {
h2_headers_add_h1(x->headers, x->pool, key, klen, value, strlen(value));
}
return 1;
}
-apr_status_t h2_req_make(h2_request *req, apr_pool_t *pool,
+apr_status_t h2_proxy_req_make(h2_proxy_request *req, apr_pool_t *pool,
const char *method, const char *scheme,
const char *authority, const char *path,
apr_table_t *headers)
@@ -610,10 +614,10 @@ apr_status_t h2_req_make(h2_request *req
req->authority = authority;
req->path = path;
- AP_DEBUG_ASSERT(req->scheme);
- AP_DEBUG_ASSERT(req->authority);
- AP_DEBUG_ASSERT(req->path);
- AP_DEBUG_ASSERT(req->method);
+ ap_assert(req->scheme);
+ ap_assert(req->authority);
+ ap_assert(req->path);
+ ap_assert(req->method);
x.pool = pool;
x.headers = req->headers;
@@ -625,7 +629,7 @@ apr_status_t h2_req_make(h2_request *req
* frame logging
******************************************************************************/
-int h2_util_frame_print(const nghttp2_frame *frame, char *buffer, size_t maxlen)
+int h2_proxy_util_frame_print(const nghttp2_frame *frame, char *buffer, size_t maxlen)
{
char scratch[128];
size_t s_len = sizeof(scratch)/sizeof(scratch[0]);
@@ -703,3 +707,630 @@ int h2_util_frame_print(const nghttp2_fr
frame->hd.flags, frame->hd.stream_id);
}
}
+
+/*******************************************************************************
+ * link header handling
+ ******************************************************************************/
+
+typedef struct {
+ apr_pool_t *pool;
+ request_rec *r;
+ proxy_dir_conf *conf;
+ const char *s;
+ int slen;
+ int i;
+ const char *server_uri;
+ int su_len;
+ const char *real_backend_uri;
+ int rbu_len;
+ const char *p_server_uri;
+ int psu_len;
+ int link_start;
+ int link_end;
+} link_ctx;
+
+static int attr_char(char c)
+{
+ switch (c) {
+ case '!':
+ case '#':
+ case '$':
+ case '&':
+ case '+':
+ case '-':
+ case '.':
+ case '^':
+ case '_':
+ case '`':
+ case '|':
+ case '~':
+ return 1;
+ default:
+ return apr_isalnum(c);
+ }
+}
+
+static int ptoken_char(char c)
+{
+ switch (c) {
+ case '!':
+ case '#':
+ case '$':
+ case '&':
+ case '\'':
+ case '(':
+ case ')':
+ case '*':
+ case '+':
+ case '-':
+ case '.':
+ case '/':
+ case ':':
+ case '<':
+ case '=':
+ case '>':
+ case '?':
+ case '@':
+ case '[':
+ case ']':
+ case '^':
+ case '_':
+ case '`':
+ case '{':
+ case '|':
+ case '}':
+ case '~':
+ return 1;
+ default:
+ return apr_isalnum(c);
+ }
+}
+
+static int skip_ws(link_ctx *ctx)
+{
+ char c;
+ while (ctx->i < ctx->slen
+ && (((c = ctx->s[ctx->i]) == ' ') || (c == '\t'))) {
+ ++ctx->i;
+ }
+ return (ctx->i < ctx->slen);
+}
+
+static int find_chr(link_ctx *ctx, char c, int *pidx)
+{
+ int j;
+ for (j = ctx->i; j < ctx->slen; ++j) {
+ if (ctx->s[j] == c) {
+ *pidx = j;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int read_chr(link_ctx *ctx, char c)
+{
+ if (ctx->i < ctx->slen && ctx->s[ctx->i] == c) {
+ ++ctx->i;
+ return 1;
+ }
+ return 0;
+}
+
+static int skip_qstring(link_ctx *ctx)
+{
+ if (skip_ws(ctx) && read_chr(ctx, '\"')) {
+ int end;
+ if (find_chr(ctx, '\"', &end)) {
+ ctx->i = end + 1;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int skip_ptoken(link_ctx *ctx)
+{
+ if (skip_ws(ctx)) {
+ int i;
+ for (i = ctx->i; i < ctx->slen && ptoken_char(ctx->s[i]); ++i) {
+ /* nop */
+ }
+ if (i > ctx->i) {
+ ctx->i = i;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+
+static int read_link(link_ctx *ctx)
+{
+ ctx->link_start = ctx->link_end = 0;
+ if (skip_ws(ctx) && read_chr(ctx, '<')) {
+ int end;
+ if (find_chr(ctx, '>', &end)) {
+ ctx->link_start = ctx->i;
+ ctx->link_end = end;
+ ctx->i = end + 1;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int skip_pname(link_ctx *ctx)
+{
+ if (skip_ws(ctx)) {
+ int i;
+ for (i = ctx->i; i < ctx->slen && attr_char(ctx->s[i]); ++i) {
+ /* nop */
+ }
+ if (i > ctx->i) {
+ ctx->i = i;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int skip_pvalue(link_ctx *ctx)
+{
+ if (skip_ws(ctx) && read_chr(ctx, '=')) {
+ if (skip_qstring(ctx) || skip_ptoken(ctx)) {
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int skip_param(link_ctx *ctx)
+{
+ if (skip_ws(ctx) && read_chr(ctx, ';')) {
+ if (skip_pname(ctx)) {
+ skip_pvalue(ctx); /* value is optional */
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int read_sep(link_ctx *ctx)
+{
+ if (skip_ws(ctx) && read_chr(ctx, ',')) {
+ return 1;
+ }
+ return 0;
+}
+
+static size_t subst_str(link_ctx *ctx, int start, int end, const char *ns)
+{
+ int olen, nlen, plen;
+ int delta;
+ char *p;
+
+ olen = end - start;
+ nlen = (int)strlen(ns);
+ delta = nlen - olen;
+ plen = ctx->slen + delta + 1;
+ p = apr_pcalloc(ctx->pool, plen);
+ strncpy(p, ctx->s, start);
+ strncpy(p + start, ns, nlen);
+ strcpy(p + start + nlen, ctx->s + end);
+ ctx->s = p;
+ ctx->slen = (int)strlen(p);
+ if (ctx->i >= end) {
+ ctx->i += delta;
+ }
+ return nlen;
+}
+
+static void map_link(link_ctx *ctx)
+{
+ if (ctx->link_start < ctx->link_end) {
+ char buffer[HUGE_STRING_LEN];
+ int need_len, link_len, buffer_len, prepend_p_server;
+ const char *mapped;
+
+ buffer[0] = '\0';
+ buffer_len = 0;
+ link_len = ctx->link_end - ctx->link_start;
+ need_len = link_len + 1;
+ prepend_p_server = (ctx->s[ctx->link_start] == '/');
+ if (prepend_p_server) {
+ /* common to use relative uris in link header, for mappings
+ * to work need to prefix the backend server uri */
+ need_len += ctx->psu_len;
+ strncpy(buffer, ctx->p_server_uri, sizeof(buffer));
+ buffer_len = ctx->psu_len;
+ }
+ if (need_len > sizeof(buffer)) {
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, ctx->r, APLOGNO(03482)
+ "link_reverse_map uri too long, skipped: %s", ctx->s);
+ return;
+ }
+ strncpy(buffer + buffer_len, ctx->s + ctx->link_start, link_len);
+ buffer_len += link_len;
+ buffer[buffer_len] = '\0';
+ if (!prepend_p_server
+ && strcmp(ctx->real_backend_uri, ctx->p_server_uri)
+ && !strncmp(buffer, ctx->real_backend_uri, ctx->rbu_len)) {
+ /* the server uri and our local proxy uri we use differ, for mapping
+ * to work, we need to use the proxy uri */
+ int path_start = ctx->link_start + ctx->rbu_len;
+ link_len -= ctx->rbu_len;
+ strcpy(buffer, ctx->p_server_uri);
+ strncpy(buffer + ctx->psu_len, ctx->s + path_start, link_len);
+ buffer_len = ctx->psu_len + link_len;
+ buffer[buffer_len] = '\0';
+ }
+ mapped = ap_proxy_location_reverse_map(ctx->r, ctx->conf, buffer);
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, ctx->r,
+ "reverse_map[%s] %s --> %s", ctx->p_server_uri, buffer, mapped);
+ if (mapped != buffer) {
+ if (prepend_p_server) {
+ if (ctx->server_uri == NULL) {
+ ctx->server_uri = ap_construct_url(ctx->pool, "", ctx->r);
+ ctx->su_len = (int)strlen(ctx->server_uri);
+ }
+ if (!strncmp(mapped, ctx->server_uri, ctx->su_len)) {
+ mapped += ctx->su_len;
+ }
+ }
+ subst_str(ctx, ctx->link_start, ctx->link_end, mapped);
+ }
+ }
+}
+
+/* RFC 5988 <https://tools.ietf.org/html/rfc5988#section-6.2.1>
+ Link = "Link" ":" #link-value
+ link-value = "<" URI-Reference ">" *( ";" link-param )
+ link-param = ( ( "rel" "=" relation-types )
+ | ( "anchor" "=" <"> URI-Reference <"> )
+ | ( "rev" "=" relation-types )
+ | ( "hreflang" "=" Language-Tag )
+ | ( "media" "=" ( MediaDesc | ( <"> MediaDesc <"> ) ) )
+ | ( "title" "=" quoted-string )
+ | ( "title*" "=" ext-value )
+ | ( "type" "=" ( media-type | quoted-mt ) )
+ | ( link-extension ) )
+ link-extension = ( parmname [ "=" ( ptoken | quoted-string ) ] )
+ | ( ext-name-star "=" ext-value )
+ ext-name-star = parmname "*" ; reserved for RFC2231-profiled
+ ; extensions. Whitespace NOT
+ ; allowed in between.
+ ptoken = 1*ptokenchar
+ ptokenchar = "!" | "#" | "$" | "%" | "&" | "'" | "("
+ | ")" | "*" | "+" | "-" | "." | "/" | DIGIT
+ | ":" | "<" | "=" | ">" | "?" | "@" | ALPHA
+ | "[" | "]" | "^" | "_" | "`" | "{" | "|"
+ | "}" | "~"
+ media-type = type-name "/" subtype-name
+ quoted-mt = <"> media-type <">
+ relation-types = relation-type
+ | <"> relation-type *( 1*SP relation-type ) <">
+ relation-type = reg-rel-type | ext-rel-type
+ reg-rel-type = LOALPHA *( LOALPHA | DIGIT | "." | "-" )
+ ext-rel-type = URI
+
+ and from <https://tools.ietf.org/html/rfc5987>
+ parmname = 1*attr-char
+ attr-char = ALPHA / DIGIT
+ / "!" / "#" / "$" / "&" / "+" / "-" / "."
+ / "^" / "_" / "`" / "|" / "~"
+ */
+
+const char *h2_proxy_link_reverse_map(request_rec *r,
+ proxy_dir_conf *conf,
+ const char *real_backend_uri,
+ const char *proxy_server_uri,
+ const char *s)
+{
+ link_ctx ctx;
+
+ if (r->proxyreq != PROXYREQ_REVERSE) {
+ return s;
+ }
+ memset(&ctx, 0, sizeof(ctx));
+ ctx.r = r;
+ ctx.pool = r->pool;
+ ctx.conf = conf;
+ ctx.real_backend_uri = real_backend_uri;
+ ctx.rbu_len = (int)strlen(ctx.real_backend_uri);
+ ctx.p_server_uri = proxy_server_uri;
+ ctx.psu_len = (int)strlen(ctx.p_server_uri);
+ ctx.s = s;
+ ctx.slen = (int)strlen(s);
+ while (read_link(&ctx)) {
+ while (skip_param(&ctx)) {
+ /* nop */
+ }
+ map_link(&ctx);
+ if (!read_sep(&ctx)) {
+ break;
+ }
+ }
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
+ "link_reverse_map %s --> %s", s, ctx.s);
+ return ctx.s;
+}
+
+/*******************************************************************************
+ * FIFO queue
+ ******************************************************************************/
+
+struct h2_proxy_fifo {
+ void **elems;
+ int nelems;
+ int set;
+ int head;
+ int count;
+ int aborted;
+ apr_thread_mutex_t *lock;
+ apr_thread_cond_t *not_empty;
+ apr_thread_cond_t *not_full;
+};
+
+static int nth_index(h2_proxy_fifo *fifo, int n)
+{
+ return (fifo->head + n) % fifo->nelems;
+}
+
+static apr_status_t fifo_destroy(void *data)
+{
+ h2_proxy_fifo *fifo = data;
+
+ apr_thread_cond_destroy(fifo->not_empty);
+ apr_thread_cond_destroy(fifo->not_full);
+ apr_thread_mutex_destroy(fifo->lock);
+
+ return APR_SUCCESS;
+}
+
+static int index_of(h2_proxy_fifo *fifo, void *elem)
+{
+ int i;
+
+ for (i = 0; i < fifo->count; ++i) {
+ if (elem == fifo->elems[nth_index(fifo, i)]) {
+ return i;
+ }
+ }
+ return -1;
+}
+
+static apr_status_t create_int(h2_proxy_fifo **pfifo, apr_pool_t *pool,
+ int capacity, int as_set)
+{
+ apr_status_t rv;
+ h2_proxy_fifo *fifo;
+
+ fifo = apr_pcalloc(pool, sizeof(*fifo));
+ if (fifo == NULL) {
+ return APR_ENOMEM;
+ }
+
+ rv = apr_thread_mutex_create(&fifo->lock,
+ APR_THREAD_MUTEX_UNNESTED, pool);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+
+ rv = apr_thread_cond_create(&fifo->not_empty, pool);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+
+ rv = apr_thread_cond_create(&fifo->not_full, pool);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+
+ fifo->elems = apr_pcalloc(pool, capacity * sizeof(void*));
+ if (fifo->elems == NULL) {
+ return APR_ENOMEM;
+ }
+ fifo->nelems = capacity;
+ fifo->set = as_set;
+
+ *pfifo = fifo;
+ apr_pool_cleanup_register(pool, fifo, fifo_destroy, apr_pool_cleanup_null);
+
+ return APR_SUCCESS;
+}
+
+apr_status_t h2_proxy_fifo_create(h2_proxy_fifo **pfifo, apr_pool_t *pool, int capacity)
+{
+ return create_int(pfifo, pool, capacity, 0);
+}
+
+apr_status_t h2_proxy_fifo_set_create(h2_proxy_fifo **pfifo, apr_pool_t *pool, int capacity)
+{
+ return create_int(pfifo, pool, capacity, 1);
+}
+
+apr_status_t h2_proxy_fifo_term(h2_proxy_fifo *fifo)
+{
+ apr_status_t rv;
+ if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
+ fifo->aborted = 1;
+ apr_thread_mutex_unlock(fifo->lock);
+ }
+ return rv;
+}
+
+apr_status_t h2_proxy_fifo_interrupt(h2_proxy_fifo *fifo)
+{
+ apr_status_t rv;
+ if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
+ apr_thread_cond_broadcast(fifo->not_empty);
+ apr_thread_cond_broadcast(fifo->not_full);
+ apr_thread_mutex_unlock(fifo->lock);
+ }
+ return rv;
+}
+
+int h2_proxy_fifo_count(h2_proxy_fifo *fifo)
+{
+ return fifo->count;
+}
+
+int h2_proxy_fifo_capacity(h2_proxy_fifo *fifo)
+{
+ return fifo->nelems;
+}
+
+static apr_status_t check_not_empty(h2_proxy_fifo *fifo, int block)
+{
+ if (fifo->count == 0) {
+ if (!block) {
+ return APR_EAGAIN;
+ }
+ while (fifo->count == 0) {
+ if (fifo->aborted) {
+ return APR_EOF;
+ }
+ apr_thread_cond_wait(fifo->not_empty, fifo->lock);
+ }
+ }
+ return APR_SUCCESS;
+}
+
+static apr_status_t fifo_push(h2_proxy_fifo *fifo, void *elem, int block)
+{
+ apr_status_t rv;
+
+ if (fifo->aborted) {
+ return APR_EOF;
+ }
+
+ if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
+ if (fifo->set && index_of(fifo, elem) >= 0) {
+ /* set mode, elem already member */
+ apr_thread_mutex_unlock(fifo->lock);
+ return APR_EEXIST;
+ }
+ else if (fifo->count == fifo->nelems) {
+ if (block) {
+ while (fifo->count == fifo->nelems) {
+ if (fifo->aborted) {
+ apr_thread_mutex_unlock(fifo->lock);
+ return APR_EOF;
+ }
+ apr_thread_cond_wait(fifo->not_full, fifo->lock);
+ }
+ }
+ else {
+ apr_thread_mutex_unlock(fifo->lock);
+ return APR_EAGAIN;
+ }
+ }
+
+ ap_assert(fifo->count < fifo->nelems);
+ fifo->elems[nth_index(fifo, fifo->count)] = elem;
+ ++fifo->count;
+ if (fifo->count == 1) {
+ apr_thread_cond_broadcast(fifo->not_empty);
+ }
+ apr_thread_mutex_unlock(fifo->lock);
+ }
+ return rv;
+}
+
+apr_status_t h2_proxy_fifo_push(h2_proxy_fifo *fifo, void *elem)
+{
+ return fifo_push(fifo, elem, 1);
+}
+
+apr_status_t h2_proxy_fifo_try_push(h2_proxy_fifo *fifo, void *elem)
+{
+ return fifo_push(fifo, elem, 0);
+}
+
+static void *pull_head(h2_proxy_fifo *fifo)
+{
+ void *elem;
+
+ ap_assert(fifo->count > 0);
+ elem = fifo->elems[fifo->head];
+ --fifo->count;
+ if (fifo->count > 0) {
+ fifo->head = nth_index(fifo, 1);
+ if (fifo->count+1 == fifo->nelems) {
+ apr_thread_cond_broadcast(fifo->not_full);
+ }
+ }
+ return elem;
+}
+
+static apr_status_t fifo_pull(h2_proxy_fifo *fifo, void **pelem, int block)
+{
+ apr_status_t rv;
+
+ if (fifo->aborted) {
+ return APR_EOF;
+ }
+
+ if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
+ if ((rv = check_not_empty(fifo, block)) != APR_SUCCESS) {
+ apr_thread_mutex_unlock(fifo->lock);
+ *pelem = NULL;
+ return rv;
+ }
+
+ ap_assert(fifo->count > 0);
+ *pelem = pull_head(fifo);
+
+ apr_thread_mutex_unlock(fifo->lock);
+ }
+ return rv;
+}
+
+apr_status_t h2_proxy_fifo_pull(h2_proxy_fifo *fifo, void **pelem)
+{
+ return fifo_pull(fifo, pelem, 1);
+}
+
+apr_status_t h2_proxy_fifo_try_pull(h2_proxy_fifo *fifo, void **pelem)
+{
+ return fifo_pull(fifo, pelem, 0);
+}
+
+apr_status_t h2_proxy_fifo_remove(h2_proxy_fifo *fifo, void *elem)
+{
+ apr_status_t rv;
+
+ if (fifo->aborted) {
+ return APR_EOF;
+ }
+
+ if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
+ int i, rc;
+ void *e;
+
+ rc = 0;
+ for (i = 0; i < fifo->count; ++i) {
+ e = fifo->elems[nth_index(fifo, i)];
+ if (e == elem) {
+ ++rc;
+ }
+ else if (rc) {
+ fifo->elems[nth_index(fifo, i-rc)] = e;
+ }
+ }
+ if (rc) {
+ fifo->count -= rc;
+ if (fifo->count + rc == fifo->nelems) {
+ apr_thread_cond_broadcast(fifo->not_full);
+ }
+ rv = APR_SUCCESS;
+ }
+ else {
+ rv = APR_EAGAIN;
+ }
+
+ apr_thread_mutex_unlock(fifo->lock);
+ }
+ return rv;
+}
diff -up --new-file httpd-2.4.23/modules/http2/h2_proxy_util.h /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_proxy_util.h
--- httpd-2.4.23/modules/http2/h2_proxy_util.h 2016-06-28 21:57:30.000000000 +0200
+++ /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_proxy_util.h 2017-04-10 17:04:55.000000000 +0200
@@ -19,27 +19,27 @@
/*******************************************************************************
* some debugging/format helpers
******************************************************************************/
-struct h2_request;
+struct h2_proxy_request;
struct nghttp2_frame;
-int h2_util_frame_print(const nghttp2_frame *frame, char *buffer, size_t maxlen);
+int h2_proxy_util_frame_print(const nghttp2_frame *frame, char *buffer, size_t maxlen);
/*******************************************************************************
* ihash - hash for structs with int identifier
******************************************************************************/
-typedef struct h2_ihash_t h2_ihash_t;
-typedef int h2_ihash_iter_t(void *ctx, void *val);
+typedef struct h2_proxy_ihash_t h2_proxy_ihash_t;
+typedef int h2_proxy_ihash_iter_t(void *ctx, void *val);
/**
* Create a hash for structures that have an identifying int member.
* @param pool the pool to use
* @param offset_of_int the offsetof() the int member in the struct
*/
-h2_ihash_t *h2_ihash_create(apr_pool_t *pool, size_t offset_of_int);
+h2_proxy_ihash_t *h2_proxy_ihash_create(apr_pool_t *pool, size_t offset_of_int);
-size_t h2_ihash_count(h2_ihash_t *ih);
-int h2_ihash_empty(h2_ihash_t *ih);
-void *h2_ihash_get(h2_ihash_t *ih, int id);
+size_t h2_proxy_ihash_count(h2_proxy_ihash_t *ih);
+int h2_proxy_ihash_empty(h2_proxy_ihash_t *ih);
+void *h2_proxy_ihash_get(h2_proxy_ihash_t *ih, int id);
/**
* Iterate over the hash members (without defined order) and invoke
@@ -49,26 +49,26 @@ void *h2_ihash_get(h2_ihash_t *ih, int i
* @param ctx user supplied data passed into each iteration call
* @return 0 if one iteration returned 0, otherwise != 0
*/
-int h2_ihash_iter(h2_ihash_t *ih, h2_ihash_iter_t *fn, void *ctx);
+int h2_proxy_ihash_iter(h2_proxy_ihash_t *ih, h2_proxy_ihash_iter_t *fn, void *ctx);
-void h2_ihash_add(h2_ihash_t *ih, void *val);
-void h2_ihash_remove(h2_ihash_t *ih, int id);
-void h2_ihash_remove_val(h2_ihash_t *ih, void *val);
-void h2_ihash_clear(h2_ihash_t *ih);
+void h2_proxy_ihash_add(h2_proxy_ihash_t *ih, void *val);
+void h2_proxy_ihash_remove(h2_proxy_ihash_t *ih, int id);
+void h2_proxy_ihash_remove_val(h2_proxy_ihash_t *ih, void *val);
+void h2_proxy_ihash_clear(h2_proxy_ihash_t *ih);
-size_t h2_ihash_shift(h2_ihash_t *ih, void **buffer, size_t max);
-size_t h2_ihash_ishift(h2_ihash_t *ih, int *buffer, size_t max);
+size_t h2_proxy_ihash_shift(h2_proxy_ihash_t *ih, void **buffer, size_t max);
+size_t h2_proxy_ihash_ishift(h2_proxy_ihash_t *ih, int *buffer, size_t max);
/*******************************************************************************
* iqueue - sorted list of int with user defined ordering
******************************************************************************/
-typedef struct h2_iqueue {
+typedef struct h2_proxy_iqueue {
int *elts;
int head;
int nelts;
int nalloc;
apr_pool_t *pool;
-} h2_iqueue;
+} h2_proxy_iqueue;
/**
* Comparator for two int to determine their order.
@@ -81,26 +81,26 @@ typedef struct h2_iqueue {
* < 0: s1 should be sorted before s2
* > 0: s2 should be sorted before s1
*/
-typedef int h2_iq_cmp(int i1, int i2, void *ctx);
+typedef int h2_proxy_iq_cmp(int i1, int i2, void *ctx);
/**
* Allocate a new queue from the pool and initialize.
* @param id the identifier of the queue
* @param pool the memory pool
*/
-h2_iqueue *h2_iq_create(apr_pool_t *pool, int capacity);
+h2_proxy_iqueue *h2_proxy_iq_create(apr_pool_t *pool, int capacity);
/**
* Return != 0 iff there are no tasks in the queue.
* @param q the queue to check
*/
-int h2_iq_empty(h2_iqueue *q);
+int h2_proxy_iq_empty(h2_proxy_iqueue *q);
/**
* Return the number of int in the queue.
* @param q the queue to get size on
*/
-int h2_iq_count(h2_iqueue *q);
+int h2_proxy_iq_count(h2_proxy_iqueue *q);
/**
* Add a stream id to the queue.
@@ -110,7 +110,7 @@ int h2_iq_count(h2_iqueue *q);
* @param cmp the comparator for sorting
* @param ctx user data for comparator
*/
-void h2_iq_add(h2_iqueue *q, int sid, h2_iq_cmp *cmp, void *ctx);
+void h2_proxy_iq_add(h2_proxy_iqueue *q, int sid, h2_proxy_iq_cmp *cmp, void *ctx);
/**
* Remove the stream id from the queue. Return != 0 iff task
@@ -119,12 +119,12 @@ void h2_iq_add(h2_iqueue *q, int sid, h2
* @param sid the stream id to remove
* @return != 0 iff task was found in queue
*/
-int h2_iq_remove(h2_iqueue *q, int sid);
+int h2_proxy_iq_remove(h2_proxy_iqueue *q, int sid);
/**
* Remove all entries in the queue.
*/
-void h2_iq_clear(h2_iqueue *q);
+void h2_proxy_iq_clear(h2_proxy_iqueue *q);
/**
* Sort the stream idqueue again. Call if the task ordering
@@ -134,7 +134,7 @@ void h2_iq_clear(h2_iqueue *q);
* @param cmp the comparator for sorting
* @param ctx user data for the comparator
*/
-void h2_iq_sort(h2_iqueue *q, h2_iq_cmp *cmp, void *ctx);
+void h2_proxy_iq_sort(h2_proxy_iqueue *q, h2_proxy_iq_cmp *cmp, void *ctx);
/**
* Get the first stream id from the queue or NULL if the queue is empty.
@@ -143,39 +143,113 @@ void h2_iq_sort(h2_iqueue *q, h2_iq_cmp
* @param q the queue to get the first task from
* @return the first stream id of the queue, 0 if empty
*/
-int h2_iq_shift(h2_iqueue *q);
+int h2_proxy_iq_shift(h2_proxy_iqueue *q);
/*******************************************************************************
* common helpers
******************************************************************************/
-/* h2_log2(n) iff n is a power of 2 */
-unsigned char h2_log2(apr_uint32_t n);
+/* h2_proxy_log2(n) iff n is a power of 2 */
+unsigned char h2_proxy_log2(int n);
/*******************************************************************************
* HTTP/2 header helpers
******************************************************************************/
-void h2_util_camel_case_header(char *s, size_t len);
+void h2_proxy_util_camel_case_header(char *s, size_t len);
int h2_proxy_res_ignore_header(const char *name, size_t len);
/*******************************************************************************
* nghttp2 helpers
******************************************************************************/
-typedef struct h2_ngheader {
+typedef struct h2_proxy_ngheader {
nghttp2_nv *nv;
apr_size_t nvlen;
-} h2_ngheader;
-h2_ngheader *h2_util_ngheader_make_req(apr_pool_t *p,
- const struct h2_request *req);
+} h2_proxy_ngheader;
+h2_proxy_ngheader *h2_proxy_util_nghd_make_req(apr_pool_t *p,
+ const struct h2_proxy_request *req);
/*******************************************************************************
- * h2_request helpers
- ******************************************************************************/
-struct h2_request *h2_req_create(int id, apr_pool_t *pool, int serialize);
-apr_status_t h2_req_make(struct h2_request *req, apr_pool_t *pool,
- const char *method, const char *scheme,
- const char *authority, const char *path,
- apr_table_t *headers);
+ * h2_proxy_request helpers
+ ******************************************************************************/
+typedef struct h2_proxy_request h2_proxy_request;
+
+struct h2_proxy_request {
+ const char *method; /* pseudo header values, see ch. 8.1.2.3 */
+ const char *scheme;
+ const char *authority;
+ const char *path;
+
+ apr_table_t *headers;
+
+ apr_time_t request_time;
+
+ unsigned int chunked : 1; /* iff request body needs to be forwarded as chunked */
+ unsigned int serialize : 1; /* iff this request is written in HTTP/1.1 serialization */
+};
+
+h2_proxy_request *h2_proxy_req_create(int id, apr_pool_t *pool, int serialize);
+apr_status_t h2_proxy_req_make(h2_proxy_request *req, apr_pool_t *pool,
+ const char *method, const char *scheme,
+ const char *authority, const char *path,
+ apr_table_t *headers);
+
+/*******************************************************************************
+ * reverse mapping for link headers
+ ******************************************************************************/
+const char *h2_proxy_link_reverse_map(request_rec *r,
+ proxy_dir_conf *conf,
+ const char *real_server_uri,
+ const char *proxy_server_uri,
+ const char *s);
+
+/*******************************************************************************
+ * FIFO queue
+ ******************************************************************************/
+
+/**
+ * A thread-safe FIFO queue with some extra bells and whistles, if you
+ * do not need anything special, better use 'apr_queue'.
+ */
+typedef struct h2_proxy_fifo h2_proxy_fifo;
+
+/**
+ * Create a FIFO queue that can hold up to capacity elements. Elements can
+ * appear several times.
+ */
+apr_status_t h2_proxy_fifo_create(h2_proxy_fifo **pfifo, apr_pool_t *pool, int capacity);
+
+/**
+ * Create a FIFO set that can hold up to capacity elements. Elements only
+ * appear once. Pushing an element already present does not change the
+ * queue and is successful.
+ */
+apr_status_t h2_proxy_fifo_set_create(h2_proxy_fifo **pfifo, apr_pool_t *pool, int capacity);
+
+apr_status_t h2_proxy_fifo_term(h2_proxy_fifo *fifo);
+apr_status_t h2_proxy_fifo_interrupt(h2_proxy_fifo *fifo);
+
+int h2_proxy_fifo_capacity(h2_proxy_fifo *fifo);
+int h2_proxy_fifo_count(h2_proxy_fifo *fifo);
+/**
+ * Push an element into the queue. Blocks if there is no capacity left.
+ *
+ * @param fifo the FIFO queue
+ * @param elem the element to push
+ * @return APR_SUCCESS on push, APR_EAGAIN on try_push on a full queue,
+ * APR_EEXIST when in set mode and elem already there.
+ */
+apr_status_t h2_proxy_fifo_push(h2_proxy_fifo *fifo, void *elem);
+apr_status_t h2_proxy_fifo_try_push(h2_proxy_fifo *fifo, void *elem);
+
+apr_status_t h2_proxy_fifo_pull(h2_proxy_fifo *fifo, void **pelem);
+apr_status_t h2_proxy_fifo_try_pull(h2_proxy_fifo *fifo, void **pelem);
+
+/**
+ * Remove the elem from the queue, will remove multiple appearances.
+ * @param elem the element to remove
+ * @return APR_SUCCESS iff > 0 elems were removed, APR_EAGAIN otherwise.
+ */
+apr_status_t h2_proxy_fifo_remove(h2_proxy_fifo *fifo, void *elem);
#endif /* defined(__mod_h2__h2_proxy_util__) */
diff -up --new-file httpd-2.4.23/modules/http2/h2_push.c /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_push.c
--- httpd-2.4.23/modules/http2/h2_push.c 2016-05-23 12:55:29.000000000 +0200
+++ /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_push.c 2017-02-14 16:53:50.000000000 +0100
@@ -34,7 +34,7 @@
#include "h2_util.h"
#include "h2_push.h"
#include "h2_request.h"
-#include "h2_response.h"
+#include "h2_headers.h"
#include "h2_session.h"
#include "h2_stream.h"
@@ -58,6 +58,7 @@ static const char *policy_str(h2_push_po
typedef struct {
const h2_request *req;
+ int push_policy;
apr_pool_t *pool;
apr_array_header_t *pushes;
const char *s;
@@ -162,10 +163,10 @@ static char *mk_str(link_ctx *ctx, size_
if (ctx->i < end) {
return apr_pstrndup(ctx->pool, ctx->s + ctx->i, end - ctx->i);
}
- return "";
+ return (char*)"";
}
-static int read_qstring(link_ctx *ctx, char **ps)
+static int read_qstring(link_ctx *ctx, const char **ps)
{
if (skip_ws(ctx) && read_chr(ctx, '\"')) {
size_t end;
@@ -178,7 +179,7 @@ static int read_qstring(link_ctx *ctx, c
return 0;
}
-static int read_ptoken(link_ctx *ctx, char **ps)
+static int read_ptoken(link_ctx *ctx, const char **ps)
{
if (skip_ws(ctx)) {
size_t i;
@@ -208,7 +209,7 @@ static int read_link(link_ctx *ctx)
return 0;
}
-static int read_pname(link_ctx *ctx, char **pname)
+static int read_pname(link_ctx *ctx, const char **pname)
{
if (skip_ws(ctx)) {
size_t i;
@@ -224,7 +225,7 @@ static int read_pname(link_ctx *ctx, cha
return 0;
}
-static int read_pvalue(link_ctx *ctx, char **pvalue)
+static int read_pvalue(link_ctx *ctx, const char **pvalue)
{
if (skip_ws(ctx) && read_chr(ctx, '=')) {
if (read_qstring(ctx, pvalue) || read_ptoken(ctx, pvalue)) {
@@ -237,7 +238,7 @@ static int read_pvalue(link_ctx *ctx, ch
static int read_param(link_ctx *ctx)
{
if (skip_ws(ctx) && read_chr(ctx, ';')) {
- char *name, *value = "";
+ const char *name, *value = "";
if (read_pname(ctx, &name)) {
read_pvalue(ctx, &value); /* value is optional */
apr_table_setn(ctx->params, name, value);
@@ -336,7 +337,7 @@ static int add_push(link_ctx *ctx)
*/
path = apr_uri_unparse(ctx->pool, &uri, APR_URI_UNP_OMITSITEPART);
push = apr_pcalloc(ctx->pool, sizeof(*push));
- switch (ctx->req->push_policy) {
+ switch (ctx->push_policy) {
case H2_PUSH_HEAD:
method = "HEAD";
break;
@@ -346,13 +347,17 @@ static int add_push(link_ctx *ctx)
}
headers = apr_table_make(ctx->pool, 5);
apr_table_do(set_push_header, headers, ctx->req->headers, NULL);
- req = h2_req_createn(0, ctx->pool, method, ctx->req->scheme,
- ctx->req->authority, path, headers,
- ctx->req->serialize);
+ req = h2_req_create(0, ctx->pool, method, ctx->req->scheme,
+ ctx->req->authority, path, headers,
+ ctx->req->serialize);
/* atm, we do not push on pushes */
- h2_request_end_headers(req, ctx->pool, 1, 0);
+ h2_request_end_headers(req, ctx->pool, 1);
push->req = req;
-
+ if (has_param(ctx, "critical")) {
+ h2_priority *prio = apr_pcalloc(ctx->pool, sizeof(*prio));
+ prio->dependency = H2_DEPENDANT_BEFORE;
+ push->priority = prio;
+ }
if (!ctx->pushes) {
ctx->pushes = apr_array_make(ctx->pool, 5, sizeof(h2_push*));
}
@@ -427,10 +432,10 @@ static int head_iter(void *ctx, const ch
return 1;
}
-apr_array_header_t *h2_push_collect(apr_pool_t *p, const h2_request *req,
- const h2_response *res)
+apr_array_header_t *h2_push_collect(apr_pool_t *p, const h2_request *req,
+ int push_policy, const h2_headers *res)
{
- if (req && req->push_policy != H2_PUSH_NONE) {
+ if (req && push_policy != H2_PUSH_NONE) {
/* Collect push candidates from the request/response pair.
*
* One source for pushes are "rel=preload" link headers
@@ -444,11 +449,13 @@ apr_array_header_t *h2_push_collect(apr_
memset(&ctx, 0, sizeof(ctx));
ctx.req = req;
+ ctx.push_policy = push_policy;
ctx.pool = p;
apr_table_do(head_iter, &ctx, res->headers, NULL);
if (ctx.pushes) {
- apr_table_setn(res->headers, "push-policy", policy_str(req->push_policy));
+ apr_table_setn(res->headers, "push-policy",
+ policy_str(push_policy));
}
return ctx.pushes;
}
@@ -527,9 +534,9 @@ static unsigned int val_apr_hash(const c
static void calc_apr_hash(h2_push_diary *diary, apr_uint64_t *phash, h2_push *push)
{
apr_uint64_t val;
-#if APR_UINT64MAX > APR_UINT_MAX
- val = (val_apr_hash(push->req->scheme) << 32);
- val ^= (val_apr_hash(push->req->authority) << 16);
+#if APR_UINT64_MAX > UINT_MAX
+ val = ((apr_uint64_t)(val_apr_hash(push->req->scheme)) << 32);
+ val ^= ((apr_uint64_t)(val_apr_hash(push->req->authority)) << 16);
val ^= val_apr_hash(push->req->path);
#else
val = val_apr_hash(push->req->scheme);
@@ -552,7 +559,7 @@ static apr_int32_t ceil_power_of_2(apr_i
}
static h2_push_diary *diary_create(apr_pool_t *p, h2_push_digest_type dtype,
- apr_size_t N)
+ int N)
{
h2_push_diary *diary = NULL;
@@ -561,7 +568,7 @@ static h2_push_diary *diary_create(apr_p
diary->NMax = ceil_power_of_2(N);
diary->N = diary->NMax;
- /* the mask we use in value comparision depends on where we got
+ /* the mask we use in value comparison depends on where we got
* the values from. If we calculate them ourselves, we can use
* the full 64 bits.
* If we set the diary via a compressed golomb set, we have less
@@ -587,7 +594,7 @@ static h2_push_diary *diary_create(apr_p
return diary;
}
-h2_push_diary *h2_push_diary_create(apr_pool_t *p, apr_size_t N)
+h2_push_diary *h2_push_diary_create(apr_pool_t *p, int N)
{
return diary_create(p, H2_PUSH_DIGEST_SHA256, N);
}
@@ -681,7 +688,7 @@ apr_array_header_t *h2_push_diary_update
apr_array_header_t *h2_push_collect_update(h2_stream *stream,
const struct h2_request *req,
- const struct h2_response *res)
+ const struct h2_headers *res)
{
h2_session *session = stream->session;
const char *cache_digest = apr_table_get(req->headers, "Cache-Digest");
@@ -693,12 +700,11 @@ apr_array_header_t *h2_push_collect_upda
cache_digest, stream->pool);
if (status != APR_SUCCESS) {
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c,
- APLOGNO(03057)
- "h2_session(%ld): push diary set from Cache-Digest: %s",
- session->id, cache_digest);
+ H2_SSSN_LOG(APLOGNO(03057), session,
+ "push diary set from Cache-Digest: %s"), cache_digest);
}
}
- pushes = h2_push_collect(stream->pool, req, res);
+ pushes = h2_push_collect(stream->pool, req, stream->push_policy, res);
return h2_push_diary_update(stream->session, pushes);
}
@@ -711,9 +717,9 @@ static apr_int32_t h2_log2inv(unsigned c
typedef struct {
h2_push_diary *diary;
unsigned char log2p;
- apr_uint32_t mask_bits;
- apr_uint32_t delta_bits;
- apr_uint32_t fixed_bits;
+ int mask_bits;
+ int delta_bits;
+ int fixed_bits;
apr_uint64_t fixed_mask;
apr_pool_t *pool;
unsigned char *data;
@@ -812,10 +818,10 @@ static apr_status_t gset_encode_next(gse
* @param plen on successful return, the length of the binary data
*/
apr_status_t h2_push_diary_digest_get(h2_push_diary *diary, apr_pool_t *pool,
- apr_uint32_t maxP, const char *authority,
+ int maxP, const char *authority,
const char **pdata, apr_size_t *plen)
{
- apr_size_t nelts, N, i;
+ int nelts, N, i;
unsigned char log2n, log2pmax;
gset_encoder encoder;
apr_uint64_t *hashes;
@@ -965,7 +971,7 @@ apr_status_t h2_push_diary_digest_set(h2
{
gset_decoder decoder;
unsigned char log2n, log2p;
- apr_size_t N, i;
+ int N, i;
apr_pool_t *pool = diary->entries->pool;
h2_push_diary_entry e;
apr_status_t status = APR_SUCCESS;
diff -up --new-file httpd-2.4.23/modules/http2/h2_push.h /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_push.h
--- httpd-2.4.23/modules/http2/h2_push.h 2016-05-30 21:58:14.000000000 +0200
+++ /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_push.h 2016-11-14 12:15:08.000000000 +0100
@@ -18,13 +18,14 @@
#include "h2.h"
struct h2_request;
-struct h2_response;
+struct h2_headers;
struct h2_ngheader;
struct h2_session;
struct h2_stream;
typedef struct h2_push {
const struct h2_request *req;
+ h2_priority *priority;
} h2_push;
typedef enum {
@@ -38,8 +39,8 @@ typedef void h2_push_digest_calc(h2_push
struct h2_push_diary {
apr_array_header_t *entries;
- apr_size_t NMax; /* Maximum for N, should size change be necessary */
- apr_size_t N; /* Current maximum number of entries, power of 2 */
+ int NMax; /* Maximum for N, should size change be necessary */
+ int N; /* Current maximum number of entries, power of 2 */
apr_uint64_t mask; /* mask for relevant bits */
unsigned int mask_bits; /* number of relevant bits */
const char *authority;
@@ -58,7 +59,8 @@ struct h2_push_diary {
*/
apr_array_header_t *h2_push_collect(apr_pool_t *p,
const struct h2_request *req,
- const struct h2_response *res);
+ int push_policy,
+ const struct h2_headers *res);
/**
* Create a new push diary for the given maximum number of entries.
@@ -67,7 +69,7 @@ apr_array_header_t *h2_push_collect(apr_
* @param N the max number of entries, rounded up to 2^x
* @return the created diary, might be NULL of max_entries is 0
*/
-h2_push_diary *h2_push_diary_create(apr_pool_t *p, apr_size_t N);
+h2_push_diary *h2_push_diary_create(apr_pool_t *p, int N);
/**
* Filters the given pushes against the diary and returns only those pushes
@@ -81,7 +83,7 @@ apr_array_header_t *h2_push_diary_update
*/
apr_array_header_t *h2_push_collect_update(struct h2_stream *stream,
const struct h2_request *req,
- const struct h2_response *res);
+ const struct h2_headers *res);
/**
* Get a cache digest as described in
* https://datatracker.ietf.org/doc/draft-kazuho-h2-cache-digest/
@@ -94,7 +96,7 @@ apr_array_header_t *h2_push_collect_upda
* @param plen on successful return, the length of the binary data
*/
apr_status_t h2_push_diary_digest_get(h2_push_diary *diary, apr_pool_t *p,
- apr_uint32_t maxP, const char *authority,
+ int maxP, const char *authority,
const char **pdata, apr_size_t *plen);
/**
diff -up --new-file httpd-2.4.23/modules/http2/h2_request.c /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_request.c
--- httpd-2.4.23/modules/http2/h2_request.c 2016-05-04 15:58:02.000000000 +0200
+++ /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_request.c 2017-04-18 15:12:38.000000000 +0200
@@ -30,27 +30,43 @@
#include <scoreboard.h>
#include "h2_private.h"
+#include "h2_config.h"
#include "h2_push.h"
#include "h2_request.h"
#include "h2_util.h"
-static apr_status_t inspect_clen(h2_request *req, const char *s)
+typedef struct {
+ apr_table_t *headers;
+ apr_pool_t *pool;
+ apr_status_t status;
+} h1_ctx;
+
+static int set_h1_header(void *ctx, const char *key, const char *value)
{
- char *end;
- req->content_length = apr_strtoi64(s, &end, 10);
- return (s == end)? APR_EINVAL : APR_SUCCESS;
+ h1_ctx *x = ctx;
+ x->status = h2_req_add_header(x->headers, x->pool, key, strlen(key),
+ value, strlen(value));
+ return (x->status == APR_SUCCESS)? 1 : 0;
}
-apr_status_t h2_request_rwrite(h2_request *req, apr_pool_t *pool,
- request_rec *r)
+apr_status_t h2_request_rcreate(h2_request **preq, apr_pool_t *pool,
+ request_rec *r)
{
- apr_status_t status;
- const char *scheme, *authority;
+ h2_request *req;
+ const char *scheme, *authority, *path;
+ h1_ctx x;
+ *preq = NULL;
scheme = apr_pstrdup(pool, r->parsed_uri.scheme? r->parsed_uri.scheme
: ap_http_scheme(r));
authority = apr_pstrdup(pool, r->hostname);
+ path = apr_uri_unparse(pool, &r->parsed_uri, APR_URI_UNP_OMITSITEPART);
+
+ if (!r->method || !scheme || !r->hostname || !path) {
+ return APR_EINVAL;
+ }
+
if (!ap_strchr_c(authority, ':') && r->server && r->server->port) {
apr_port_t defport = apr_uri_port_of_scheme(scheme);
if (defport != r->server->port) {
@@ -60,11 +76,24 @@ apr_status_t h2_request_rwrite(h2_reques
}
}
- status = h2_req_make(req, pool, apr_pstrdup(pool, r->method), scheme,
- authority, apr_uri_unparse(pool, &r->parsed_uri,
- APR_URI_UNP_OMITSITEPART),
- r->headers_in);
- return status;
+ req = apr_pcalloc(pool, sizeof(*req));
+ req->method = apr_pstrdup(pool, r->method);
+ req->scheme = scheme;
+ req->authority = authority;
+ req->path = path;
+ req->headers = apr_table_make(pool, 10);
+ if (r->server) {
+ req->serialize = h2_config_geti(h2_config_sget(r->server),
+ H2_CONF_SER_HEADERS);
+ }
+
+ x.pool = pool;
+ x.headers = req->headers;
+ x.status = APR_SUCCESS;
+ apr_table_do(set_h1_header, &x, r->headers_in, NULL);
+
+ *preq = req;
+ return x.status;
}
apr_status_t h2_request_add_header(h2_request *req, apr_pool_t *pool,
@@ -82,8 +111,7 @@ apr_status_t h2_request_add_header(h2_re
if (!apr_is_empty_table(req->headers)) {
ap_log_perror(APLOG_MARK, APLOG_ERR, 0, pool,
APLOGNO(02917)
- "h2_request(%d): pseudo header after request start",
- req->id);
+ "h2_request: pseudo header after request start");
return APR_EGENERAL;
}
@@ -109,28 +137,22 @@ apr_status_t h2_request_add_header(h2_re
strncpy(buffer, name, (nlen > 31)? 31 : nlen);
ap_log_perror(APLOG_MARK, APLOG_WARNING, 0, pool,
APLOGNO(02954)
- "h2_request(%d): ignoring unknown pseudo header %s",
- req->id, buffer);
+ "h2_request: ignoring unknown pseudo header %s",
+ buffer);
}
}
else {
/* non-pseudo header, append to work bucket of stream */
- status = h2_headers_add_h1(req->headers, pool, name, nlen, value, vlen);
+ status = h2_req_add_header(req->headers, pool, name, nlen, value, vlen);
}
return status;
}
-apr_status_t h2_request_end_headers(h2_request *req, apr_pool_t *pool,
- int eos, int push)
+apr_status_t h2_request_end_headers(h2_request *req, apr_pool_t *pool, int eos)
{
const char *s;
- if (req->eoh) {
- /* already done */
- return APR_SUCCESS;
- }
-
/* rfc7540, ch. 8.1.2.3:
* - if we have :authority, it overrides any Host header
* - :authority MUST be ommited when converting h1->h2, so we
@@ -147,18 +169,11 @@ apr_status_t h2_request_end_headers(h2_r
}
s = apr_table_get(req->headers, "Content-Length");
- if (s) {
- if (inspect_clen(req, s) != APR_SUCCESS) {
- ap_log_perror(APLOG_MARK, APLOG_WARNING, APR_EINVAL, pool,
- APLOGNO(02959)
- "h2_request(%d): content-length value not parsed: %s",
- req->id, s);
- return APR_EINVAL;
- }
- }
- else {
- /* no content-length given */
- req->content_length = -1;
+ if (!s) {
+ /* HTTP/2 does not need a Content-Length for framing, but our
+ * internal request processing is used to HTTP/1.1, so we
+ * need to either add a Content-Length or a Transfer-Encoding
+ * if any content can be expected. */
if (!eos) {
/* We have not seen a content-length and have no eos,
* simulate a chunked encoding for our HTTP/1.1 infrastructure,
@@ -168,68 +183,16 @@ apr_status_t h2_request_end_headers(h2_r
apr_table_mergen(req->headers, "Transfer-Encoding", "chunked");
}
else if (apr_table_get(req->headers, "Content-Type")) {
- /* If we have a content-type, but already see eos, no more
+ /* If we have a content-type, but already seen eos, no more
* data will come. Signal a zero content length explicitly.
*/
apr_table_setn(req->headers, "Content-Length", "0");
}
}
- req->eoh = 1;
- h2_push_policy_determine(req, pool, push);
-
- /* In the presence of trailers, force behaviour of chunked encoding */
- s = apr_table_get(req->headers, "Trailer");
- if (s && s[0]) {
- req->trailers = apr_table_make(pool, 5);
- if (!req->chunked) {
- req->chunked = 1;
- apr_table_mergen(req->headers, "Transfer-Encoding", "chunked");
- }
- }
-
- return APR_SUCCESS;
-}
-
-static apr_status_t add_h1_trailer(h2_request *req, apr_pool_t *pool,
- const char *name, size_t nlen,
- const char *value, size_t vlen)
-{
- char *hname, *hvalue;
-
- if (h2_req_ignore_trailer(name, nlen)) {
- return APR_SUCCESS;
- }
-
- hname = apr_pstrndup(pool, name, nlen);
- hvalue = apr_pstrndup(pool, value, vlen);
- h2_util_camel_case_header(hname, nlen);
-
- apr_table_mergen(req->trailers, hname, hvalue);
-
return APR_SUCCESS;
}
-
-apr_status_t h2_request_add_trailer(h2_request *req, apr_pool_t *pool,
- const char *name, size_t nlen,
- const char *value, size_t vlen)
-{
- if (!req->trailers) {
- ap_log_perror(APLOG_MARK, APLOG_DEBUG, APR_EINVAL, pool, APLOGNO(03059)
- "h2_request(%d): unanounced trailers",
- req->id);
- return APR_EINVAL;
- }
- if (nlen == 0 || name[0] == ':') {
- ap_log_perror(APLOG_MARK, APLOG_DEBUG, APR_EINVAL, pool, APLOGNO(03060)
- "h2_request(%d): pseudo header in trailer",
- req->id);
- return APR_EINVAL;
- }
- return add_h1_trailer(req, pool, name, nlen, value, vlen);
-}
-
h2_request *h2_request_clone(apr_pool_t *p, const h2_request *src)
{
h2_request *dst = apr_pmemdup(p, src, sizeof(*dst));
@@ -238,25 +201,24 @@ h2_request *h2_request_clone(apr_pool_t
dst->authority = apr_pstrdup(p, src->authority);
dst->path = apr_pstrdup(p, src->path);
dst->headers = apr_table_clone(p, src->headers);
- if (src->trailers) {
- dst->trailers = apr_table_clone(p, src->trailers);
- }
return dst;
}
-request_rec *h2_request_create_rec(const h2_request *req, conn_rec *conn)
+request_rec *h2_request_create_rec(const h2_request *req, conn_rec *c)
{
- request_rec *r;
- apr_pool_t *p;
int access_status = HTTP_OK;
-
- apr_pool_create(&p, conn->pool);
+ const char *rpath;
+ apr_pool_t *p;
+ request_rec *r;
+ const char *s;
+
+ apr_pool_create(&p, c->pool);
apr_pool_tag(p, "request");
r = apr_pcalloc(p, sizeof(request_rec));
- AP_READ_REQUEST_ENTRY((intptr_t)r, (uintptr_t)conn);
+ AP_READ_REQUEST_ENTRY((intptr_t)r, (uintptr_t)c);
r->pool = p;
- r->connection = conn;
- r->server = conn->base_server;
+ r->connection = c;
+ r->server = c->base_server;
r->user = NULL;
r->ap_auth_type = NULL;
@@ -274,9 +236,9 @@ request_rec *h2_request_create_rec(const
r->request_config = ap_create_request_config(r->pool);
/* Must be set before we run create request hook */
- r->proto_output_filters = conn->output_filters;
+ r->proto_output_filters = c->output_filters;
r->output_filters = r->proto_output_filters;
- r->proto_input_filters = conn->input_filters;
+ r->proto_input_filters = c->input_filters;
r->input_filters = r->proto_input_filters;
ap_run_create_request(r);
r->per_dir_config = r->server->lookup_defaults;
@@ -295,10 +257,10 @@ request_rec *h2_request_create_rec(const
*/
r->used_path_info = AP_REQ_DEFAULT_PATH_INFO;
- r->useragent_addr = conn->client_addr;
- r->useragent_ip = conn->client_ip;
+ r->useragent_addr = c->client_addr;
+ r->useragent_ip = c->client_ip;
- ap_run_pre_read_request(r, conn);
+ ap_run_pre_read_request(r, c);
/* Time to populate r with the data we have. */
r->request_time = req->request_time;
@@ -309,12 +271,13 @@ request_rec *h2_request_create_rec(const
r->header_only = 1;
}
- ap_parse_uri(r, req->path);
- r->protocol = "HTTP/2.0";
+ rpath = (req->path ? req->path : "");
+ ap_parse_uri(r, rpath);
+ r->protocol = (char*)"HTTP/2.0";
r->proto_num = HTTP_VERSION(2, 0);
r->the_request = apr_psprintf(r->pool, "%s %s %s",
- r->method, req->path, r->protocol);
+ r->method, rpath, r->protocol);
/* update what we think the virtual host is based on the headers we've
* now read. may update status.
@@ -327,6 +290,17 @@ request_rec *h2_request_create_rec(const
/* we may have switched to another server */
r->per_dir_config = r->server->lookup_defaults;
+ s = apr_table_get(r->headers_in, "Expect");
+ if (s && s[0]) {
+ if (ap_cstr_casecmp(s, "100-continue") == 0) {
+ r->expecting_100 = 1;
+ }
+ else {
+ r->status = HTTP_EXPECTATION_FAILED;
+ ap_send_error_response(r, 0);
+ }
+ }
+
/*
* Add the HTTP_IN filter here to ensure that ap_discard_request_body
* called by ap_die and by ap_send_error_response works correctly on
@@ -341,16 +315,16 @@ request_rec *h2_request_create_rec(const
/* Request check post hooks failed. An example of this would be a
* request for a vhost where h2 is disabled --> 421.
*/
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, conn, APLOGNO()
- "h2_request(%d): access_status=%d, request_create failed",
- req->id, access_status);
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(03367)
+ "h2_request: access_status=%d, request_create failed",
+ access_status);
ap_die(access_status, r);
- ap_update_child_status(conn->sbh, SERVER_BUSY_LOG, r);
+ ap_update_child_status(c->sbh, SERVER_BUSY_LOG, r);
ap_run_log_transaction(r);
r = NULL;
goto traceout;
}
-
+
AP_READ_REQUEST_SUCCESS((uintptr_t)r, (char *)r->method,
(char *)r->uri, (char *)r->server->defn_name,
r->status);
diff -up --new-file httpd-2.4.23/modules/http2/h2_request.h /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_request.h
--- httpd-2.4.23/modules/http2/h2_request.h 2016-05-04 15:58:02.000000000 +0200
+++ /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_request.h 2016-10-03 14:57:47.000000000 +0200
@@ -18,8 +18,8 @@
#include "h2.h"
-apr_status_t h2_request_rwrite(h2_request *req, apr_pool_t *pool,
- request_rec *r);
+apr_status_t h2_request_rcreate(h2_request **preq, apr_pool_t *pool,
+ request_rec *r);
apr_status_t h2_request_add_header(h2_request *req, apr_pool_t *pool,
const char *name, size_t nlen,
@@ -29,8 +29,7 @@ apr_status_t h2_request_add_trailer(h2_r
const char *name, size_t nlen,
const char *value, size_t vlen);
-apr_status_t h2_request_end_headers(h2_request *req, apr_pool_t *pool,
- int eos, int push);
+apr_status_t h2_request_end_headers(h2_request *req, apr_pool_t *pool, int eos);
h2_request *h2_request_clone(apr_pool_t *p, const h2_request *src);
diff -up --new-file httpd-2.4.23/modules/http2/h2_response.c /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_response.c
--- httpd-2.4.23/modules/http2/h2_response.c 2016-05-18 17:10:20.000000000 +0200
+++ /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_response.c 1970-01-01 01:00:00.000000000 +0100
@@ -1,205 +0,0 @@
-/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
-
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <assert.h>
-#include <stdio.h>
-
-#include <apr_strings.h>
-
-#include <httpd.h>
-#include <http_core.h>
-#include <http_log.h>
-#include <util_time.h>
-
-#include <nghttp2/nghttp2.h>
-
-#include "h2_private.h"
-#include "h2_filter.h"
-#include "h2_h2.h"
-#include "h2_util.h"
-#include "h2_request.h"
-#include "h2_response.h"
-
-
-static apr_table_t *parse_headers(apr_array_header_t *hlines, apr_pool_t *pool)
-{
- if (hlines) {
- apr_table_t *headers = apr_table_make(pool, hlines->nelts);
- int i;
-
- for (i = 0; i < hlines->nelts; ++i) {
- char *hline = ((char **)hlines->elts)[i];
- char *sep = ap_strchr(hline, ':');
- if (!sep) {
- ap_log_perror(APLOG_MARK, APLOG_WARNING, APR_EINVAL, pool,
- APLOGNO(02955) "h2_response: invalid header[%d] '%s'",
- i, (char*)hline);
- /* not valid format, abort */
- return NULL;
- }
- (*sep++) = '\0';
- while (*sep == ' ' || *sep == '\t') {
- ++sep;
- }
-
- if (!h2_util_ignore_header(hline)) {
- apr_table_merge(headers, hline, sep);
- }
- }
- return headers;
- }
- else {
- return apr_table_make(pool, 0);
- }
-}
-
-static const char *get_sos_filter(apr_table_t *notes)
-{
- return notes? apr_table_get(notes, H2_RESP_SOS_NOTE) : NULL;
-}
-
-static void check_clen(h2_response *response, request_rec *r, apr_pool_t *pool)
-{
-
- if (r && r->header_only) {
- response->content_length = 0;
- }
- else if (response->headers) {
- const char *s = apr_table_get(response->headers, "Content-Length");
- if (s) {
- char *end;
- response->content_length = apr_strtoi64(s, &end, 10);
- if (s == end) {
- ap_log_perror(APLOG_MARK, APLOG_WARNING, APR_EINVAL,
- pool, APLOGNO(02956)
- "h2_response: content-length"
- " value not parsed: %s", s);
- response->content_length = -1;
- }
- }
- }
-}
-
-static h2_response *h2_response_create_int(int stream_id,
- int rst_error,
- int http_status,
- apr_table_t *headers,
- apr_table_t *notes,
- apr_pool_t *pool)
-{
- h2_response *response;
-
- if (!headers) {
- return NULL;
- }
-
- response = apr_pcalloc(pool, sizeof(h2_response));
- if (response == NULL) {
- return NULL;
- }
-
- response->stream_id = stream_id;
- response->rst_error = rst_error;
- response->http_status = http_status? http_status : 500;
- response->content_length = -1;
- response->headers = headers;
- response->sos_filter = get_sos_filter(notes);
-
- check_clen(response, NULL, pool);
- return response;
-}
-
-
-h2_response *h2_response_create(int stream_id,
- int rst_error,
- int http_status,
- apr_array_header_t *hlines,
- apr_table_t *notes,
- apr_pool_t *pool)
-{
- return h2_response_create_int(stream_id, rst_error, http_status,
- parse_headers(hlines, pool), notes, pool);
-}
-
-h2_response *h2_response_rcreate(int stream_id, request_rec *r,
- apr_table_t *header, apr_pool_t *pool)
-{
- h2_response *response = apr_pcalloc(pool, sizeof(h2_response));
- if (response == NULL) {
- return NULL;
- }
-
- response->stream_id = stream_id;
- response->http_status = r->status;
- response->content_length = -1;
- response->headers = header;
- response->sos_filter = get_sos_filter(r->notes);
-
- check_clen(response, r, pool);
-
- if (response->http_status == HTTP_FORBIDDEN) {
- const char *cause = apr_table_get(r->notes, "ssl-renegotiate-forbidden");
- if (cause) {
- /* This request triggered a TLS renegotiation that is now allowed
- * in HTTP/2. Tell the client that it should use HTTP/1.1 for this.
- */
- ap_log_rerror(APLOG_MARK, APLOG_DEBUG, response->http_status, r,
- APLOGNO(03061)
- "h2_response(%ld-%d): renegotiate forbidden, cause: %s",
- (long)r->connection->id, stream_id, cause);
- response->rst_error = H2_ERR_HTTP_1_1_REQUIRED;
- }
- }
-
- return response;
-}
-
-h2_response *h2_response_die(int stream_id, apr_status_t type,
- const struct h2_request *req, apr_pool_t *pool)
-{
- apr_table_t *headers = apr_table_make(pool, 5);
- char *date = NULL;
- int status = (type >= 200 && type < 600)? type : 500;
-
- date = apr_palloc(pool, APR_RFC822_DATE_LEN);
- ap_recent_rfc822_date(date, req->request_time);
- apr_table_setn(headers, "Date", date);
- apr_table_setn(headers, "Server", ap_get_server_banner());
-
- return h2_response_create_int(stream_id, 0, status, headers, NULL, pool);
-}
-
-h2_response *h2_response_clone(apr_pool_t *pool, h2_response *from)
-{
- h2_response *to = apr_pcalloc(pool, sizeof(h2_response));
-
- to->stream_id = from->stream_id;
- to->http_status = from->http_status;
- to->content_length = from->content_length;
- to->sos_filter = from->sos_filter;
- if (from->headers) {
- to->headers = apr_table_clone(pool, from->headers);
- }
- if (from->trailers) {
- to->trailers = apr_table_clone(pool, from->trailers);
- }
- return to;
-}
-
-void h2_response_set_trailers(h2_response *response, apr_table_t *trailers)
-{
- response->trailers = trailers;
-}
-
diff -up --new-file httpd-2.4.23/modules/http2/h2_response.h /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_response.h
--- httpd-2.4.23/modules/http2/h2_response.h 2016-03-02 12:21:28.000000000 +0100
+++ /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_response.h 1970-01-01 01:00:00.000000000 +0100
@@ -1,73 +0,0 @@
-/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
-
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __mod_h2__h2_response__
-#define __mod_h2__h2_response__
-
-#include "h2.h"
-
-/**
- * Create the response from the status and parsed header lines.
- * @param stream_id id of the stream to create the response for
- * @param rst_error error for reset or 0
- * @param http_status http status code of response
- * @param hlines the text lines of the response header
- * @param pool the memory pool to use
- */
-h2_response *h2_response_create(int stream_id,
- int rst_error,
- int http_status,
- apr_array_header_t *hlines,
- apr_table_t *notes,
- apr_pool_t *pool);
-
-/**
- * Create the response from the given request_rec.
- * @param stream_id id of the stream to create the response for
- * @param r the request record which was processed
- * @param header the headers of the response
- * @param pool the memory pool to use
- */
-h2_response *h2_response_rcreate(int stream_id, request_rec *r,
- apr_table_t *header, apr_pool_t *pool);
-
-/**
- * Create the response for the given error.
- * @param stream_id id of the stream to create the response for
- * @param type the error code
- * @param req the original h2_request
- * @param pool the memory pool to use
- */
-h2_response *h2_response_die(int stream_id, apr_status_t type,
- const struct h2_request *req, apr_pool_t *pool);
-
-/**
- * Deep copies the response into a new pool.
- * @param pool the pool to use for the clone
- * @param from the response to clone
- * @return the cloned response
- */
-h2_response *h2_response_clone(apr_pool_t *pool, h2_response *from);
-
-/**
- * Set the trailers in the reponse. Will replace any existing trailers. Will
- * *not* clone the table.
- *
- * @param response the repsone to set the trailers for
- * @param trailers the trailers to set
- */
-void h2_response_set_trailers(h2_response *response, apr_table_t *trailers);
-
-#endif /* defined(__mod_h2__h2_response__) */
diff -up --new-file httpd-2.4.23/modules/http2/h2_session.c /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_session.c
--- httpd-2.4.23/modules/http2/h2_session.c 2017-12-27 23:01:33.408186020 +0100
+++ /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_session.c 2017-07-04 14:34:15.000000000 +0200
@@ -27,9 +27,11 @@
#include <http_log.h>
#include <scoreboard.h>
+#include <mpm_common.h>
+
#include "h2_private.h"
#include "h2.h"
-#include "h2_bucket_eoc.h"
+#include "h2_bucket_beam.h"
#include "h2_bucket_eos.h"
#include "h2_config.h"
#include "h2_ctx.h"
@@ -38,9 +40,8 @@
#include "h2_mplx.h"
#include "h2_push.h"
#include "h2_request.h"
-#include "h2_response.h"
+#include "h2_headers.h"
#include "h2_stream.h"
-#include "h2_from_h1.h"
#include "h2_task.h"
#include "h2_session.h"
#include "h2_util.h"
@@ -48,6 +49,15 @@
#include "h2_workers.h"
+static apr_status_t dispatch_master(h2_session *session);
+static apr_status_t h2_session_read(h2_session *session, int block);
+static void transit(h2_session *session, const char *action,
+ h2_session_state nstate);
+
+static void on_stream_state_enter(void *ctx, h2_stream *stream);
+static void on_stream_state_event(void *ctx, h2_stream *stream, h2_stream_event_t ev);
+static void on_stream_event(void *ctx, h2_stream *stream, h2_stream_event_t ev);
+
static int h2_session_status_from_apr_status(apr_status_t rv)
{
if (rv == APR_SUCCESS) {
@@ -62,80 +72,44 @@ static int h2_session_status_from_apr_st
return NGHTTP2_ERR_PROTO;
}
-static void update_window(void *ctx, int stream_id, apr_off_t bytes_read)
+h2_stream *h2_session_stream_get(h2_session *session, int stream_id)
{
- h2_session *session = (h2_session*)ctx;
- nghttp2_session_consume(session->ngh2, stream_id, bytes_read);
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
- "h2_session(%ld-%d): consumed %ld bytes",
- session->id, stream_id, (long)bytes_read);
+ return nghttp2_session_get_stream_user_data(session->ngh2, stream_id);
}
-static apr_status_t h2_session_receive(void *ctx,
- const char *data, apr_size_t len,
- apr_size_t *readlen);
-
-static int is_accepting_streams(h2_session *session);
static void dispatch_event(h2_session *session, h2_session_event_t ev,
int err, const char *msg);
-apr_status_t h2_session_stream_done(h2_session *session, h2_stream *stream)
+void h2_session_event(h2_session *session, h2_session_event_t ev,
+ int err, const char *msg)
{
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
- "h2_stream(%ld-%d): EOS bucket cleanup -> done",
- session->id, stream->id);
- h2_ihash_remove(session->streams, stream->id);
- h2_mplx_stream_done(session->mplx, stream);
-
- dispatch_event(session, H2_SESSION_EV_STREAM_DONE, 0, NULL);
- return APR_SUCCESS;
+ dispatch_event(session, ev, err, msg);
}
-typedef struct stream_sel_ctx {
- h2_session *session;
- h2_stream *candidate;
-} stream_sel_ctx;
-
-static int find_cleanup_stream(void *udata, void *sdata)
+static int rst_unprocessed_stream(h2_stream *stream, void *ctx)
{
- stream_sel_ctx *ctx = udata;
- h2_stream *stream = sdata;
- if (H2_STREAM_CLIENT_INITIATED(stream->id)) {
- if (!ctx->session->local.accepting
- && stream->id > ctx->session->local.accepted_max) {
- ctx->candidate = stream;
- return 0;
- }
- }
- else {
- if (!ctx->session->remote.accepting
- && stream->id > ctx->session->remote.accepted_max) {
- ctx->candidate = stream;
- return 0;
- }
+ int unprocessed = (!h2_stream_was_closed(stream)
+ && (H2_STREAM_CLIENT_INITIATED(stream->id)?
+ (!stream->session->local.accepting
+ && stream->id > stream->session->local.accepted_max)
+ :
+ (!stream->session->remote.accepting
+ && stream->id > stream->session->remote.accepted_max))
+ );
+ if (unprocessed) {
+ h2_stream_rst(stream, H2_ERR_NO_ERROR);
+ return 0;
}
return 1;
}
-static void cleanup_streams(h2_session *session)
+static void cleanup_unprocessed_streams(h2_session *session)
{
- stream_sel_ctx ctx;
- ctx.session = session;
- ctx.candidate = NULL;
- while (1) {
- h2_ihash_iter(session->streams, find_cleanup_stream, &ctx);
- if (ctx.candidate) {
- h2_session_stream_done(session, ctx.candidate);
- ctx.candidate = NULL;
- }
- else {
- break;
- }
- }
+ h2_mplx_stream_do(session->mplx, rst_unprocessed_stream, session);
}
-h2_stream *h2_session_open_stream(h2_session *session, int stream_id,
- int initiated_on, const h2_request *req)
+static h2_stream *h2_session_open_stream(h2_session *session, int stream_id,
+ int initiated_on)
{
h2_stream * stream;
apr_pool_t *stream_pool;
@@ -143,26 +117,11 @@ h2_stream *h2_session_open_stream(h2_ses
apr_pool_create(&stream_pool, session->pool);
apr_pool_tag(stream_pool, "h2_stream");
- stream = h2_stream_open(stream_id, stream_pool, session,
- initiated_on, req);
- nghttp2_session_set_stream_user_data(session->ngh2, stream_id, stream);
- h2_ihash_add(session->streams, stream);
-
- if (H2_STREAM_CLIENT_INITIATED(stream_id)) {
- if (stream_id > session->remote.emitted_max) {
- ++session->remote.emitted_count;
- session->remote.emitted_max = stream->id;
- session->local.accepted_max = stream->id;
- }
- }
- else {
- if (stream_id > session->local.emitted_max) {
- ++session->local.emitted_count;
- session->remote.emitted_max = stream->id;
- }
+ stream = h2_stream_create(stream_id, stream_pool, session,
+ session->monitor, initiated_on);
+ if (stream) {
+ nghttp2_session_set_stream_user_data(session->ngh2, stream_id, stream);
}
- dispatch_event(session, H2_SESSION_EV_STREAM_OPEN, 0, NULL);
-
return stream;
}
@@ -219,14 +178,6 @@ static int stream_pri_cmp(int sid1, int
return spri_cmp(sid1, s1, sid2, s2, session);
}
-static apr_status_t stream_schedule(h2_session *session,
- h2_stream *stream, int eos)
-{
- (void)session;
- return h2_stream_schedule(stream, eos, h2_session_push_enabled(session),
- stream_pri_cmp, session);
-}
-
/*
* Callback when nghttp2 wants to send bytes back to the client.
*/
@@ -236,9 +187,9 @@ static ssize_t send_cb(nghttp2_session *
{
h2_session *session = (h2_session *)userp;
apr_status_t status;
-
(void)ngh2;
(void)flags;
+
status = h2_conn_io_write(&session->io, (const char *)data, length);
if (status == APR_SUCCESS) {
return length;
@@ -262,96 +213,40 @@ static int on_invalid_frame_recv_cb(nght
char buffer[256];
h2_util_frame_print(frame, buffer, sizeof(buffer)/sizeof(buffer[0]));
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03063)
- "h2_session(%ld): recv invalid FRAME[%s], frames=%ld/%ld (r/s)",
- session->id, buffer, (long)session->frames_received,
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
+ H2_SSSN_LOG(APLOGNO(03063), session,
+ "recv invalid FRAME[%s], frames=%ld/%ld (r/s)"),
+ buffer, (long)session->frames_received,
(long)session->frames_sent);
}
return 0;
}
-static h2_stream *get_stream(h2_session *session, int stream_id)
-{
- return nghttp2_session_get_stream_user_data(session->ngh2, stream_id);
-}
-
static int on_data_chunk_recv_cb(nghttp2_session *ngh2, uint8_t flags,
int32_t stream_id,
const uint8_t *data, size_t len, void *userp)
{
h2_session *session = (h2_session *)userp;
- apr_status_t status = APR_SUCCESS;
+ apr_status_t status = APR_EINVAL;
h2_stream * stream;
- int rv;
+ int rv = 0;
- (void)flags;
- if (!is_accepting_streams(session)) {
- /* ignore */
- return 0;
+ stream = h2_session_stream_get(session, stream_id);
+ if (stream) {
+ status = h2_stream_recv_DATA(stream, flags, data, len);
}
-
- stream = get_stream(session, stream_id);
- if (!stream) {
+ else {
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03064)
"h2_stream(%ld-%d): on_data_chunk for unknown stream",
session->id, (int)stream_id);
- rv = nghttp2_submit_rst_stream(ngh2, NGHTTP2_FLAG_NONE, stream_id,
- NGHTTP2_INTERNAL_ERROR);
- if (nghttp2_is_fatal(rv)) {
- return NGHTTP2_ERR_CALLBACK_FAILURE;
- }
- return 0;
- }
-
- /* FIXME: enabling setting EOS this way seems to break input handling
- * in mod_proxy_http2. why? */
- status = h2_stream_write_data(stream, (const char *)data, len,
- 0 /*flags & NGHTTP2_FLAG_END_STREAM*/);
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, session->c,
- "h2_stream(%ld-%d): data_chunk_recv, written %ld bytes",
- session->id, stream_id, (long)len);
- if (status != APR_SUCCESS) {
- update_window(session, stream_id, len);
- rv = nghttp2_submit_rst_stream(ngh2, NGHTTP2_FLAG_NONE, stream_id,
- H2_STREAM_RST(stream, H2_ERR_INTERNAL_ERROR));
- if (nghttp2_is_fatal(rv)) {
- return NGHTTP2_ERR_CALLBACK_FAILURE;
- }
+ rv = NGHTTP2_ERR_CALLBACK_FAILURE;
}
- return 0;
-}
-
-static apr_status_t stream_release(h2_session *session,
- h2_stream *stream,
- uint32_t error_code)
-{
- conn_rec *c = session->c;
- apr_bucket *b;
- apr_status_t status;
- if (!error_code) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
- "h2_stream(%ld-%d): handled, closing",
- session->id, (int)stream->id);
- if (H2_STREAM_CLIENT_INITIATED(stream->id)) {
- if (stream->id > session->local.completed_max) {
- session->local.completed_max = stream->id;
- }
- }
- }
- else {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(03065)
- "h2_stream(%ld-%d): closing with err=%d %s",
- session->id, (int)stream->id, (int)error_code,
- h2_h2_err_description(error_code));
- h2_stream_rst(stream, error_code);
+ if (status != APR_SUCCESS) {
+ /* count this as consumed explicitly as no one will read it */
+ nghttp2_session_consume(session->ngh2, stream_id, len);
}
-
- b = h2_bucket_eos_create(c->bucket_alloc, stream);
- APR_BRIGADE_INSERT_TAIL(session->bbtmp, b);
- status = h2_conn_io_pass(&session->io, session->bbtmp);
- apr_brigade_cleanup(session->bbtmp);
- return status;
+ return rv;
}
static int on_stream_close_cb(nghttp2_session *ngh2, int32_t stream_id,
@@ -361,9 +256,15 @@ static int on_stream_close_cb(nghttp2_se
h2_stream *stream;
(void)ngh2;
- stream = get_stream(session, stream_id);
+ stream = h2_session_stream_get(session, stream_id);
if (stream) {
- stream_release(session, stream, error_code);
+ if (error_code) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
+ H2_STRM_LOG(APLOGNO(03065), stream,
+ "closing with err=%d %s"),
+ (int)error_code, h2_h2_err_description(error_code));
+ h2_stream_rst(stream, error_code);
+ }
}
return 0;
}
@@ -377,12 +278,12 @@ static int on_begin_headers_cb(nghttp2_s
/* We may see HEADERs at the start of a stream or after all DATA
* streams to carry trailers. */
(void)ngh2;
- s = get_stream(session, frame->hd.stream_id);
+ s = h2_session_stream_get(session, frame->hd.stream_id);
if (s) {
/* nop */
}
else {
- s = h2_session_open_stream(userp, frame->hd.stream_id, 0, NULL);
+ s = h2_session_open_stream(userp, frame->hd.stream_id, 0);
}
return s? 0 : NGHTTP2_ERR_START_STREAM_NOT_ALLOWED;
}
@@ -398,30 +299,17 @@ static int on_header_cb(nghttp2_session
apr_status_t status;
(void)flags;
- if (!is_accepting_streams(session)) {
- /* just ignore */
- return 0;
- }
-
- stream = get_stream(session, frame->hd.stream_id);
+ stream = h2_session_stream_get(session, frame->hd.stream_id);
if (!stream) {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
- APLOGNO(02920)
- "h2_session: stream(%ld-%d): on_header unknown stream",
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(02920)
+ "h2_stream(%ld-%d): on_header unknown stream",
session->id, (int)frame->hd.stream_id);
return NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE;
}
status = h2_stream_add_header(stream, (const char *)name, namelen,
(const char *)value, valuelen);
- if (status == APR_ECONNRESET) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, session->c,
- "h2-stream(%ld-%d): on_header, reset stream",
- session->id, stream->id);
- nghttp2_submit_rst_stream(ngh2, NGHTTP2_FLAG_NONE, stream->id,
- NGHTTP2_INTERNAL_ERROR);
- }
- else if (status != APR_SUCCESS && !stream->response) {
+ if (status != APR_SUCCESS && !h2_stream_is_ready(stream)) {
return NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE;
}
return 0;
@@ -437,16 +325,17 @@ static int on_frame_recv_cb(nghttp2_sess
void *userp)
{
h2_session *session = (h2_session *)userp;
- apr_status_t status = APR_SUCCESS;
h2_stream *stream;
+ apr_status_t rv = APR_SUCCESS;
if (APLOGcdebug(session->c)) {
char buffer[256];
h2_util_frame_print(frame, buffer, sizeof(buffer)/sizeof(buffer[0]));
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03066)
- "h2_session(%ld): recv FRAME[%s], frames=%ld/%ld (r/s)",
- session->id, buffer, (long)session->frames_received,
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
+ H2_SSSN_LOG(APLOGNO(03066), session,
+ "recv FRAME[%s], frames=%ld/%ld (r/s)"),
+ buffer, (long)session->frames_received,
(long)session->frames_sent);
}
@@ -456,49 +345,25 @@ static int on_frame_recv_cb(nghttp2_sess
/* This can be HEADERS for a new stream, defining the request,
* or HEADER may come after DATA at the end of a stream as in
* trailers */
- stream = get_stream(session, frame->hd.stream_id);
+ stream = h2_session_stream_get(session, frame->hd.stream_id);
if (stream) {
- int eos = (frame->hd.flags & NGHTTP2_FLAG_END_STREAM);
-
- if (h2_stream_is_scheduled(stream)) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
- "h2_stream(%ld-%d): TRAILER, eos=%d",
- session->id, frame->hd.stream_id, eos);
- if (eos) {
- status = h2_stream_close_input(stream);
- }
- }
- else {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
- "h2_stream(%ld-%d): HEADER, eos=%d",
- session->id, frame->hd.stream_id, eos);
- status = stream_schedule(session, stream, eos);
- }
- }
- else {
- status = APR_EINVAL;
+ rv = h2_stream_recv_frame(stream, NGHTTP2_HEADERS, frame->hd.flags);
}
break;
case NGHTTP2_DATA:
- stream = get_stream(session, frame->hd.stream_id);
+ stream = h2_session_stream_get(session, frame->hd.stream_id);
if (stream) {
- int eos = (frame->hd.flags & NGHTTP2_FLAG_END_STREAM);
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
- "h2_stream(%ld-%d): DATA, len=%ld, eos=%d",
- session->id, frame->hd.stream_id,
- (long)frame->hd.length, eos);
- if (eos) {
- status = h2_stream_close_input(stream);
- }
- }
- else {
- status = APR_EINVAL;
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
+ H2_STRM_LOG(APLOGNO(02923), stream,
+ "DATA, len=%ld, flags=%d"),
+ (long)frame->hd.length, frame->hd.flags);
+ rv = h2_stream_recv_frame(stream, NGHTTP2_DATA, frame->hd.flags);
}
break;
case NGHTTP2_PRIORITY:
session->reprioritize = 1;
ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
- "h2_session: stream(%ld-%d): PRIORITY frame "
+ "h2_stream(%ld-%d): PRIORITY frame "
" weight=%d, dependsOn=%d, exclusive=%d",
session->id, (int)frame->hd.stream_id,
frame->priority.pri_spec.weight,
@@ -507,18 +372,17 @@ static int on_frame_recv_cb(nghttp2_sess
break;
case NGHTTP2_WINDOW_UPDATE:
ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
- "h2_session: stream(%ld-%d): WINDOW_UPDATE "
- "incr=%d",
+ "h2_stream(%ld-%d): WINDOW_UPDATE incr=%d",
session->id, (int)frame->hd.stream_id,
frame->window_update.window_size_increment);
break;
case NGHTTP2_RST_STREAM:
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03067)
- "h2_session(%ld-%d): RST_STREAM by client, errror=%d",
+ "h2_stream(%ld-%d): RST_STREAM by client, errror=%d",
session->id, (int)frame->hd.stream_id,
(int)frame->rst_stream.error_code);
- stream = get_stream(session, frame->hd.stream_id);
- if (stream && stream->request && stream->request->initiated_on) {
+ stream = h2_session_stream_get(session, frame->hd.stream_id);
+ if (stream && stream->initiated_on) {
++session->pushes_reset;
}
else {
@@ -526,9 +390,16 @@ static int on_frame_recv_cb(nghttp2_sess
}
break;
case NGHTTP2_GOAWAY:
- session->remote.accepted_max = frame->goaway.last_stream_id;
- session->remote.error = frame->goaway.error_code;
- dispatch_event(session, H2_SESSION_EV_REMOTE_GOAWAY, 0, NULL);
+ if (frame->goaway.error_code == 0
+ && frame->goaway.last_stream_id == ((1u << 31) - 1)) {
+ /* shutdown notice. Should not come from a client... */
+ session->remote.accepting = 0;
+ }
+ else {
+ session->remote.accepted_max = frame->goaway.last_stream_id;
+ dispatch_event(session, H2_SESSION_EV_REMOTE_GOAWAY,
+ frame->goaway.error_code, NULL);
+ }
break;
default:
if (APLOGctrace2(session->c)) {
@@ -537,27 +408,21 @@ static int on_frame_recv_cb(nghttp2_sess
h2_util_frame_print(frame, buffer,
sizeof(buffer)/sizeof(buffer[0]));
ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
- "h2_session: on_frame_rcv %s", buffer);
+ H2_SSSN_MSG(session, "on_frame_rcv %s"), buffer);
}
break;
}
+ return (APR_SUCCESS == rv)? 0 : NGHTTP2_ERR_PROTO;
+}
- if (status != APR_SUCCESS) {
- int rv;
-
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c,
- APLOGNO(02923)
- "h2_session: stream(%ld-%d): error handling frame",
- session->id, (int)frame->hd.stream_id);
- rv = nghttp2_submit_rst_stream(ng2s, NGHTTP2_FLAG_NONE,
- frame->hd.stream_id,
- NGHTTP2_INTERNAL_ERROR);
- if (nghttp2_is_fatal(rv)) {
- return NGHTTP2_ERR_CALLBACK_FAILURE;
- }
+static int h2_session_continue_data(h2_session *session) {
+ if (h2_mplx_has_master_events(session->mplx)) {
+ return 0;
}
-
- return 0;
+ if (h2_conn_io_needs_flush(&session->io)) {
+ return 0;
+ }
+ return 1;
}
static char immortal_zeros[H2_MAX_PADLEN];
@@ -580,24 +445,28 @@ static int on_send_data_cb(nghttp2_sessi
(void)ngh2;
(void)source;
+ if (!h2_session_continue_data(session)) {
+ return NGHTTP2_ERR_WOULDBLOCK;
+ }
+
if (frame->data.padlen > H2_MAX_PADLEN) {
return NGHTTP2_ERR_PROTO;
}
padlen = (unsigned char)frame->data.padlen;
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
- "h2_stream(%ld-%d): send_data_cb for %ld bytes",
- session->id, (int)stream_id, (long)length);
-
- stream = get_stream(session, stream_id);
+ stream = h2_session_stream_get(session, stream_id);
if (!stream) {
ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_NOTFOUND, session->c,
APLOGNO(02924)
- "h2_stream(%ld-%d): send_data, lookup stream",
+ "h2_stream(%ld-%d): send_data, stream not found",
session->id, (int)stream_id);
return NGHTTP2_ERR_CALLBACK_FAILURE;
}
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
+ H2_STRM_MSG(stream, "send_data_cb for %ld bytes"),
+ (long)length);
+
status = h2_conn_io_write(&session->io, (const char *)framehd, 9);
if (padlen && status == APR_SUCCESS) {
status = h2_conn_io_write(&session->io, (const char *)&padlen, 1);
@@ -605,23 +474,22 @@ static int on_send_data_cb(nghttp2_sessi
if (status != APR_SUCCESS) {
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, session->c,
- "h2_stream(%ld-%d): writing frame header",
- session->id, (int)stream_id);
+ H2_STRM_MSG(stream, "writing frame header"));
return NGHTTP2_ERR_CALLBACK_FAILURE;
}
status = h2_stream_read_to(stream, session->bbtmp, &len, &eos);
if (status != APR_SUCCESS) {
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, session->c,
- "h2_stream(%ld-%d): send_data_cb, reading stream",
- session->id, (int)stream_id);
+ H2_STRM_MSG(stream, "send_data_cb, reading stream"));
+ apr_brigade_cleanup(session->bbtmp);
return NGHTTP2_ERR_CALLBACK_FAILURE;
}
else if (len != length) {
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, session->c,
- "h2_stream(%ld-%d): send_data_cb, wanted %ld bytes, "
- "got %ld from stream",
- session->id, (int)stream_id, (long)length, (long)len);
+ H2_STRM_MSG(stream, "send_data_cb, wanted %ld bytes, "
+ "got %ld from stream"), (long)length, (long)len);
+ apr_brigade_cleanup(session->bbtmp);
return NGHTTP2_ERR_CALLBACK_FAILURE;
}
@@ -632,17 +500,16 @@ static int on_send_data_cb(nghttp2_sessi
}
status = h2_conn_io_pass(&session->io, session->bbtmp);
-
apr_brigade_cleanup(session->bbtmp);
+
if (status == APR_SUCCESS) {
- stream->data_frames_sent++;
+ stream->out_data_frames++;
+ stream->out_data_octets += length;
return 0;
}
else {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c,
- APLOGNO(02925)
- "h2_stream(%ld-%d): failed send_data_cb",
- session->id, (int)stream_id);
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c,
+ H2_STRM_LOG(APLOGNO(02925), stream, "failed send_data_cb"));
return NGHTTP2_ERR_CALLBACK_FAILURE;
}
}
@@ -652,19 +519,62 @@ static int on_frame_send_cb(nghttp2_sess
void *user_data)
{
h2_session *session = user_data;
+ h2_stream *stream;
+ int stream_id = frame->hd.stream_id;
+
+ ++session->frames_sent;
+ switch (frame->hd.type) {
+ case NGHTTP2_PUSH_PROMISE:
+ /* PUSH_PROMISE we report on the promised stream */
+ stream_id = frame->push_promise.promised_stream_id;
+ break;
+ default:
+ break;
+ }
+
if (APLOGcdebug(session->c)) {
char buffer[256];
h2_util_frame_print(frame, buffer, sizeof(buffer)/sizeof(buffer[0]));
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03068)
- "h2_session(%ld): sent FRAME[%s], frames=%ld/%ld (r/s)",
- session->id, buffer, (long)session->frames_received,
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
+ H2_SSSN_LOG(APLOGNO(03068), session,
+ "sent FRAME[%s], frames=%ld/%ld (r/s)"),
+ buffer, (long)session->frames_received,
(long)session->frames_sent);
}
- ++session->frames_sent;
+
+ stream = h2_session_stream_get(session, stream_id);
+ if (stream) {
+ h2_stream_send_frame(stream, frame->hd.type, frame->hd.flags);
+ }
return 0;
}
+#ifdef H2_NG2_INVALID_HEADER_CB
+static int on_invalid_header_cb(nghttp2_session *ngh2,
+ const nghttp2_frame *frame,
+ const uint8_t *name, size_t namelen,
+ const uint8_t *value, size_t valuelen,
+ uint8_t flags, void *user_data)
+{
+ h2_session *session = user_data;
+ h2_stream *stream;
+
+ if (APLOGcdebug(session->c)) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03456)
+ "h2_stream(%ld-%d): invalid header '%s: %s'",
+ session->id, (int)frame->hd.stream_id,
+ apr_pstrndup(session->pool, (const char *)name, namelen),
+ apr_pstrndup(session->pool, (const char *)value, valuelen));
+ }
+ stream = h2_session_stream_get(session, frame->hd.stream_id);
+ if (stream) {
+ h2_stream_rst(stream, NGHTTP2_PROTOCOL_ERROR);
+ }
+ return 0;
+}
+#endif
+
#define NGH2_SET_CALLBACK(callbacks, name, fn)\
nghttp2_session_callbacks_set_##name##_callback(callbacks, fn)
@@ -687,38 +597,30 @@ static apr_status_t init_callbacks(conn_
NGH2_SET_CALLBACK(*pcb, on_header, on_header_cb);
NGH2_SET_CALLBACK(*pcb, send_data, on_send_data_cb);
NGH2_SET_CALLBACK(*pcb, on_frame_send, on_frame_send_cb);
-
+#ifdef H2_NG2_INVALID_HEADER_CB
+ NGH2_SET_CALLBACK(*pcb, on_invalid_header, on_invalid_header_cb);
+#endif
return APR_SUCCESS;
}
-static void h2_session_destroy(h2_session *session)
+static apr_status_t h2_session_shutdown_notice(h2_session *session)
{
- AP_DEBUG_ASSERT(session);
-
- h2_ihash_clear(session->streams);
- if (session->mplx) {
- h2_mplx_set_consumed_cb(session->mplx, NULL, NULL);
- h2_mplx_release_and_join(session->mplx, session->iowait);
- session->mplx = NULL;
- }
-
- ap_remove_input_filter_byhandle((session->r? session->r->input_filters :
- session->c->input_filters), "H2_IN");
- if (session->ngh2) {
- nghttp2_session_del(session->ngh2);
- session->ngh2 = NULL;
- }
- if (session->c) {
- h2_ctx_clear(session->c);
- }
-
- if (APLOGctrace1(session->c)) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
- "h2_session(%ld): destroy", session->id);
+ apr_status_t status;
+
+ ap_assert(session);
+ if (!session->local.accepting) {
+ return APR_SUCCESS;
}
- if (session->pool) {
- apr_pool_destroy(session->pool);
+
+ nghttp2_submit_shutdown_notice(session->ngh2);
+ session->local.accepting = 0;
+ status = nghttp2_session_send(session->ngh2);
+ if (status == APR_SUCCESS) {
+ status = h2_conn_io_flush(&session->io);
}
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
+ H2_SSSN_LOG(APLOGNO(03457), session, "sent shutdown notice"));
+ return status;
}
static apr_status_t h2_session_shutdown(h2_session *session, int error,
@@ -726,7 +628,10 @@ static apr_status_t h2_session_shutdown(
{
apr_status_t status = APR_SUCCESS;
- AP_DEBUG_ASSERT(session);
+ ap_assert(session);
+ if (session->local.shutdown) {
+ return APR_SUCCESS;
+ }
if (!msg && error) {
msg = nghttp2_strerror(error);
}
@@ -747,40 +652,33 @@ static apr_status_t h2_session_shutdown(
* we have, but no longer accept new ones. Report the max stream
* we have received and discard all new ones. */
}
- nghttp2_submit_goaway(session->ngh2, NGHTTP2_FLAG_NONE,
- session->local.accepted_max,
- error, (uint8_t*)msg, msg? strlen(msg):0);
- status = nghttp2_session_send(session->ngh2);
- if (status == APR_SUCCESS) {
- status = h2_conn_io_flush(&session->io);
- }
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03069)
- "session(%ld): sent GOAWAY, err=%d, msg=%s",
- session->id, error, msg? msg : "");
- dispatch_event(session, H2_SESSION_EV_LOCAL_GOAWAY, error, msg);
- if (force_close) {
- h2_mplx_abort(session->mplx);
+ session->local.accepting = 0;
+ session->local.shutdown = 1;
+ if (!session->c->aborted) {
+ nghttp2_submit_goaway(session->ngh2, NGHTTP2_FLAG_NONE,
+ session->local.accepted_max,
+ error, (uint8_t*)msg, msg? strlen(msg):0);
+ status = nghttp2_session_send(session->ngh2);
+ if (status == APR_SUCCESS) {
+ status = h2_conn_io_flush(&session->io);
+ }
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
+ H2_SSSN_LOG(APLOGNO(03069), session,
+ "sent GOAWAY, err=%d, msg=%s"), error, msg? msg : "");
}
-
+ dispatch_event(session, H2_SESSION_EV_LOCAL_GOAWAY, error, msg);
return status;
}
-static apr_status_t session_pool_cleanup(void *data)
+static apr_status_t session_cleanup(h2_session *session, const char *trigger)
{
- h2_session *session = data;
- /* On a controlled connection shutdown, this gets never
- * called as we deregister and destroy our pool manually.
- * However when we have an async mpm, and handed it our idle
- * connection, it will just cleanup once the connection is closed
- * from the other side (and sometimes even from out side) and
- * here we arrive then.
- */
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
- "session(%ld): pool_cleanup", session->id);
+ conn_rec *c = session->c;
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ H2_SSSN_MSG(session, "pool_cleanup"));
- if (session->state != H2_SESSION_ST_DONE
- && session->state != H2_SESSION_ST_LOCAL_SHUTDOWN) {
+ if (session->state != H2_SESSION_ST_DONE
+ && session->state != H2_SESSION_ST_INIT) {
/* Not good. The connection is being torn down and we have
* not sent a goaway. This is considered a protocol error and
* the client has to assume that any streams "in flight" may have
@@ -789,199 +687,210 @@ static apr_status_t session_pool_cleanup
* connection when sending the next request, this has the effect
* that at least this one request will fail.
*/
- ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, session->c, APLOGNO(03199)
- "session(%ld): connection disappeared without proper "
- "goodbye, clients will be confused, should not happen",
- session->id);
- }
- /* keep us from destroying the pool, since that is already ongoing. */
- session->pool = NULL;
- h2_session_destroy(session);
- return APR_SUCCESS;
-}
-
-static void *session_malloc(size_t size, void *ctx)
-{
- h2_session *session = ctx;
- ap_log_cerror(APLOG_MARK, APLOG_TRACE6, 0, session->c,
- "h2_session(%ld): malloc(%ld)",
- session->id, (long)size);
- return malloc(size);
-}
-
-static void session_free(void *p, void *ctx)
-{
- h2_session *session = ctx;
-
- ap_log_cerror(APLOG_MARK, APLOG_TRACE6, 0, session->c,
- "h2_session(%ld): free()",
- session->id);
- free(p);
-}
-
-static void *session_calloc(size_t n, size_t size, void *ctx)
-{
- h2_session *session = ctx;
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, c,
+ H2_SSSN_LOG(APLOGNO(03199), session,
+ "connection disappeared without proper "
+ "goodbye, clients will be confused, should not happen"));
+ }
- ap_log_cerror(APLOG_MARK, APLOG_TRACE6, 0, session->c,
- "h2_session(%ld): calloc(%ld, %ld)",
- session->id, (long)n, (long)size);
- return calloc(n, size);
+ transit(session, trigger, H2_SESSION_ST_CLEANUP);
+ h2_mplx_release_and_join(session->mplx, session->iowait);
+ session->mplx = NULL;
+
+ ap_assert(session->ngh2);
+ nghttp2_session_del(session->ngh2);
+ session->ngh2 = NULL;
+ h2_ctx_clear(c);
+
+
+ return APR_SUCCESS;
}
-static void *session_realloc(void *p, size_t size, void *ctx)
+static apr_status_t session_pool_cleanup(void *data)
{
- h2_session *session = ctx;
- ap_log_cerror(APLOG_MARK, APLOG_TRACE6, 0, session->c,
- "h2_session(%ld): realloc(%ld)",
- session->id, (long)size);
- return realloc(p, size);
+ conn_rec *c = data;
+ h2_session *session;
+ h2_ctx *ctx = h2_ctx_get(c, 0);
+
+ if (ctx && (session = h2_ctx_session_get(ctx))) {
+ /* if the session is still there, now is the last chance
+ * to perform cleanup. Normally, cleanup should have happened
+ * earlier in the connection pre_close. Main reason is that
+ * any ongoing requests on slave connections might still access
+ * data which has, at this time, already been freed. An example
+ * is mod_ssl that uses request hooks. */
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, c,
+ H2_SSSN_LOG(APLOGNO(10020), session,
+ "session cleanup triggered by pool cleanup. "
+ "this should have happened earlier already."));
+ return session_cleanup(session, "pool cleanup");
+ }
+ return APR_SUCCESS;
}
-static h2_session *h2_session_create_int(conn_rec *c,
- request_rec *r,
- h2_ctx *ctx,
- h2_workers *workers)
+static apr_status_t h2_session_create_int(h2_session **psession,
+ conn_rec *c,
+ request_rec *r,
+ h2_ctx *ctx,
+ h2_workers *workers)
{
nghttp2_session_callbacks *callbacks = NULL;
nghttp2_option *options = NULL;
+ apr_allocator_t *allocator;
+ apr_thread_mutex_t *mutex;
uint32_t n;
-
apr_pool_t *pool = NULL;
- apr_status_t status = apr_pool_create(&pool, c->pool);
h2_session *session;
+ apr_status_t status;
+ int rv;
+
+ *psession = NULL;
+ status = apr_allocator_create(&allocator);
if (status != APR_SUCCESS) {
- return NULL;
+ return status;
+ }
+ apr_allocator_max_free_set(allocator, ap_max_mem_free);
+ apr_pool_create_ex(&pool, c->pool, NULL, allocator);
+ if (!pool) {
+ apr_allocator_destroy(allocator);
+ return APR_ENOMEM;
}
apr_pool_tag(pool, "h2_session");
-
+ apr_allocator_owner_set(allocator, pool);
+ status = apr_thread_mutex_create(&mutex, APR_THREAD_MUTEX_DEFAULT, pool);
+ if (status != APR_SUCCESS) {
+ apr_pool_destroy(pool);
+ return APR_ENOMEM;
+ }
+ apr_allocator_mutex_set(allocator, mutex);
+
session = apr_pcalloc(pool, sizeof(h2_session));
- if (session) {
- int rv;
- nghttp2_mem *mem;
-
- session->id = c->id;
- session->c = c;
- session->r = r;
- session->s = h2_ctx_server_get(ctx);
- session->pool = pool;
- session->config = h2_config_sget(session->s);
- session->workers = workers;
-
- session->state = H2_SESSION_ST_INIT;
- session->local.accepting = 1;
- session->remote.accepting = 1;
-
- apr_pool_pre_cleanup_register(pool, session, session_pool_cleanup);
-
- session->max_stream_count = h2_config_geti(session->config,
- H2_CONF_MAX_STREAMS);
- session->max_stream_mem = h2_config_geti(session->config,
- H2_CONF_STREAM_MAX_MEM);
-
- status = apr_thread_cond_create(&session->iowait, session->pool);
- if (status != APR_SUCCESS) {
- return NULL;
- }
-
- session->streams = h2_ihash_create(session->pool,
- offsetof(h2_stream, id));
- session->mplx = h2_mplx_create(c, session->pool, session->config,
- session->s->timeout, workers);
-
- h2_mplx_set_consumed_cb(session->mplx, update_window, session);
-
- /* Install the connection input filter that feeds the session */
- session->cin = h2_filter_cin_create(session->pool,
- h2_session_receive, session);
- ap_add_input_filter("H2_IN", session->cin, r, c);
-
- h2_conn_io_init(&session->io, c, session->config);
- session->bbtmp = apr_brigade_create(session->pool, c->bucket_alloc);
-
- status = init_callbacks(c, &callbacks);
- if (status != APR_SUCCESS) {
- ap_log_cerror(APLOG_MARK, APLOG_ERR, status, c, APLOGNO(02927)
- "nghttp2: error in init_callbacks");
- h2_session_destroy(session);
- return NULL;
- }
-
- rv = nghttp2_option_new(&options);
- if (rv != 0) {
- ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_EGENERAL, c,
- APLOGNO(02928) "nghttp2_option_new: %s",
- nghttp2_strerror(rv));
- h2_session_destroy(session);
- return NULL;
- }
- nghttp2_option_set_peer_max_concurrent_streams(
- options, (uint32_t)session->max_stream_count);
- /* We need to handle window updates ourself, otherwise we
- * get flooded by nghttp2. */
- nghttp2_option_set_no_auto_window_update(options, 1);
-
- if (APLOGctrace6(c)) {
- mem = apr_pcalloc(session->pool, sizeof(nghttp2_mem));
- mem->mem_user_data = session;
- mem->malloc = session_malloc;
- mem->free = session_free;
- mem->calloc = session_calloc;
- mem->realloc = session_realloc;
-
- rv = nghttp2_session_server_new3(&session->ngh2, callbacks,
- session, options, mem);
- }
- else {
- rv = nghttp2_session_server_new2(&session->ngh2, callbacks,
- session, options);
- }
- nghttp2_session_callbacks_del(callbacks);
- nghttp2_option_del(options);
-
- if (rv != 0) {
- ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_EGENERAL, c,
- APLOGNO(02929) "nghttp2_session_server_new: %s",
- nghttp2_strerror(rv));
- h2_session_destroy(session);
- return NULL;
- }
-
- n = h2_config_geti(session->config, H2_CONF_PUSH_DIARY_SIZE);
- session->push_diary = h2_push_diary_create(session->pool, n);
-
- if (APLOGcdebug(c)) {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(03200)
- "h2_session(%ld) created, max_streams=%d, "
- "stream_mem=%d, workers_limit=%d, workers_max=%d, "
- "push_diary(type=%d,N=%d)",
- session->id, (int)session->max_stream_count,
- (int)session->max_stream_mem,
- session->mplx->workers_limit,
- session->mplx->workers_max,
- session->push_diary->dtype,
- (int)session->push_diary->N);
- }
+ if (!session) {
+ return APR_ENOMEM;
+ }
+
+ *psession = session;
+ session->id = c->id;
+ session->c = c;
+ session->r = r;
+ session->s = h2_ctx_server_get(ctx);
+ session->pool = pool;
+ session->config = h2_config_sget(session->s);
+ session->workers = workers;
+
+ session->state = H2_SESSION_ST_INIT;
+ session->local.accepting = 1;
+ session->remote.accepting = 1;
+
+ session->max_stream_count = h2_config_geti(session->config,
+ H2_CONF_MAX_STREAMS);
+ session->max_stream_mem = h2_config_geti(session->config,
+ H2_CONF_STREAM_MAX_MEM);
+
+ status = apr_thread_cond_create(&session->iowait, session->pool);
+ if (status != APR_SUCCESS) {
+ apr_pool_destroy(pool);
+ return status;
+ }
+
+ session->in_pending = h2_iq_create(session->pool, (int)session->max_stream_count);
+ if (session->in_pending == NULL) {
+ apr_pool_destroy(pool);
+ return APR_ENOMEM;
}
- return session;
-}
-h2_session *h2_session_create(conn_rec *c, h2_ctx *ctx, h2_workers *workers)
-{
- return h2_session_create_int(c, NULL, ctx, workers);
+ session->in_process = h2_iq_create(session->pool, (int)session->max_stream_count);
+ if (session->in_process == NULL) {
+ apr_pool_destroy(pool);
+ return APR_ENOMEM;
+ }
+
+ session->monitor = apr_pcalloc(pool, sizeof(h2_stream_monitor));
+ if (session->monitor == NULL) {
+ apr_pool_destroy(pool);
+ return APR_ENOMEM;
+ }
+ session->monitor->ctx = session;
+ session->monitor->on_state_enter = on_stream_state_enter;
+ session->monitor->on_state_event = on_stream_state_event;
+ session->monitor->on_event = on_stream_event;
+
+ session->mplx = h2_mplx_create(c, session->pool, session->config,
+ workers);
+
+ /* connection input filter that feeds the session */
+ session->cin = h2_filter_cin_create(session);
+ ap_add_input_filter("H2_IN", session->cin, r, c);
+
+ h2_conn_io_init(&session->io, c, session->config);
+ session->bbtmp = apr_brigade_create(session->pool, c->bucket_alloc);
+
+ status = init_callbacks(c, &callbacks);
+ if (status != APR_SUCCESS) {
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, status, c, APLOGNO(02927)
+ "nghttp2: error in init_callbacks");
+ apr_pool_destroy(pool);
+ return status;
+ }
+
+ rv = nghttp2_option_new(&options);
+ if (rv != 0) {
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_EGENERAL, c,
+ APLOGNO(02928) "nghttp2_option_new: %s",
+ nghttp2_strerror(rv));
+ apr_pool_destroy(pool);
+ return status;
+ }
+ nghttp2_option_set_peer_max_concurrent_streams(
+ options, (uint32_t)session->max_stream_count);
+ /* We need to handle window updates ourself, otherwise we
+ * get flooded by nghttp2. */
+ nghttp2_option_set_no_auto_window_update(options, 1);
+
+ rv = nghttp2_session_server_new2(&session->ngh2, callbacks,
+ session, options);
+ nghttp2_session_callbacks_del(callbacks);
+ nghttp2_option_del(options);
+
+ if (rv != 0) {
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_EGENERAL, c,
+ APLOGNO(02929) "nghttp2_session_server_new: %s",
+ nghttp2_strerror(rv));
+ apr_pool_destroy(pool);
+ return APR_ENOMEM;
+ }
+
+ n = h2_config_geti(session->config, H2_CONF_PUSH_DIARY_SIZE);
+ session->push_diary = h2_push_diary_create(session->pool, n);
+
+ if (APLOGcdebug(c)) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c,
+ H2_SSSN_LOG(APLOGNO(03200), session,
+ "created, max_streams=%d, stream_mem=%d, "
+ "workers_limit=%d, workers_max=%d, "
+ "push_diary(type=%d,N=%d)"),
+ (int)session->max_stream_count,
+ (int)session->max_stream_mem,
+ session->mplx->limit_active,
+ session->mplx->max_active,
+ session->push_diary->dtype,
+ (int)session->push_diary->N);
+ }
+
+ apr_pool_pre_cleanup_register(pool, c, session_pool_cleanup);
+ return APR_SUCCESS;
}
-h2_session *h2_session_rcreate(request_rec *r, h2_ctx *ctx, h2_workers *workers)
+apr_status_t h2_session_create(h2_session **psession,
+ conn_rec *c, h2_ctx *ctx, h2_workers *workers)
{
- return h2_session_create_int(r->connection, r, ctx, workers);
+ return h2_session_create_int(psession, c, NULL, ctx, workers);
}
-void h2_session_eoc_callback(h2_session *session)
+apr_status_t h2_session_rcreate(h2_session **psession,
+ request_rec *r, h2_ctx *ctx, h2_workers *workers)
{
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
- "session(%ld): cleanup and destroy", session->id);
- apr_pool_cleanup_kill(session->pool, session, session_pool_cleanup);
- h2_session_destroy(session);
+ return h2_session_create_int(psession, r->connection, r, ctx, workers);
}
static apr_status_t h2_session_start(h2_session *session, int *rv)
@@ -991,7 +900,7 @@ static apr_status_t h2_session_start(h2_
size_t slen;
int win_size;
- AP_DEBUG_ASSERT(session);
+ ap_assert(session);
/* Start the conversation by submitting our SETTINGS frame */
*rv = 0;
if (session->r) {
@@ -1029,7 +938,7 @@ static apr_status_t h2_session_start(h2_
}
/* Now we need to auto-open stream 1 for the request we got. */
- stream = h2_session_open_stream(session, 1, 0, NULL);
+ stream = h2_session_open_stream(session, 1, 0);
if (!stream) {
status = APR_EGENERAL;
ap_log_rerror(APLOG_MARK, APLOG_ERR, status, session->r,
@@ -1038,11 +947,7 @@ static apr_status_t h2_session_start(h2_
return status;
}
- status = h2_stream_set_request(stream, session->r);
- if (status != APR_SUCCESS) {
- return status;
- }
- status = stream_schedule(session, stream, 1);
+ status = h2_stream_set_request_rec(stream, session->r, 1);
if (status != APR_SUCCESS) {
return status;
}
@@ -1059,17 +964,17 @@ static apr_status_t h2_session_start(h2_
++slen;
}
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c, APLOGNO(03201)
- "h2_session(%ld): start, INITIAL_WINDOW_SIZE=%ld, "
- "MAX_CONCURRENT_STREAMS=%d",
- session->id, (long)win_size, (int)session->max_stream_count);
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c,
+ H2_SSSN_LOG(APLOGNO(03201), session,
+ "start, INITIAL_WINDOW_SIZE=%ld, MAX_CONCURRENT_STREAMS=%d"),
+ (long)win_size, (int)session->max_stream_count);
*rv = nghttp2_submit_settings(session->ngh2, NGHTTP2_FLAG_NONE,
settings, slen);
if (*rv != 0) {
status = APR_EGENERAL;
ap_log_cerror(APLOG_MARK, APLOG_ERR, status, session->c,
- APLOGNO(02935) "nghttp2_submit_settings: %s",
- nghttp2_strerror(*rv));
+ H2_SSSN_LOG(APLOGNO(02935), session,
+ "nghttp2_submit_settings: %s"), nghttp2_strerror(*rv));
}
else {
/* use maximum possible value for connection window size. We are only
@@ -1086,7 +991,8 @@ static apr_status_t h2_session_start(h2_
if (*rv != 0) {
status = APR_EGENERAL;
ap_log_cerror(APLOG_MARK, APLOG_ERR, status, session->c,
- APLOGNO(02970) "nghttp2_submit_window_update: %s",
+ H2_SSSN_LOG(APLOGNO(02970), session,
+ "nghttp2_submit_window_update: %s"),
nghttp2_strerror(*rv));
}
}
@@ -1094,6 +1000,10 @@ static apr_status_t h2_session_start(h2_
return status;
}
+static apr_status_t on_stream_headers(h2_session *session, h2_stream *stream,
+ h2_headers *headers, apr_off_t len,
+ int eos);
+
static ssize_t stream_data_cb(nghttp2_session *ng2s,
int32_t stream_id,
uint8_t *buf,
@@ -1107,7 +1017,7 @@ static ssize_t stream_data_cb(nghttp2_se
int eos = 0;
apr_status_t status;
h2_stream *stream;
- AP_DEBUG_ASSERT(session);
+ ap_assert(session);
/* The session wants to send more DATA for the stream. We need
* to find out how much of the requested length we can send without
@@ -1119,19 +1029,20 @@ static ssize_t stream_data_cb(nghttp2_se
(void)ng2s;
(void)buf;
(void)source;
- stream = get_stream(session, stream_id);
+ stream = h2_session_stream_get(session, stream_id);
if (!stream) {
ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, session->c,
APLOGNO(02937)
- "h2_stream(%ld-%d): data requested but stream not found",
+ "h2_stream(%ld-%d): data_cb, stream not found",
session->id, (int)stream_id);
return NGHTTP2_ERR_CALLBACK_FAILURE;
}
-
- AP_DEBUG_ASSERT(!h2_stream_is_suspended(stream));
-
- status = h2_stream_out_prepare(stream, &nread, &eos);
+
+ status = h2_stream_out_prepare(stream, &nread, &eos, NULL);
if (nread) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
+ H2_STRM_MSG(stream, "prepared no_copy, len=%ld, eos=%d"),
+ (long)nread, eos);
*data_flags |= NGHTTP2_DATA_FLAG_NO_COPY;
}
@@ -1140,8 +1051,8 @@ static ssize_t stream_data_cb(nghttp2_se
break;
case APR_ECONNRESET:
- return nghttp2_submit_rst_stream(ng2s, NGHTTP2_FLAG_NONE,
- stream->id, stream->rst_error);
+ case APR_ECONNABORTED:
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
case APR_EAGAIN:
/* If there is no data available, our session will automatically
@@ -1149,96 +1060,62 @@ static ssize_t stream_data_cb(nghttp2_se
* it. Remember at our h2_stream that we need to do this.
*/
nread = 0;
- h2_mplx_suspend_stream(session->mplx, stream->id);
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03071)
- "h2_stream(%ld-%d): suspending",
- session->id, (int)stream_id);
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
+ H2_STRM_LOG(APLOGNO(03071), stream, "suspending"));
return NGHTTP2_ERR_DEFERRED;
default:
nread = 0;
- ap_log_cerror(APLOG_MARK, APLOG_ERR, status, session->c,
- APLOGNO(02938) "h2_stream(%ld-%d): reading data",
- session->id, (int)stream_id);
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, status, session->c,
+ H2_STRM_LOG(APLOGNO(02938), stream, "reading data"));
return NGHTTP2_ERR_CALLBACK_FAILURE;
}
if (eos) {
- apr_table_t *trailers = h2_stream_get_trailers(stream);
- if (trailers && !apr_is_empty_table(trailers)) {
- h2_ngheader *nh;
- int rv;
-
- nh = h2_util_ngheader_make(stream->pool, trailers);
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03072)
- "h2_stream(%ld-%d): submit %d trailers",
- session->id, (int)stream_id,(int) nh->nvlen);
- rv = nghttp2_submit_trailer(ng2s, stream->id, nh->nv, nh->nvlen);
- if (rv < 0) {
- nread = rv;
- }
- *data_flags |= NGHTTP2_DATA_FLAG_NO_END_STREAM;
- }
-
*data_flags |= NGHTTP2_DATA_FLAG_EOF;
}
-
return (ssize_t)nread;
}
-typedef struct {
- nghttp2_nv *nv;
- size_t nvlen;
- size_t offset;
-} nvctx_t;
-
struct h2_stream *h2_session_push(h2_session *session, h2_stream *is,
h2_push *push)
{
- apr_status_t status;
h2_stream *stream;
h2_ngheader *ngh;
- int nid;
+ apr_status_t status;
+ int nid = 0;
- ngh = h2_util_ngheader_make_req(is->pool, push->req);
- nid = nghttp2_submit_push_promise(session->ngh2, 0, is->id,
- ngh->nv, ngh->nvlen, NULL);
- if (nid <= 0) {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03075)
- "h2_stream(%ld-%d): submitting push promise fail: %s",
- session->id, is->id, nghttp2_strerror(nid));
+ status = h2_req_create_ngheader(&ngh, is->pool, push->req);
+ if (status == APR_SUCCESS) {
+ nid = nghttp2_submit_push_promise(session->ngh2, 0, is->id,
+ ngh->nv, ngh->nvlen, NULL);
+ }
+ if (status != APR_SUCCESS || nid <= 0) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c,
+ H2_STRM_LOG(APLOGNO(03075), is,
+ "submitting push promise fail: %s"), nghttp2_strerror(nid));
return NULL;
}
++session->pushes_promised;
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03076)
- "h2_stream(%ld-%d): SERVER_PUSH %d for %s %s on %d",
- session->id, is->id, nid,
- push->req->method, push->req->path, is->id);
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
+ H2_STRM_LOG(APLOGNO(03076), is, "SERVER_PUSH %d for %s %s on %d"),
+ nid, push->req->method, push->req->path, is->id);
- stream = h2_session_open_stream(session, nid, is->id, push->req);
- if (stream) {
- status = stream_schedule(session, stream, 1);
- if (status != APR_SUCCESS) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, session->c,
- "h2_stream(%ld-%d): scheduling push stream",
- session->id, stream->id);
- stream = NULL;
- }
- ++session->unsent_promises;
- }
- else {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03077)
- "h2_stream(%ld-%d): failed to create stream obj %d",
- session->id, is->id, nid);
- }
-
+ stream = h2_session_open_stream(session, nid, is->id);
if (!stream) {
- /* try to tell the client that it should not wait. */
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
+ H2_STRM_LOG(APLOGNO(03077), stream,
+ "failed to create stream obj %d"), nid);
+ /* kill the push_promise */
nghttp2_submit_rst_stream(session->ngh2, NGHTTP2_FLAG_NONE, nid,
NGHTTP2_INTERNAL_ERROR);
+ return NULL;
}
+ h2_session_set_prio(session, stream, push->priority);
+ h2_stream_set_request(stream, push->req);
+ ++session->unsent_promises;
return stream;
}
@@ -1256,19 +1133,23 @@ apr_status_t h2_session_set_prio(h2_sess
#ifdef H2_NG2_CHANGE_PRIO
nghttp2_stream *s_grandpa, *s_parent, *s;
+ if (prio == NULL) {
+ /* we treat this as a NOP */
+ return APR_SUCCESS;
+ }
s = nghttp2_session_find_stream(session->ngh2, stream->id);
if (!s) {
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
- "h2_stream(%ld-%d): lookup of nghttp2_stream failed",
- session->id, stream->id);
+ H2_STRM_MSG(stream, "lookup of nghttp2_stream failed"));
return APR_EINVAL;
}
s_parent = nghttp2_stream_get_parent(s);
if (s_parent) {
nghttp2_priority_spec ps;
- int id_parent, id_grandpa, w_parent, w, rv = 0;
- char *ptype = "AFTER";
+ int id_parent, id_grandpa, w_parent, w;
+ int rv = 0;
+ const char *ptype = "AFTER";
h2_dependency dep = prio->dependency;
id_parent = nghttp2_stream_get_stream_id(s_parent);
@@ -1329,11 +1210,10 @@ apr_status_t h2_session_set_prio(h2_sess
rv = nghttp2_session_change_stream_priority(session->ngh2, stream->id, &ps);
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03203)
- "h2_stream(%ld-%d): PUSH %s, weight=%d, "
- "depends=%d, returned=%d",
- session->id, stream->id, ptype,
- ps.weight, ps.stream_id, rv);
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
+ ""H2_STRM_LOG(APLOGNO(03203), stream,
+ "PUSH %s, weight=%d, depends=%d, returned=%d"),
+ ptype, ps.weight, ps.stream_id, rv);
status = (rv < 0)? APR_EGENERAL : APR_SUCCESS;
}
#else
@@ -1372,7 +1252,7 @@ static apr_status_t h2_session_send(h2_s
apr_socket_timeout_set(socket, saved_timeout);
}
session->have_written = 1;
- if (rv != 0) {
+ if (rv != 0 && rv != NGHTTP2_ERR_WOULDBLOCK) {
if (nghttp2_is_fatal(rv)) {
dispatch_event(session, H2_SESSION_EV_PROTO_ERROR, rv, nghttp2_strerror(rv));
return APR_EGENERAL;
@@ -1386,63 +1266,52 @@ static apr_status_t h2_session_send(h2_s
}
/**
- * A stream was resumed as new output data arrived.
+ * headers for the stream are ready.
*/
-static apr_status_t on_stream_resume(void *ctx, int stream_id)
+static apr_status_t on_stream_headers(h2_session *session, h2_stream *stream,
+ h2_headers *headers, apr_off_t len,
+ int eos)
{
- h2_session *session = ctx;
- h2_stream *stream = get_stream(session, stream_id);
apr_status_t status = APR_SUCCESS;
-
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
- "h2_stream(%ld-%d): on_resume", session->id, stream_id);
- if (stream) {
- int rv = nghttp2_session_resume_data(session->ngh2, stream_id);
- session->have_written = 1;
- ap_log_cerror(APLOG_MARK, nghttp2_is_fatal(rv)?
- APLOG_ERR : APLOG_DEBUG, 0, session->c,
- APLOGNO(02936)
- "h2_stream(%ld-%d): resuming %s",
- session->id, stream->id, rv? nghttp2_strerror(rv) : "");
- }
- return status;
-}
-
-/**
- * A response for the stream is ready.
- */
-static apr_status_t on_stream_response(void *ctx, int stream_id)
-{
- h2_session *session = ctx;
- h2_stream *stream = get_stream(session, stream_id);
- apr_status_t status = APR_SUCCESS;
- h2_response *response;
int rv = 0;
- AP_DEBUG_ASSERT(session);
+ ap_assert(session);
ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
- "h2_stream(%ld-%d): on_response", session->id, stream_id);
- if (!stream) {
- return APR_NOTFOUND;
- }
-
- response = h2_stream_get_response(stream);
- AP_DEBUG_ASSERT(response || stream->rst_error);
-
- if (stream->submitted) {
- rv = NGHTTP2_PROTOCOL_ERROR;
+ H2_STRM_MSG(stream, "on_headers"));
+ if (headers->status < 100) {
+ h2_stream_rst(stream, headers->status);
+ goto leave;
+ }
+ else if (stream->has_response) {
+ h2_ngheader *nh;
+
+ status = h2_res_create_ngtrailer(&nh, stream->pool, headers);
+
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c,
+ H2_STRM_LOG(APLOGNO(03072), stream, "submit %d trailers"),
+ (int)nh->nvlen);
+ if (status == APR_SUCCESS) {
+ rv = nghttp2_submit_trailer(session->ngh2, stream->id,
+ nh->nv, nh->nvlen);
+ }
+ else {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c,
+ H2_STRM_LOG(APLOGNO(10024), stream, "invalid trailers"));
+ h2_stream_rst(stream, NGHTTP2_PROTOCOL_ERROR);
+ }
+ goto leave;
}
- else if (response && response->headers) {
+ else {
nghttp2_data_provider provider, *pprovider = NULL;
h2_ngheader *ngh;
- const h2_priority *prio;
+ const char *note;
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03073)
- "h2_stream(%ld-%d): submit response %d, REMOTE_WINDOW_SIZE=%u",
- session->id, stream->id, response->http_status,
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
+ H2_STRM_LOG(APLOGNO(03073), stream, "submit response %d, REMOTE_WINDOW_SIZE=%u"),
+ headers->status,
(unsigned int)nghttp2_session_get_stream_remote_window_size(session->ngh2, stream->id));
- if (response->content_length != 0) {
+ if (!eos || len > 0) {
memset(&provider, 0, sizeof(provider));
provider.source.fd = stream->id;
provider.read_callback = stream_data_cb;
@@ -1451,7 +1320,7 @@ static apr_status_t on_stream_response(v
/* If this stream is not a pushed one itself,
* and HTTP/2 server push is enabled here,
- * and the response is in the range 200-299 *),
+ * and the response HTTP status is not sth >= 400,
* and the remote side has pushing enabled,
* -> find and perform any pushes on this stream
* *before* we submit the stream response itself.
@@ -1459,50 +1328,70 @@ static apr_status_t on_stream_response(v
* headers that get pushed right afterwards.
*
* *) the response code is relevant, as we do not want to
- * make pushes on 401 or 403 codes, neiterh on 301/302
- * and friends. And if we see a 304, we do not push either
+ * make pushes on 401 or 403 codes and friends.
+ * And if we see a 304, we do not push either
* as the client, having this resource in its cache, might
* also have the pushed ones as well.
*/
- if (stream->request && !stream->request->initiated_on
- && H2_HTTP_2XX(response->http_status)
+ if (!stream->initiated_on
+ && !stream->has_response
+ && stream->request && stream->request->method
+ && !strcmp("GET", stream->request->method)
+ && (headers->status < 400)
+ && (headers->status != 304)
&& h2_session_push_enabled(session)) {
- h2_stream_submit_pushes(stream);
+ h2_stream_submit_pushes(stream, headers);
}
- prio = h2_stream_get_priority(stream);
- if (prio) {
- h2_session_set_prio(session, stream, prio);
- /* no showstopper if that fails for some reason */
+ if (!stream->pref_priority) {
+ stream->pref_priority = h2_stream_get_priority(stream, headers);
}
+ h2_session_set_prio(session, stream, stream->pref_priority);
- ngh = h2_util_ngheader_make_res(stream->pool, response->http_status,
- response->headers);
- rv = nghttp2_submit_response(session->ngh2, response->stream_id,
- ngh->nv, ngh->nvlen, pprovider);
- }
- else {
- int err = H2_STREAM_RST(stream, H2_ERR_PROTOCOL_ERROR);
-
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03074)
- "h2_stream(%ld-%d): RST_STREAM, err=%d",
- session->id, stream->id, err);
+ note = apr_table_get(headers->notes, H2_FILTER_DEBUG_NOTE);
+ if (note && !strcmp("on", note)) {
+ int32_t connFlowIn, connFlowOut;
- rv = nghttp2_submit_rst_stream(session->ngh2, NGHTTP2_FLAG_NONE,
- stream->id, err);
- }
-
- stream->submitted = 1;
- session->have_written = 1;
-
- if (stream->request && stream->request->initiated_on) {
- ++session->pushes_submitted;
- }
- else {
- ++session->responses_submitted;
+ connFlowIn = nghttp2_session_get_effective_local_window_size(session->ngh2);
+ connFlowOut = nghttp2_session_get_remote_window_size(session->ngh2);
+ headers = h2_headers_copy(stream->pool, headers);
+ apr_table_setn(headers->headers, "conn-flow-in",
+ apr_itoa(stream->pool, connFlowIn));
+ apr_table_setn(headers->headers, "conn-flow-out",
+ apr_itoa(stream->pool, connFlowOut));
+ }
+
+ if (headers->status == 103
+ && !h2_config_geti(session->config, H2_CONF_EARLY_HINTS)) {
+ /* suppress sending this to the client, it might have triggered
+ * pushes and served its purpose nevertheless */
+ rv = 0;
+ goto leave;
+ }
+
+ status = h2_res_create_ngheader(&ngh, stream->pool, headers);
+ if (status == APR_SUCCESS) {
+ rv = nghttp2_submit_response(session->ngh2, stream->id,
+ ngh->nv, ngh->nvlen, pprovider);
+ stream->has_response = h2_headers_are_response(headers);
+ session->have_written = 1;
+
+ if (stream->initiated_on) {
+ ++session->pushes_submitted;
+ }
+ else {
+ ++session->responses_submitted;
+ }
+ }
+ else {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c,
+ H2_STRM_LOG(APLOGNO(10025), stream, "invalid response"));
+ h2_stream_rst(stream, NGHTTP2_PROTOCOL_ERROR);
+ }
}
+leave:
if (nghttp2_is_fatal(rv)) {
status = APR_EGENERAL;
dispatch_event(session, H2_SESSION_EV_PROTO_ERROR, rv, nghttp2_strerror(rv));
@@ -1527,32 +1416,80 @@ static apr_status_t on_stream_response(v
return status;
}
-static apr_status_t h2_session_receive(void *ctx, const char *data,
- apr_size_t len, apr_size_t *readlen)
+/**
+ * A stream was resumed as new response/output data arrived.
+ */
+static apr_status_t on_stream_resume(void *ctx, h2_stream *stream)
{
h2_session *session = ctx;
- ssize_t n;
+ apr_status_t status = APR_EAGAIN;
+ int rv;
+ apr_off_t len = 0;
+ int eos = 0;
+ h2_headers *headers;
+
+ ap_assert(stream);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
+ H2_STRM_MSG(stream, "on_resume"));
+
+send_headers:
+ headers = NULL;
+ status = h2_stream_out_prepare(stream, &len, &eos, &headers);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, session->c,
+ H2_STRM_MSG(stream, "prepared len=%ld, eos=%d"),
+ (long)len, eos);
+ if (headers) {
+ status = on_stream_headers(session, stream, headers, len, eos);
+ if (status != APR_SUCCESS || stream->rst_error) {
+ return status;
+ }
+ goto send_headers;
+ }
+ else if (status != APR_EAGAIN) {
+ /* we have DATA to send */
+ if (!stream->has_response) {
+ /* but no response */
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
+ H2_STRM_LOG(APLOGNO(03466), stream,
+ "no response, RST_STREAM"));
+ h2_stream_rst(stream, H2_ERR_PROTOCOL_ERROR);
+ return APR_SUCCESS;
+ }
+ rv = nghttp2_session_resume_data(session->ngh2, stream->id);
+ session->have_written = 1;
+ ap_log_cerror(APLOG_MARK, nghttp2_is_fatal(rv)?
+ APLOG_ERR : APLOG_DEBUG, 0, session->c,
+ H2_STRM_LOG(APLOGNO(02936), stream, "resumed"));
+ }
+ return status;
+}
+
+static void h2_session_in_flush(h2_session *session)
+{
+ int id;
- if (len > 0) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
- "h2_session(%ld): feeding %ld bytes to nghttp2",
- session->id, (long)len);
- n = nghttp2_session_mem_recv(session->ngh2, (const uint8_t *)data, len);
- if (n < 0) {
- if (nghttp2_is_fatal((int)n)) {
- dispatch_event(session, H2_SESSION_EV_PROTO_ERROR, (int)n, nghttp2_strerror(n));
- return APR_EGENERAL;
+ while ((id = h2_iq_shift(session->in_process)) > 0) {
+ h2_stream *stream = h2_session_stream_get(session, id);
+ if (stream) {
+ ap_assert(!stream->scheduled);
+ if (h2_stream_prep_processing(stream) == APR_SUCCESS) {
+ h2_mplx_process(session->mplx, stream, stream_pri_cmp, session);
+ }
+ else {
+ h2_stream_rst(stream, H2_ERR_INTERNAL_ERROR);
}
}
- else {
- *readlen = n;
- session->io.bytes_read += n;
+ }
+
+ while ((id = h2_iq_shift(session->in_pending)) > 0) {
+ h2_stream *stream = h2_session_stream_get(session, id);
+ if (stream) {
+ h2_stream_flush_input(stream);
}
}
- return APR_SUCCESS;
}
-static apr_status_t h2_session_read(h2_session *session, int block)
+static apr_status_t session_read(h2_session *session, apr_size_t readlen, int block)
{
apr_status_t status, rstatus = APR_EAGAIN;
conn_rec *c = session->c;
@@ -1564,7 +1501,7 @@ static apr_status_t h2_session_read(h2_s
status = ap_get_brigade(c->input_filters,
session->bbtmp, AP_MODE_READBYTES,
block? APR_BLOCK_READ : APR_NONBLOCK_READ,
- APR_BUCKET_BUFF_SIZE);
+ H2MAX(APR_BUCKET_BUFF_SIZE, readlen));
/* get rid of any possible data we do not expect to get */
apr_brigade_cleanup(session->bbtmp);
@@ -1573,7 +1510,7 @@ static apr_status_t h2_session_read(h2_s
/* successful read, reset our idle timers */
rstatus = APR_SUCCESS;
if (block) {
- /* successfull blocked read, try unblocked to
+ /* successful blocked read, try unblocked to
* get more. */
block = 0;
}
@@ -1591,15 +1528,14 @@ static apr_status_t h2_session_read(h2_s
|| APR_STATUS_IS_EOF(status)
|| APR_STATUS_IS_EBADF(status)) {
/* common status for a client that has left */
- ap_log_cerror( APLOG_MARK, APLOG_TRACE1, status, c,
- "h2_session(%ld): input gone", session->id);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, c,
+ H2_SSSN_MSG(session, "input gone"));
}
else {
/* uncommon status, log on INFO so that we see this */
ap_log_cerror( APLOG_MARK, APLOG_DEBUG, status, c,
- APLOGNO(02950)
- "h2_session(%ld): error reading, terminating",
- session->id);
+ H2_SSSN_LOG(APLOGNO(02950), session,
+ "error reading, terminating"));
}
return status;
}
@@ -1607,51 +1543,23 @@ static apr_status_t h2_session_read(h2_s
* status. */
return rstatus;
}
- if (!is_accepting_streams(session)) {
- break;
- }
- if ((session->io.bytes_read - read_start) > (64*1024)) {
+ if ((session->io.bytes_read - read_start) > readlen) {
/* read enough in one go, give write a chance */
ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, c,
- "h2_session(%ld): read 64k, returning", session->id);
+ H2_SSSN_MSG(session, "read enough, returning"));
break;
}
}
return rstatus;
}
-static int unsubmitted_iter(void *ctx, void *val)
-{
- h2_stream *stream = val;
- if (h2_stream_needs_submit(stream)) {
- *((int *)ctx) = 1;
- return 0;
- }
- return 1;
-}
-
-static int has_unsubmitted_streams(h2_session *session)
-{
- int has_unsubmitted = 0;
- h2_ihash_iter(session->streams, unsubmitted_iter, &has_unsubmitted);
- return has_unsubmitted;
-}
-
-static int suspended_iter(void *ctx, void *val)
-{
- h2_stream *stream = val;
- if (h2_stream_is_suspended(stream)) {
- *((int *)ctx) = 1;
- return 0;
- }
- return 1;
-}
-
-static int has_suspended_streams(h2_session *session)
+static apr_status_t h2_session_read(h2_session *session, int block)
{
- int has_suspended = 0;
- h2_ihash_iter(session->streams, suspended_iter, &has_suspended);
- return has_suspended;
+ apr_status_t status = session_read(session, session->max_stream_mem
+ * H2MAX(2, session->open_streams),
+ block);
+ h2_session_in_flush(session);
+ return status;
}
static const char *StateNames[] = {
@@ -1660,11 +1568,10 @@ static const char *StateNames[] = {
"IDLE", /* H2_SESSION_ST_IDLE */
"BUSY", /* H2_SESSION_ST_BUSY */
"WAIT", /* H2_SESSION_ST_WAIT */
- "LSHUTDOWN", /* H2_SESSION_ST_LOCAL_SHUTDOWN */
- "RSHUTDOWN", /* H2_SESSION_ST_REMOTE_SHUTDOWN */
+ "CLEANUP", /* H2_SESSION_ST_CLEANUP */
};
-static const char *state_name(h2_session_state state)
+const char *h2_session_state_str(h2_session_state state)
{
if (state >= (sizeof(StateNames)/sizeof(StateNames[0]))) {
return "unknown";
@@ -1672,18 +1579,6 @@ static const char *state_name(h2_session
return StateNames[state];
}
-static int is_accepting_streams(h2_session *session)
-{
- switch (session->state) {
- case H2_SESSION_ST_IDLE:
- case H2_SESSION_ST_BUSY:
- case H2_SESSION_ST_WAIT:
- return 1;
- default:
- return 0;
- }
-}
-
static void update_child_status(h2_session *session, int status, const char *msg)
{
/* Assume that we also change code/msg when something really happened and
@@ -1705,9 +1600,16 @@ static void update_child_status(h2_sessi
static void transit(h2_session *session, const char *action, h2_session_state nstate)
{
if (session->state != nstate) {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03078)
- "h2_session(%ld): transit [%s] -- %s --> [%s]", session->id,
- state_name(session->state), action, state_name(nstate));
+ int loglvl = APLOG_DEBUG;
+ if ((session->state == H2_SESSION_ST_BUSY && nstate == H2_SESSION_ST_WAIT)
+ || (session->state == H2_SESSION_ST_WAIT && nstate == H2_SESSION_ST_BUSY)){
+ loglvl = APLOG_TRACE1;
+ }
+ ap_log_cerror(APLOG_MARK, loglvl, 0, session->c,
+ H2_SSSN_LOG(APLOGNO(03078), session,
+ "transit [%s] -- %s --> [%s]"),
+ h2_session_state_str(session->state), action,
+ h2_session_state_str(nstate));
session->state = nstate;
switch (session->state) {
case H2_SESSION_ST_IDLE:
@@ -1715,12 +1617,6 @@ static void transit(h2_session *session,
SERVER_BUSY_KEEPALIVE
: SERVER_BUSY_READ), "idle");
break;
- case H2_SESSION_ST_REMOTE_SHUTDOWN:
- update_child_status(session, SERVER_CLOSING, "remote goaway");
- break;
- case H2_SESSION_ST_LOCAL_SHUTDOWN:
- update_child_status(session, SERVER_CLOSING, "local goaway");
- break;
case H2_SESSION_ST_DONE:
update_child_status(session, SERVER_CLOSING, "done");
break;
@@ -1745,39 +1641,22 @@ static void h2_session_ev_init(h2_sessio
static void h2_session_ev_local_goaway(h2_session *session, int arg, const char *msg)
{
- session->local.accepting = 0;
- cleanup_streams(session);
- switch (session->state) {
- case H2_SESSION_ST_LOCAL_SHUTDOWN:
- /* already did that? */
- break;
- case H2_SESSION_ST_IDLE:
- case H2_SESSION_ST_REMOTE_SHUTDOWN:
- /* all done */
- transit(session, "local goaway", H2_SESSION_ST_DONE);
- break;
- default:
- transit(session, "local goaway", H2_SESSION_ST_LOCAL_SHUTDOWN);
- break;
+ cleanup_unprocessed_streams(session);
+ if (!session->remote.shutdown) {
+ update_child_status(session, SERVER_CLOSING, "local goaway");
}
+ transit(session, "local goaway", H2_SESSION_ST_DONE);
}
static void h2_session_ev_remote_goaway(h2_session *session, int arg, const char *msg)
{
- session->remote.accepting = 0;
- cleanup_streams(session);
- switch (session->state) {
- case H2_SESSION_ST_REMOTE_SHUTDOWN:
- /* already received that? */
- break;
- case H2_SESSION_ST_IDLE:
- case H2_SESSION_ST_LOCAL_SHUTDOWN:
- /* all done */
- transit(session, "remote goaway", H2_SESSION_ST_DONE);
- break;
- default:
- transit(session, "remote goaway", H2_SESSION_ST_REMOTE_SHUTDOWN);
- break;
+ if (!session->remote.shutdown) {
+ session->remote.error = arg;
+ session->remote.accepting = 0;
+ session->remote.shutdown = 1;
+ cleanup_unprocessed_streams(session);
+ update_child_status(session, SERVER_CLOSING, "remote goaway");
+ transit(session, "remote goaway", H2_SESSION_ST_DONE);
}
}
@@ -1786,14 +1665,14 @@ static void h2_session_ev_conn_error(h2_
switch (session->state) {
case H2_SESSION_ST_INIT:
case H2_SESSION_ST_DONE:
- case H2_SESSION_ST_LOCAL_SHUTDOWN:
/* just leave */
transit(session, "conn error", H2_SESSION_ST_DONE);
break;
default:
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03401)
- "h2_session(%ld): conn error -> shutdown", session->id);
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
+ H2_SSSN_LOG(APLOGNO(03401), session,
+ "conn error -> shutdown"));
h2_session_shutdown(session, arg, msg, 0);
break;
}
@@ -1801,31 +1680,19 @@ static void h2_session_ev_conn_error(h2_
static void h2_session_ev_proto_error(h2_session *session, int arg, const char *msg)
{
- switch (session->state) {
- case H2_SESSION_ST_DONE:
- case H2_SESSION_ST_LOCAL_SHUTDOWN:
- /* just leave */
- transit(session, "proto error", H2_SESSION_ST_DONE);
- break;
-
- default:
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03402)
- "h2_session(%ld): proto error -> shutdown", session->id);
- h2_session_shutdown(session, arg, msg, 0);
- break;
+ if (!session->local.shutdown) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
+ H2_SSSN_LOG(APLOGNO(03402), session,
+ "proto error -> shutdown"));
+ h2_session_shutdown(session, arg, msg, 0);
}
}
static void h2_session_ev_conn_timeout(h2_session *session, int arg, const char *msg)
{
- switch (session->state) {
- case H2_SESSION_ST_LOCAL_SHUTDOWN:
- transit(session, "conn timeout", H2_SESSION_ST_DONE);
- break;
- default:
- h2_session_shutdown(session, arg, msg, 1);
- transit(session, "conn timeout", H2_SESSION_ST_DONE);
- break;
+ transit(session, msg, H2_SESSION_ST_DONE);
+ if (!session->local.shutdown) {
+ h2_session_shutdown(session, arg, msg, 1);
}
}
@@ -1833,8 +1700,6 @@ static void h2_session_ev_no_io(h2_sessi
{
switch (session->state) {
case H2_SESSION_ST_BUSY:
- case H2_SESSION_ST_LOCAL_SHUTDOWN:
- case H2_SESSION_ST_REMOTE_SHUTDOWN:
/* Nothing to READ, nothing to WRITE on the master connection.
* Possible causes:
* - we wait for the client to send us sth
@@ -1842,11 +1707,11 @@ static void h2_session_ev_no_io(h2_sessi
* - we have finished all streams and the client has sent GO_AWAY
*/
ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
- "h2_session(%ld): NO_IO event, %d streams open",
- session->id, session->open_streams);
+ H2_SSSN_MSG(session, "NO_IO event, %d streams open"),
+ session->open_streams);
+ h2_conn_io_flush(&session->io);
if (session->open_streams > 0) {
- if (has_unsubmitted_streams(session)
- || has_suspended_streams(session)) {
+ if (h2_mplx_awaits_data(session->mplx)) {
/* waiting for at least one stream to produce data */
transit(session, "no io", H2_SESSION_ST_WAIT);
}
@@ -1867,7 +1732,7 @@ static void h2_session_ev_no_io(h2_sessi
}
}
}
- else if (is_accepting_streams(session)) {
+ else if (session->local.accepting) {
/* When we have no streams, but accept new, switch to idle */
apr_time_t now = apr_time_now();
transit(session, "no io (keepalive)", H2_SESSION_ST_IDLE);
@@ -1889,18 +1754,6 @@ static void h2_session_ev_no_io(h2_sessi
}
}
-static void h2_session_ev_stream_ready(h2_session *session, int arg, const char *msg)
-{
- switch (session->state) {
- case H2_SESSION_ST_WAIT:
- transit(session, "stream ready", H2_SESSION_ST_BUSY);
- break;
- default:
- /* nop */
- break;
- }
-}
-
static void h2_session_ev_data_read(h2_session *session, int arg, const char *msg)
{
switch (session->state) {
@@ -1930,35 +1783,26 @@ static void h2_session_ev_mpm_stopping(h
{
switch (session->state) {
case H2_SESSION_ST_DONE:
- case H2_SESSION_ST_LOCAL_SHUTDOWN:
/* nop */
break;
default:
- h2_session_shutdown(session, arg, msg, 0);
+ h2_session_shutdown_notice(session);
break;
}
}
static void h2_session_ev_pre_close(h2_session *session, int arg, const char *msg)
{
- switch (session->state) {
- case H2_SESSION_ST_DONE:
- case H2_SESSION_ST_LOCAL_SHUTDOWN:
- /* nop */
- break;
- default:
- h2_session_shutdown(session, arg, msg, 1);
- break;
- }
+ h2_session_shutdown(session, arg, msg, 1);
}
-static void h2_session_ev_stream_open(h2_session *session, int arg, const char *msg)
+static void ev_stream_open(h2_session *session, h2_stream *stream)
{
- ++session->open_streams;
+ h2_iq_append(session->in_process, stream->id);
switch (session->state) {
case H2_SESSION_ST_IDLE:
if (session->open_streams == 1) {
- /* enter tiomeout, since we have a stream again */
+ /* enter timeout, since we have a stream again */
session->idle_until = (session->s->timeout + apr_time_now());
}
break;
@@ -1967,9 +1811,14 @@ static void h2_session_ev_stream_open(h2
}
}
-static void h2_session_ev_stream_done(h2_session *session, int arg, const char *msg)
+static void ev_stream_closed(h2_session *session, h2_stream *stream)
{
- --session->open_streams;
+ apr_bucket *b;
+
+ if (H2_STREAM_CLIENT_INITIATED(stream->id)
+ && (stream->id > session->local.completed_max)) {
+ session->local.completed_max = stream->id;
+ }
switch (session->state) {
case H2_SESSION_ST_IDLE:
if (session->open_streams == 0) {
@@ -1981,6 +1830,90 @@ static void h2_session_ev_stream_done(h2
default:
break;
}
+
+ /* The stream might have data in the buffers of the main connection.
+ * We can only free the allocated resources once all had been written.
+ * Send a special buckets on the connection that gets destroyed when
+ * all preceding data has been handled. On its destruction, it is safe
+ * to purge all resources of the stream. */
+ b = h2_bucket_eos_create(session->c->bucket_alloc, stream);
+ APR_BRIGADE_INSERT_TAIL(session->bbtmp, b);
+ h2_conn_io_pass(&session->io, session->bbtmp);
+ apr_brigade_cleanup(session->bbtmp);
+}
+
+static void on_stream_state_enter(void *ctx, h2_stream *stream)
+{
+ h2_session *session = ctx;
+ /* stream entered a new state */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
+ H2_STRM_MSG(stream, "entered state"));
+ switch (stream->state) {
+ case H2_SS_IDLE: /* stream was created */
+ ++session->open_streams;
+ if (H2_STREAM_CLIENT_INITIATED(stream->id)) {
+ ++session->remote.emitted_count;
+ if (stream->id > session->remote.emitted_max) {
+ session->remote.emitted_max = stream->id;
+ session->local.accepted_max = stream->id;
+ }
+ }
+ else {
+ if (stream->id > session->local.emitted_max) {
+ ++session->local.emitted_count;
+ session->remote.emitted_max = stream->id;
+ }
+ }
+ break;
+ case H2_SS_OPEN: /* stream has request headers */
+ case H2_SS_RSVD_L: /* stream has request headers */
+ ev_stream_open(session, stream);
+ break;
+ case H2_SS_CLOSED_L: /* stream output was closed */
+ break;
+ case H2_SS_CLOSED_R: /* stream input was closed */
+ break;
+ case H2_SS_CLOSED: /* stream in+out were closed */
+ --session->open_streams;
+ ev_stream_closed(session, stream);
+ break;
+ case H2_SS_CLEANUP:
+ h2_mplx_stream_cleanup(session->mplx, stream);
+ break;
+ default:
+ break;
+ }
+}
+
+static void on_stream_event(void *ctx, h2_stream *stream,
+ h2_stream_event_t ev)
+{
+ h2_session *session = ctx;
+ switch (ev) {
+ case H2_SEV_IN_DATA_PENDING:
+ h2_iq_append(session->in_pending, stream->id);
+ break;
+ default:
+ /* NOP */
+ break;
+ }
+}
+
+static void on_stream_state_event(void *ctx, h2_stream *stream,
+ h2_stream_event_t ev)
+{
+ h2_session *session = ctx;
+ switch (ev) {
+ case H2_SEV_CANCELLED:
+ if (session->state != H2_SESSION_ST_DONE) {
+ nghttp2_submit_rst_stream(session->ngh2, NGHTTP2_FLAG_NONE,
+ stream->id, stream->rst_error);
+ }
+ break;
+ default:
+ /* NOP */
+ break;
+ }
}
static void dispatch_event(h2_session *session, h2_session_event_t ev,
@@ -2008,9 +1941,6 @@ static void dispatch_event(h2_session *s
case H2_SESSION_EV_NO_IO:
h2_session_ev_no_io(session, arg, msg);
break;
- case H2_SESSION_EV_STREAM_READY:
- h2_session_ev_stream_ready(session, arg, msg);
- break;
case H2_SESSION_EV_DATA_READ:
h2_session_ev_data_read(session, arg, msg);
break;
@@ -2023,22 +1953,30 @@ static void dispatch_event(h2_session *s
case H2_SESSION_EV_PRE_CLOSE:
h2_session_ev_pre_close(session, arg, msg);
break;
- case H2_SESSION_EV_STREAM_OPEN:
- h2_session_ev_stream_open(session, arg, msg);
- break;
- case H2_SESSION_EV_STREAM_DONE:
- h2_session_ev_stream_done(session, arg, msg);
- break;
default:
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
- "h2_session(%ld): unknown event %d",
- session->id, ev);
+ H2_SSSN_MSG(session, "unknown event %d"), ev);
break;
}
+}
+
+/* trigger window updates, stream resumes and submits */
+static apr_status_t dispatch_master(h2_session *session) {
+ apr_status_t status;
- if (session->state == H2_SESSION_ST_DONE) {
- h2_mplx_abort(session->mplx);
+ status = h2_mplx_dispatch_master_events(session->mplx,
+ on_stream_resume, session);
+ if (status == APR_EAGAIN) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, session->c,
+ H2_SSSN_MSG(session, "no master event available"));
+ }
+ else if (status != APR_SUCCESS) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, session->c,
+ H2_SSSN_MSG(session, "dispatch error"));
+ dispatch_event(session, H2_SESSION_EV_CONN_ERROR,
+ H2_ERR_INTERNAL_ERROR, "dispatch error");
}
+ return status;
}
static const int MAX_WAIT_MICROS = 200 * 1000;
@@ -2051,22 +1989,16 @@ apr_status_t h2_session_process(h2_sessi
if (trace) {
ap_log_cerror( APLOG_MARK, APLOG_TRACE3, status, c,
- "h2_session(%ld): process start, async=%d",
- session->id, async);
+ H2_SSSN_MSG(session, "process start, async=%d"), async);
}
- if (c->cs) {
- c->cs->state = CONN_STATE_WRITE_COMPLETION;
- }
-
- while (1) {
- trace = APLOGctrace3(c);
+ while (session->state != H2_SESSION_ST_DONE) {
session->have_read = session->have_written = 0;
- if (!ap_mpm_query(AP_MPMQ_MPM_STATE, &mpm_state)) {
+ if (session->local.accepting
+ && !ap_mpm_query(AP_MPMQ_MPM_STATE, &mpm_state)) {
if (mpm_state == AP_MPMQ_STOPPING) {
dispatch_event(session, H2_SESSION_EV_MPM_STOPPING, 0, NULL);
- break;
}
}
@@ -2076,51 +2008,56 @@ apr_status_t h2_session_process(h2_sessi
case H2_SESSION_ST_INIT:
ap_update_child_status_from_conn(c->sbh, SERVER_BUSY_READ, c);
if (!h2_is_acceptable_connection(c, 1)) {
- update_child_status(session, SERVER_BUSY_READ, "inadequate security");
- h2_session_shutdown(session, NGHTTP2_INADEQUATE_SECURITY, NULL, 1);
+ update_child_status(session, SERVER_BUSY_READ,
+ "inadequate security");
+ h2_session_shutdown(session,
+ NGHTTP2_INADEQUATE_SECURITY, NULL, 1);
}
else {
update_child_status(session, SERVER_BUSY_READ, "init");
status = h2_session_start(session, &rv);
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, c, APLOGNO(03079)
- "h2_session(%ld): started on %s:%d", session->id,
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, c,
+ H2_SSSN_LOG(APLOGNO(03079), session,
+ "started on %s:%d"),
session->s->server_hostname,
c->local_addr->port);
if (status != APR_SUCCESS) {
- dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, NULL);
+ dispatch_event(session,
+ H2_SESSION_EV_CONN_ERROR, 0, NULL);
}
dispatch_event(session, H2_SESSION_EV_INIT, 0, NULL);
}
break;
case H2_SESSION_ST_IDLE:
- /* make certain, we send everything before we idle */
+ /* We trust our connection into the default timeout/keepalive
+ * handling of the core filters/mpm iff:
+ * - keep_sync_until is not set
+ * - we have an async mpm
+ * - we have no open streams to process
+ * - we are not sitting on a Upgrade: request
+ * - we already have seen at least one request
+ */
if (!session->keep_sync_until && async && !session->open_streams
&& !session->r && session->remote.emitted_count) {
if (trace) {
- ap_log_cerror( APLOG_MARK, APLOG_TRACE3, status, c,
- "h2_session(%ld): async idle, nonblock read, "
- "%d streams open", session->id,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, c,
+ H2_SSSN_MSG(session,
+ "nonblock read, %d streams open"),
session->open_streams);
}
- /* We do not return to the async mpm immediately, since under
- * load, mpms show the tendency to throw keep_alive connections
- * away very rapidly.
- * So, if we are still processing streams, we wait for the
- * normal timeout first and, on timeout, close.
- * If we have no streams, we still wait a short amount of
- * time here for the next frame to arrive, before handing
- * it to keep_alive processing of the mpm.
- */
+ h2_conn_io_flush(&session->io);
status = h2_session_read(session, 0);
if (status == APR_SUCCESS) {
session->have_read = 1;
dispatch_event(session, H2_SESSION_EV_DATA_READ, 0, NULL);
}
- else if (APR_STATUS_IS_EAGAIN(status) || APR_STATUS_IS_TIMEUP(status)) {
+ else if (APR_STATUS_IS_EAGAIN(status)
+ || APR_STATUS_IS_TIMEUP(status)) {
if (apr_time_now() > session->idle_until) {
- dispatch_event(session, H2_SESSION_EV_CONN_TIMEOUT, 0, NULL);
+ dispatch_event(session,
+ H2_SESSION_EV_CONN_TIMEOUT, 0, NULL);
}
else {
status = APR_EAGAIN;
@@ -2128,25 +2065,31 @@ apr_status_t h2_session_process(h2_sessi
}
}
else {
- ap_log_cerror( APLOG_MARK, APLOG_DEBUG, status, c,
- APLOGNO(03403)
- "h2_session(%ld): idle, no data, error",
- session->id);
- dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, "timeout");
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, c,
+ H2_SSSN_LOG(APLOGNO(03403), session,
+ "no data, error"));
+ dispatch_event(session,
+ H2_SESSION_EV_CONN_ERROR, 0, "timeout");
}
}
else {
+ /* make certain, we send everything before we idle */
+ h2_conn_io_flush(&session->io);
if (trace) {
- ap_log_cerror( APLOG_MARK, APLOG_TRACE3, status, c,
- "h2_session(%ld): sync idle, stutter 1-sec, "
- "%d streams open", session->id,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, c,
+ H2_SSSN_MSG(session,
+ "sync, stutter 1-sec, %d streams open"),
session->open_streams);
}
/* We wait in smaller increments, using a 1 second timeout.
* That gives us the chance to check for MPMQ_STOPPING often.
*/
status = h2_mplx_idle(session->mplx);
- if (status != APR_SUCCESS) {
+ if (status == APR_EAGAIN) {
+ dispatch_event(session, H2_SESSION_EV_DATA_READ, 0, NULL);
+ break;
+ }
+ else if (status != APR_SUCCESS) {
dispatch_event(session, H2_SESSION_EV_CONN_ERROR,
H2_ERR_ENHANCE_YOUR_CALM, "less is more");
}
@@ -2168,34 +2111,39 @@ apr_status_t h2_session_process(h2_sessi
}
if (now > session->idle_until) {
if (trace) {
- ap_log_cerror( APLOG_MARK, APLOG_TRACE3, status, c,
- "h2_session(%ld): keepalive timeout",
- session->id);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, c,
+ H2_SSSN_MSG(session,
+ "keepalive timeout"));
}
- dispatch_event(session, H2_SESSION_EV_CONN_TIMEOUT, 0, "timeout");
+ dispatch_event(session,
+ H2_SESSION_EV_CONN_TIMEOUT, 0, "timeout");
}
else if (trace) {
- ap_log_cerror( APLOG_MARK, APLOG_TRACE3, status, c,
- "h2_session(%ld): keepalive, %f sec left",
- session->id, (session->idle_until - now) / 1000000.0f);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, c,
+ H2_SSSN_MSG(session,
+ "keepalive, %f sec left"),
+ (session->idle_until - now) / 1000000.0f);
}
/* continue reading handling */
}
+ else if (APR_STATUS_IS_ECONNABORTED(status)
+ || APR_STATUS_IS_ECONNRESET(status)
+ || APR_STATUS_IS_EOF(status)
+ || APR_STATUS_IS_EBADF(status)) {
+ ap_log_cerror( APLOG_MARK, APLOG_TRACE3, status, c,
+ H2_SSSN_MSG(session, "input gone"));
+ dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, NULL);
+ }
else {
- if (trace) {
- ap_log_cerror( APLOG_MARK, APLOG_TRACE3, status, c,
- "h2_session(%ld): idle(1 sec timeout) "
- "read failed", session->id);
- }
+ ap_log_cerror( APLOG_MARK, APLOG_TRACE3, status, c,
+ H2_SSSN_MSG(session,
+ "(1 sec timeout) read failed"));
dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, "error");
}
}
-
break;
case H2_SESSION_ST_BUSY:
- case H2_SESSION_ST_LOCAL_SHUTDOWN:
- case H2_SESSION_ST_REMOTE_SHUTDOWN:
if (nghttp2_session_want_read(session->ngh2)) {
ap_update_child_status(session->c->sbh, SERVER_BUSY_READ, NULL);
h2_filter_cin_timeout_set(session->cin, session->s->timeout);
@@ -2215,25 +2163,18 @@ apr_status_t h2_session_process(h2_sessi
dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, NULL);
}
}
-
- /* trigger window updates, stream resumes and submits */
- status = h2_mplx_dispatch_master_events(session->mplx,
- on_stream_resume,
- on_stream_response,
- session);
- if (status != APR_SUCCESS) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, c,
- "h2_session(%ld): dispatch error",
- session->id);
- dispatch_event(session, H2_SESSION_EV_CONN_ERROR,
- H2_ERR_INTERNAL_ERROR,
- "dispatch error");
+
+ status = dispatch_master(session);
+ if (status != APR_SUCCESS && status != APR_EAGAIN) {
break;
}
if (nghttp2_session_want_write(session->ngh2)) {
ap_update_child_status(session->c->sbh, SERVER_BUSY_WRITE, NULL);
status = h2_session_send(session);
+ if (status == APR_SUCCESS) {
+ status = h2_conn_io_flush(&session->io);
+ }
if (status != APR_SUCCESS) {
dispatch_event(session, H2_SESSION_EV_CONN_ERROR,
H2_ERR_INTERNAL_ERROR, "writing");
@@ -2254,21 +2195,11 @@ apr_status_t h2_session_process(h2_sessi
case H2_SESSION_ST_WAIT:
if (session->wait_us <= 0) {
session->wait_us = 10;
- session->start_wait = apr_time_now();
if (h2_conn_io_flush(&session->io) != APR_SUCCESS) {
dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, NULL);
break;
}
}
- else if ((apr_time_now() - session->start_wait) >= session->s->timeout) {
- /* waited long enough */
- if (trace) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE3, APR_TIMEUP, c,
- "h2_session: wait for data");
- }
- dispatch_event(session, H2_SESSION_EV_CONN_TIMEOUT, 0, "timeout");
- break;
- }
else {
/* repeating, increase timer for graceful backoff */
session->wait_us = H2MIN(session->wait_us*2, MAX_WAIT_MICROS);
@@ -2287,8 +2218,8 @@ apr_status_t h2_session_process(h2_sessi
}
else if (APR_STATUS_IS_TIMEUP(status)) {
/* go back to checking all inputs again */
- transit(session, "wait cycle", session->local.accepting?
- H2_SESSION_ST_BUSY : H2_SESSION_ST_LOCAL_SHUTDOWN);
+ transit(session, "wait cycle", session->local.shutdown?
+ H2_SESSION_ST_DONE : H2_SESSION_ST_BUSY);
}
else if (APR_STATUS_IS_ECONNRESET(status)
|| APR_STATUS_IS_ECONNABORTED(status)) {
@@ -2296,22 +2227,17 @@ apr_status_t h2_session_process(h2_sessi
}
else {
ap_log_cerror(APLOG_MARK, APLOG_WARNING, status, c,
- APLOGNO(03404)
- "h2_session(%ld): waiting on conditional",
- session->id);
+ H2_SSSN_LOG(APLOGNO(03404), session,
+ "waiting on conditional"));
h2_session_shutdown(session, H2_ERR_INTERNAL_ERROR,
"cond wait error", 0);
}
break;
- case H2_SESSION_ST_DONE:
- status = APR_EOF;
- goto out;
-
default:
ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_EGENERAL, c,
- APLOGNO(03080)
- "h2_session(%ld): unknown state %d", session->id, session->state);
+ H2_SSSN_LOG(APLOGNO(03080), session,
+ "unknown state"));
dispatch_event(session, H2_SESSION_EV_PROTO_ERROR, 0, NULL);
break;
}
@@ -2329,32 +2255,34 @@ apr_status_t h2_session_process(h2_sessi
out:
if (trace) {
ap_log_cerror( APLOG_MARK, APLOG_TRACE3, status, c,
- "h2_session(%ld): [%s] process returns",
- session->id, state_name(session->state));
+ H2_SSSN_MSG(session, "process returns"));
}
if ((session->state != H2_SESSION_ST_DONE)
&& (APR_STATUS_IS_EOF(status)
|| APR_STATUS_IS_ECONNRESET(status)
|| APR_STATUS_IS_ECONNABORTED(status))) {
- dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, NULL);
- }
-
- status = (session->state == H2_SESSION_ST_DONE)? APR_EOF : APR_SUCCESS;
- if (session->state == H2_SESSION_ST_DONE) {
- if (!session->eoc_written) {
- session->eoc_written = 1;
- h2_conn_io_write_eoc(&session->io, session);
- }
+ dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, NULL);
}
-
- return status;
+
+ return (session->state == H2_SESSION_ST_DONE)? APR_EOF : APR_SUCCESS;
}
apr_status_t h2_session_pre_close(h2_session *session, int async)
{
+ apr_status_t status;
+
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
- "h2_session(%ld): pre_close", session->id);
- dispatch_event(session, H2_SESSION_EV_PRE_CLOSE, 0, "timeout");
- return APR_SUCCESS;
+ H2_SSSN_MSG(session, "pre_close"));
+ dispatch_event(session, H2_SESSION_EV_PRE_CLOSE, 0,
+ (session->state == H2_SESSION_ST_IDLE)? "timeout" : NULL);
+ status = session_cleanup(session, "pre_close");
+ if (status == APR_SUCCESS) {
+ /* no one should hold a reference to this session any longer and
+ * the h2_ctx was removed from the connection.
+ * Take the pool (and thus all subpools etc. down now, instead of
+ * during cleanup of main connection pool. */
+ apr_pool_destroy(session->pool);
+ }
+ return status;
}
diff -up --new-file httpd-2.4.23/modules/http2/h2_session.h /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_session.h
--- httpd-2.4.23/modules/http2/h2_session.h 2016-05-23 12:55:29.000000000 +0200
+++ /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_session.h 2017-04-10 17:04:55.000000000 +0200
@@ -31,7 +31,7 @@
* New incoming HEADER frames are converted into a h2_stream+h2_task instance
* that both represent a HTTP/2 stream, but may have separate lifetimes. This
* allows h2_task to be scheduled in other threads without semaphores
- * all over the place. It allows task memory to be freed independant of
+ * all over the place. It allows task memory to be freed independent of
* session lifetime and sessions may close down while tasks are still running.
*
*
@@ -49,9 +49,9 @@ struct h2_mplx;
struct h2_priority;
struct h2_push;
struct h2_push_diary;
-struct h2_response;
struct h2_session;
struct h2_stream;
+struct h2_stream_monitor;
struct h2_task;
struct h2_workers;
@@ -65,13 +65,10 @@ typedef enum {
H2_SESSION_EV_PROTO_ERROR, /* protocol error */
H2_SESSION_EV_CONN_TIMEOUT, /* connection timeout */
H2_SESSION_EV_NO_IO, /* nothing has been read or written */
- H2_SESSION_EV_STREAM_READY, /* stream signalled availability of headers/data */
H2_SESSION_EV_DATA_READ, /* connection data has been read */
H2_SESSION_EV_NGH2_DONE, /* nghttp2 wants neither read nor write anything */
H2_SESSION_EV_MPM_STOPPING, /* the process is stopping */
H2_SESSION_EV_PRE_CLOSE, /* connection will close after this */
- H2_SESSION_EV_STREAM_OPEN, /* stream has been opened */
- H2_SESSION_EV_STREAM_DONE, /* stream has been handled completely */
} h2_session_event_t;
typedef struct h2_session {
@@ -87,7 +84,6 @@ typedef struct h2_session {
struct h2_workers *workers; /* for executing stream tasks */
struct h2_filter_cin *cin; /* connection input filter context */
h2_conn_io io; /* io on httpd conn filters */
- struct h2_ihash_t *streams; /* streams handled by this session */
struct nghttp2_session *ngh2; /* the nghttp2 session (internal use) */
h2_session_state state; /* state session is in */
@@ -96,17 +92,17 @@ typedef struct h2_session {
h2_session_props remote; /* properites of remote session */
unsigned int reprioritize : 1; /* scheduled streams priority changed */
- unsigned int eoc_written : 1; /* h2 eoc bucket written */
unsigned int flush : 1; /* flushing output necessary */
unsigned int have_read : 1; /* session has read client data */
unsigned int have_written : 1; /* session did write data to client */
- apr_interval_time_t wait_us; /* timout during BUSY_WAIT state, micro secs */
+ apr_interval_time_t wait_us; /* timeout during BUSY_WAIT state, micro secs */
struct h2_push_diary *push_diary; /* remember pushes, avoid duplicates */
- int open_streams; /* number of streams open */
+ struct h2_stream_monitor *monitor;/* monitor callbacks for streams */
+ int open_streams; /* number of client streams open */
int unsent_submits; /* number of submitted, but not yet written responses. */
- int unsent_promises; /* number of submitted, but not yet written push promised */
+ int unsent_promises; /* number of submitted, but not yet written push promises */
int responses_submitted; /* number of http/2 responses submitted */
int streams_reset; /* number of http/2 streams reset by client */
@@ -120,7 +116,6 @@ typedef struct h2_session {
apr_size_t max_stream_count; /* max number of open streams */
apr_size_t max_stream_mem; /* max buffer memory for a single stream */
- apr_time_t start_wait; /* Time we started waiting for sth. to happen */
apr_time_t idle_until; /* Time we shut down due to sheer boredom */
apr_time_t keep_sync_until; /* Time we sync wait until passing to async mpm */
@@ -130,34 +125,46 @@ typedef struct h2_session {
char status[64]; /* status message for scoreboard */
int last_status_code; /* the one already reported */
const char *last_status_msg; /* the one already reported */
+
+ struct h2_iqueue *in_pending; /* all streams with input pending */
+ struct h2_iqueue *in_process; /* all streams ready for processing on slave */
+
} h2_session;
+const char *h2_session_state_str(h2_session_state state);
/**
* Create a new h2_session for the given connection.
* The session will apply the configured parameter.
+ * @param psession pointer receiving the created session on success or NULL
* @param c the connection to work on
* @param cfg the module config to apply
* @param workers the worker pool to use
* @return the created session
*/
-h2_session *h2_session_create(conn_rec *c, struct h2_ctx *ctx,
- struct h2_workers *workers);
+apr_status_t h2_session_create(h2_session **psession,
+ conn_rec *c, struct h2_ctx *ctx,
+ struct h2_workers *workers);
/**
* Create a new h2_session for the given request.
* The session will apply the configured parameter.
+ * @param psession pointer receiving the created session on success or NULL
* @param r the request that was upgraded
* @param cfg the module config to apply
* @param workers the worker pool to use
* @return the created session
*/
-h2_session *h2_session_rcreate(request_rec *r, struct h2_ctx *ctx,
- struct h2_workers *workers);
+apr_status_t h2_session_rcreate(h2_session **psession,
+ request_rec *r, struct h2_ctx *ctx,
+ struct h2_workers *workers);
+
+void h2_session_event(h2_session *session, h2_session_event_t ev,
+ int err, const char *msg);
/**
* Process the given HTTP/2 session until it is ended or a fatal
- * error occured.
+ * error occurred.
*
* @param session the sessionm to process
*/
@@ -169,14 +176,7 @@ apr_status_t h2_session_process(h2_sessi
apr_status_t h2_session_pre_close(h2_session *session, int async);
/**
- * Cleanup the session and all objects it still contains. This will not
- * destroy h2_task instances that have not finished yet.
- * @param session the session to destroy
- */
-void h2_session_eoc_callback(h2_session *session);
-
-/**
- * Called when a serious error occured and the session needs to terminate
+ * Called when a serious error occurred and the session needs to terminate
* without further connection io.
* @param session the session to abort
* @param reason the apache status that caused the abort
@@ -188,25 +188,6 @@ void h2_session_abort(h2_session *sessio
*/
void h2_session_close(h2_session *session);
-/* Start submitting the response to a stream request. This is possible
- * once we have all the response headers. */
-apr_status_t h2_session_handle_response(h2_session *session,
- struct h2_stream *stream);
-
-/**
- * Create and register a new stream under the given id.
- *
- * @param session the session to register in
- * @param stream_id the new stream identifier
- * @param initiated_on the stream id this one is initiated on or 0
- * @param req the request for this stream or NULL if not known yet
- * @return the new stream
- */
-struct h2_stream *h2_session_open_stream(h2_session *session, int stream_id,
- int initiated_on,
- const h2_request *req);
-
-
/**
* Returns if client settings have push enabled.
* @param != 0 iff push is enabled in client settings
@@ -214,12 +195,9 @@ struct h2_stream *h2_session_open_stream
int h2_session_push_enabled(h2_session *session);
/**
- * Destroy the stream and release it everywhere. Reclaim all resources.
- * @param session the session to which the stream belongs
- * @param stream the stream to destroy
+ * Look up the stream in this session with the given id.
*/
-apr_status_t h2_session_stream_done(h2_session *session,
- struct h2_stream *stream);
+struct h2_stream *h2_session_stream_get(h2_session *session, int stream_id);
/**
* Submit a push promise on the stream and schedule the new steam for
@@ -237,5 +215,10 @@ apr_status_t h2_session_set_prio(h2_sess
struct h2_stream *stream,
const struct h2_priority *prio);
+#define H2_SSSN_MSG(s, msg) \
+ "h2_session(%ld,%s,%d): "msg, s->id, h2_session_state_str(s->state), \
+ s->open_streams
+
+#define H2_SSSN_LOG(aplogno, s, msg) aplogno H2_SSSN_MSG(s, msg)
#endif /* defined(__mod_h2__h2_session__) */
diff -up --new-file httpd-2.4.23/modules/http2/h2_stream.c /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_stream.c
--- httpd-2.4.23/modules/http2/h2_stream.c 2017-12-27 23:01:33.408186020 +0100
+++ /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_stream.c 2017-09-08 16:36:31.000000000 +0200
@@ -16,6 +16,8 @@
#include <assert.h>
#include <stddef.h>
+#include <apr_strings.h>
+
#include <httpd.h>
#include <http_core.h>
#include <http_connection.h>
@@ -29,11 +31,10 @@
#include "h2_conn.h"
#include "h2_config.h"
#include "h2_h2.h"
-#include "h2_filter.h"
#include "h2_mplx.h"
#include "h2_push.h"
#include "h2_request.h"
-#include "h2_response.h"
+#include "h2_headers.h"
#include "h2_session.h"
#include "h2_stream.h"
#include "h2_task.h"
@@ -42,207 +43,546 @@
#include "h2_util.h"
-static int state_transition[][7] = {
- /* ID OP RL RR CI CO CL */
-/*ID*/{ 1, 0, 0, 0, 0, 0, 0 },
-/*OP*/{ 1, 1, 0, 0, 0, 0, 0 },
-/*RL*/{ 0, 0, 1, 0, 0, 0, 0 },
-/*RR*/{ 0, 0, 0, 1, 0, 0, 0 },
-/*CI*/{ 1, 1, 0, 0, 1, 0, 0 },
-/*CO*/{ 1, 1, 0, 0, 0, 1, 0 },
-/*CL*/{ 1, 1, 0, 0, 1, 1, 1 },
+static const char *h2_ss_str(h2_stream_state_t state)
+{
+ switch (state) {
+ case H2_SS_IDLE:
+ return "IDLE";
+ case H2_SS_RSVD_L:
+ return "RESERVED_LOCAL";
+ case H2_SS_RSVD_R:
+ return "RESERVED_REMOTE";
+ case H2_SS_OPEN:
+ return "OPEN";
+ case H2_SS_CLOSED_L:
+ return "HALF_CLOSED_LOCAL";
+ case H2_SS_CLOSED_R:
+ return "HALF_CLOSED_REMOTE";
+ case H2_SS_CLOSED:
+ return "CLOSED";
+ case H2_SS_CLEANUP:
+ return "CLEANUP";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+const char *h2_stream_state_str(h2_stream *stream)
+{
+ return h2_ss_str(stream->state);
+}
+
+/* Abbreviations for stream transit tables */
+#define S_XXX (-2) /* Programming Error */
+#define S_ERR (-1) /* Protocol Error */
+#define S_NOP (0) /* No Change */
+#define S_IDL (H2_SS_IDL + 1)
+#define S_RS_L (H2_SS_RSVD_L + 1)
+#define S_RS_R (H2_SS_RSVD_R + 1)
+#define S_OPEN (H2_SS_OPEN + 1)
+#define S_CL_L (H2_SS_CLOSED_L + 1)
+#define S_CL_R (H2_SS_CLOSED_R + 1)
+#define S_CLS (H2_SS_CLOSED + 1)
+#define S_CLN (H2_SS_CLEANUP + 1)
+
+/* state transisitions when certain frame types are sent */
+static int trans_on_send[][H2_SS_MAX] = {
+/*S_IDLE,S_RS_R, S_RS_L, S_OPEN, S_CL_R, S_CL_L, S_CLS, S_CLN, */
+{ S_ERR, S_ERR, S_ERR, S_NOP, S_NOP, S_ERR, S_NOP, S_NOP, },/* DATA */
+{ S_ERR, S_ERR, S_CL_R, S_NOP, S_NOP, S_ERR, S_NOP, S_NOP, },/* HEADERS */
+{ S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, },/* PRIORITY */
+{ S_CLS, S_CLS, S_CLS, S_CLS, S_CLS, S_CLS, S_NOP, S_NOP, },/* RST_STREAM */
+{ S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, },/* SETTINGS */
+{ S_RS_L,S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, },/* PUSH_PROMISE */
+{ S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, },/* PING */
+{ S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, },/* GOAWAY */
+{ S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, },/* WINDOW_UPDATE */
+{ S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, },/* CONT */
+};
+/* state transisitions when certain frame types are received */
+static int trans_on_recv[][H2_SS_MAX] = {
+/*S_IDLE,S_RS_R, S_RS_L, S_OPEN, S_CL_R, S_CL_L, S_CLS, S_CLN, */
+{ S_ERR, S_ERR, S_ERR, S_NOP, S_ERR, S_NOP, S_NOP, S_NOP, },/* DATA */
+{ S_OPEN,S_CL_L, S_ERR, S_NOP, S_ERR, S_NOP, S_NOP, S_NOP, },/* HEADERS */
+{ S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, },/* PRIORITY */
+{ S_ERR, S_CLS, S_CLS, S_CLS, S_CLS, S_CLS, S_NOP, S_NOP, },/* RST_STREAM */
+{ S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, },/* SETTINGS */
+{ S_RS_R,S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, },/* PUSH_PROMISE */
+{ S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, },/* PING */
+{ S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, S_ERR, },/* GOAWAY */
+{ S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, },/* WINDOW_UPDATE */
+{ S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, S_NOP, },/* CONT */
+};
+/* state transisitions when certain events happen */
+static int trans_on_event[][H2_SS_MAX] = {
+/*S_IDLE,S_RS_R, S_RS_L, S_OPEN, S_CL_R, S_CL_L, S_CLS, S_CLN, */
+{ S_XXX, S_ERR, S_ERR, S_CL_L, S_CLS, S_XXX, S_XXX, S_XXX, },/* EV_CLOSED_L*/
+{ S_ERR, S_ERR, S_ERR, S_CL_R, S_ERR, S_CLS, S_NOP, S_NOP, },/* EV_CLOSED_R*/
+{ S_CLS, S_CLS, S_CLS, S_CLS, S_CLS, S_CLS, S_NOP, S_NOP, },/* EV_CANCELLED*/
+{ S_NOP, S_XXX, S_XXX, S_XXX, S_XXX, S_CLS, S_CLN, S_XXX, },/* EV_EOS_SENT*/
};
-static void H2_STREAM_OUT_LOG(int lvl, h2_stream *s, char *tag)
+static int on_map(h2_stream_state_t state, int map[H2_SS_MAX])
+{
+ int op = map[state];
+ switch (op) {
+ case S_XXX:
+ case S_ERR:
+ return op;
+ case S_NOP:
+ return state;
+ default:
+ return op-1;
+ }
+}
+
+static int on_frame(h2_stream_state_t state, int frame_type,
+ int frame_map[][H2_SS_MAX], apr_size_t maxlen)
+{
+ ap_assert(frame_type >= 0);
+ ap_assert(state >= 0);
+ if (frame_type >= maxlen) {
+ return state; /* NOP, ignore unknown frame types */
+ }
+ return on_map(state, frame_map[frame_type]);
+}
+
+static int on_frame_send(h2_stream_state_t state, int frame_type)
+{
+ return on_frame(state, frame_type, trans_on_send, H2_ALEN(trans_on_send));
+}
+
+static int on_frame_recv(h2_stream_state_t state, int frame_type)
+{
+ return on_frame(state, frame_type, trans_on_recv, H2_ALEN(trans_on_recv));
+}
+
+static int on_event(h2_stream* stream, h2_stream_event_t ev)
+{
+ if (stream->monitor && stream->monitor->on_event) {
+ stream->monitor->on_event(stream->monitor->ctx, stream, ev);
+ }
+ if (ev < H2_ALEN(trans_on_event)) {
+ return on_map(stream->state, trans_on_event[ev]);
+ }
+ return stream->state;
+}
+
+static void H2_STREAM_OUT_LOG(int lvl, h2_stream *s, const char *tag)
{
if (APLOG_C_IS_LEVEL(s->session->c, lvl)) {
conn_rec *c = s->session->c;
char buffer[4 * 1024];
- const char *line = "(null)";
apr_size_t len, bmax = sizeof(buffer)/sizeof(buffer[0]);
- len = h2_util_bb_print(buffer, bmax, tag, "", s->buffer);
- ap_log_cerror(APLOG_MARK, lvl, 0, c, "bb_dump(%ld-%d): %s",
- c->id, s->id, len? buffer : line);
+ len = h2_util_bb_print(buffer, bmax, tag, "", s->out_buffer);
+ ap_log_cerror(APLOG_MARK, lvl, 0, c,
+ H2_STRM_MSG(s, "out-buffer(%s)"), len? buffer : "empty");
+ }
+}
+
+static apr_status_t setup_input(h2_stream *stream) {
+ if (stream->input == NULL) {
+ int empty = (stream->input_eof
+ && (!stream->in_buffer
+ || APR_BRIGADE_EMPTY(stream->in_buffer)));
+ if (!empty) {
+ h2_beam_create(&stream->input, stream->pool, stream->id,
+ "input", H2_BEAM_OWNER_SEND, 0,
+ stream->session->s->timeout);
+ h2_beam_send_from(stream->input, stream->pool);
+ }
}
+ return APR_SUCCESS;
}
-static int set_state(h2_stream *stream, h2_stream_state_t state)
+static apr_status_t close_input(h2_stream *stream)
{
- int allowed = state_transition[state][stream->state];
- if (allowed) {
- stream->state = state;
- return 1;
+ conn_rec *c = stream->session->c;
+ apr_status_t status = APR_SUCCESS;
+
+ stream->input_eof = 1;
+ if (stream->input && h2_beam_is_closed(stream->input)) {
+ return APR_SUCCESS;
}
- ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, stream->session->c, APLOGNO(03081)
- "h2_stream(%ld-%d): invalid state transition from %d to %d",
- stream->session->id, stream->id, stream->state, state);
- return 0;
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
+ H2_STRM_MSG(stream, "closing input"));
+ if (stream->rst_error) {
+ return APR_ECONNRESET;
+ }
+
+ if (stream->trailers && !apr_is_empty_table(stream->trailers)) {
+ apr_bucket *b;
+ h2_headers *r;
+
+ if (!stream->in_buffer) {
+ stream->in_buffer = apr_brigade_create(stream->pool, c->bucket_alloc);
+ }
+
+ r = h2_headers_create(HTTP_OK, stream->trailers, NULL, stream->pool);
+ stream->trailers = NULL;
+ b = h2_bucket_headers_create(c->bucket_alloc, r);
+ APR_BRIGADE_INSERT_TAIL(stream->in_buffer, b);
+
+ b = apr_bucket_eos_create(c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(stream->in_buffer, b);
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c,
+ H2_STRM_MSG(stream, "added trailers"));
+ h2_stream_dispatch(stream, H2_SEV_IN_DATA_PENDING);
+ }
+ if (stream->input) {
+ h2_stream_flush_input(stream);
+ return h2_beam_close(stream->input);
+ }
+ return status;
}
-static int close_input(h2_stream *stream)
+static apr_status_t close_output(h2_stream *stream)
{
- switch (stream->state) {
- case H2_STREAM_ST_CLOSED_INPUT:
- case H2_STREAM_ST_CLOSED:
- return 0; /* ignore, idempotent */
- case H2_STREAM_ST_CLOSED_OUTPUT:
- /* both closed now */
- set_state(stream, H2_STREAM_ST_CLOSED);
- break;
- default:
- /* everything else we jump to here */
- set_state(stream, H2_STREAM_ST_CLOSED_INPUT);
- break;
+ if (!stream->output || h2_beam_is_closed(stream->output)) {
+ return APR_SUCCESS;
}
- return 1;
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
+ H2_STRM_MSG(stream, "closing output"));
+ return h2_beam_leave(stream->output);
}
-static int input_closed(h2_stream *stream)
+static void on_state_enter(h2_stream *stream)
{
- switch (stream->state) {
- case H2_STREAM_ST_OPEN:
- case H2_STREAM_ST_CLOSED_OUTPUT:
- return 0;
- default:
- return 1;
+ if (stream->monitor && stream->monitor->on_state_enter) {
+ stream->monitor->on_state_enter(stream->monitor->ctx, stream);
+ }
+}
+
+static void on_state_event(h2_stream *stream, h2_stream_event_t ev)
+{
+ if (stream->monitor && stream->monitor->on_state_event) {
+ stream->monitor->on_state_event(stream->monitor->ctx, stream, ev);
}
}
-static int close_output(h2_stream *stream)
+static void on_state_invalid(h2_stream *stream)
{
+ if (stream->monitor && stream->monitor->on_state_invalid) {
+ stream->monitor->on_state_invalid(stream->monitor->ctx, stream);
+ }
+ /* stream got an event/frame invalid in its state */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
+ H2_STRM_MSG(stream, "invalid state event"));
switch (stream->state) {
- case H2_STREAM_ST_CLOSED_OUTPUT:
- case H2_STREAM_ST_CLOSED:
- return 0; /* ignore, idempotent */
- case H2_STREAM_ST_CLOSED_INPUT:
- /* both closed now */
- set_state(stream, H2_STREAM_ST_CLOSED);
+ case H2_SS_OPEN:
+ case H2_SS_RSVD_L:
+ case H2_SS_RSVD_R:
+ case H2_SS_CLOSED_L:
+ case H2_SS_CLOSED_R:
+ h2_stream_rst(stream, H2_ERR_INTERNAL_ERROR);
break;
default:
- /* everything else we jump to here */
- set_state(stream, H2_STREAM_ST_CLOSED_OUTPUT);
break;
}
- return 1;
}
-static int input_open(const h2_stream *stream)
+static apr_status_t transit(h2_stream *stream, int new_state)
{
- switch (stream->state) {
- case H2_STREAM_ST_OPEN:
- case H2_STREAM_ST_CLOSED_OUTPUT:
- return 1;
+ if (new_state == stream->state) {
+ return APR_SUCCESS;
+ }
+ else if (new_state < 0) {
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, stream->session->c,
+ H2_STRM_LOG(APLOGNO(03081), stream, "invalid transition"));
+ on_state_invalid(stream);
+ return APR_EINVAL;
+ }
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
+ H2_STRM_MSG(stream, "transit to [%s]"), h2_ss_str(new_state));
+ stream->state = new_state;
+ switch (new_state) {
+ case H2_SS_IDLE:
+ break;
+ case H2_SS_RSVD_L:
+ close_input(stream);
+ break;
+ case H2_SS_RSVD_R:
+ break;
+ case H2_SS_OPEN:
+ break;
+ case H2_SS_CLOSED_L:
+ close_output(stream);
+ break;
+ case H2_SS_CLOSED_R:
+ close_input(stream);
+ break;
+ case H2_SS_CLOSED:
+ close_input(stream);
+ close_output(stream);
+ if (stream->out_buffer) {
+ apr_brigade_cleanup(stream->out_buffer);
+ }
+ break;
+ case H2_SS_CLEANUP:
+ break;
+ }
+ on_state_enter(stream);
+ return APR_SUCCESS;
+}
+
+void h2_stream_set_monitor(h2_stream *stream, h2_stream_monitor *monitor)
+{
+ stream->monitor = monitor;
+}
+
+void h2_stream_dispatch(h2_stream *stream, h2_stream_event_t ev)
+{
+ int new_state;
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c,
+ H2_STRM_MSG(stream, "dispatch event %d"), ev);
+ new_state = on_event(stream, ev);
+ if (new_state < 0) {
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, stream->session->c,
+ H2_STRM_LOG(APLOGNO(10002), stream, "invalid event %d"), ev);
+ on_state_invalid(stream);
+ AP_DEBUG_ASSERT(new_state > S_XXX);
+ return;
+ }
+ else if (new_state == stream->state) {
+ /* nop */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c,
+ H2_STRM_MSG(stream, "non-state event %d"), ev);
+ return;
+ }
+ else {
+ on_state_event(stream, ev);
+ transit(stream, new_state);
+ }
+}
+
+static void set_policy_for(h2_stream *stream, h2_request *r)
+{
+ int enabled = h2_session_push_enabled(stream->session);
+ stream->push_policy = h2_push_policy_determine(r->headers, stream->pool,
+ enabled);
+ r->serialize = h2_config_geti(stream->session->config, H2_CONF_SER_HEADERS);
+}
+
+apr_status_t h2_stream_send_frame(h2_stream *stream, int ftype, int flags)
+{
+ apr_status_t status = APR_SUCCESS;
+ int new_state, eos = 0;
+
+ new_state = on_frame_send(stream->state, ftype);
+ if (new_state < 0) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
+ H2_STRM_MSG(stream, "invalid frame %d send"), ftype);
+ AP_DEBUG_ASSERT(new_state > S_XXX);
+ return transit(stream, new_state);
+ }
+
+ switch (ftype) {
+ case NGHTTP2_DATA:
+ eos = (flags & NGHTTP2_FLAG_END_STREAM);
+ break;
+
+ case NGHTTP2_HEADERS:
+ eos = (flags & NGHTTP2_FLAG_END_STREAM);
+ break;
+
+ case NGHTTP2_PUSH_PROMISE:
+ /* start pushed stream */
+ ap_assert(stream->request == NULL);
+ ap_assert(stream->rtmp != NULL);
+ status = h2_request_end_headers(stream->rtmp, stream->pool, 1);
+ if (status != APR_SUCCESS) {
+ return status;
+ }
+ set_policy_for(stream, stream->rtmp);
+ stream->request = stream->rtmp;
+ stream->rtmp = NULL;
+ break;
+
default:
- return 0;
+ break;
+ }
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
+ H2_STRM_MSG(stream, "send frame %d, eos=%d"), ftype, eos);
+ status = transit(stream, new_state);
+ if (status == APR_SUCCESS && eos) {
+ status = transit(stream, on_event(stream, H2_SEV_CLOSED_L));
}
+ return status;
}
-static int output_open(h2_stream *stream)
+apr_status_t h2_stream_recv_frame(h2_stream *stream, int ftype, int flags)
{
- switch (stream->state) {
- case H2_STREAM_ST_OPEN:
- case H2_STREAM_ST_CLOSED_INPUT:
- return 1;
+ apr_status_t status = APR_SUCCESS;
+ int new_state, eos = 0;
+
+ new_state = on_frame_recv(stream->state, ftype);
+ if (new_state < 0) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
+ H2_STRM_MSG(stream, "invalid frame %d recv"), ftype);
+ AP_DEBUG_ASSERT(new_state > S_XXX);
+ return transit(stream, new_state);
+ }
+
+ switch (ftype) {
+ case NGHTTP2_DATA:
+ eos = (flags & NGHTTP2_FLAG_END_STREAM);
+ break;
+
+ case NGHTTP2_HEADERS:
+ eos = (flags & NGHTTP2_FLAG_END_STREAM);
+ if (stream->state == H2_SS_OPEN) {
+ /* trailer HEADER */
+ if (!eos) {
+ h2_stream_rst(stream, H2_ERR_PROTOCOL_ERROR);
+ }
+ }
+ else {
+ /* request HEADER */
+ ap_assert(stream->request == NULL);
+ if (stream->rtmp == NULL) {
+            /* This can only happen if the stream has received no header
+             * name/value pairs at all. The latest nghttp2 versions have
+             * become pretty good at detecting this early. In any case, we have
+             * to abort the connection here, since this is clearly a protocol error */
+ return APR_EINVAL;
+ }
+ status = h2_request_end_headers(stream->rtmp, stream->pool, eos);
+ if (status != APR_SUCCESS) {
+ return status;
+ }
+ set_policy_for(stream, stream->rtmp);
+ stream->request = stream->rtmp;
+ stream->rtmp = NULL;
+ }
+ break;
+
default:
- return 0;
+ break;
+ }
+ status = transit(stream, new_state);
+ if (status == APR_SUCCESS && eos) {
+ status = transit(stream, on_event(stream, H2_SEV_CLOSED_R));
}
+ return status;
}
-static apr_status_t stream_pool_cleanup(void *ctx)
+apr_status_t h2_stream_flush_input(h2_stream *stream)
{
- h2_stream *stream = ctx;
- apr_status_t status;
+ apr_status_t status = APR_SUCCESS;
- if (stream->input) {
- h2_beam_destroy(stream->input);
- stream->input = NULL;
+ if (stream->in_buffer && !APR_BRIGADE_EMPTY(stream->in_buffer)) {
+ setup_input(stream);
+ status = h2_beam_send(stream->input, stream->in_buffer, APR_BLOCK_READ);
+ stream->in_last_write = apr_time_now();
}
- if (stream->files) {
- apr_file_t *file;
- int i;
- for (i = 0; i < stream->files->nelts; ++i) {
- file = APR_ARRAY_IDX(stream->files, i, apr_file_t*);
- status = apr_file_close(file);
- ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, stream->session->c,
- "h2_stream(%ld-%d): destroy, closed file %d",
- stream->session->id, stream->id, i);
- }
- stream->files = NULL;
+ if (stream->input_eof
+ && stream->input && !h2_beam_is_closed(stream->input)) {
+ status = h2_beam_close(stream->input);
}
- return APR_SUCCESS;
+ return status;
}
-h2_stream *h2_stream_open(int id, apr_pool_t *pool, h2_session *session,
- int initiated_on, const h2_request *creq)
+apr_status_t h2_stream_recv_DATA(h2_stream *stream, uint8_t flags,
+ const uint8_t *data, size_t len)
{
- h2_request *req;
- h2_stream *stream = apr_pcalloc(pool, sizeof(h2_stream));
+ h2_session *session = stream->session;
+ apr_status_t status = APR_SUCCESS;
- stream->id = id;
- stream->state = H2_STREAM_ST_IDLE;
- stream->pool = pool;
- stream->session = session;
- set_state(stream, H2_STREAM_ST_OPEN);
-
- if (creq) {
- /* take it into out pool and assure correct id's */
- req = h2_request_clone(pool, creq);
- req->id = id;
- req->initiated_on = initiated_on;
+ stream->in_data_frames++;
+ if (len > 0) {
+ if (APLOGctrace3(session->c)) {
+ const char *load = apr_pstrndup(stream->pool, (const char *)data, len);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, session->c,
+ H2_STRM_MSG(stream, "recv DATA, len=%d: -->%s<--"),
+ (int)len, load);
+ }
+ else {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, session->c,
+ H2_STRM_MSG(stream, "recv DATA, len=%d"), (int)len);
+ }
+ stream->in_data_octets += len;
+ if (!stream->in_buffer) {
+ stream->in_buffer = apr_brigade_create(stream->pool,
+ session->c->bucket_alloc);
+ }
+ apr_brigade_write(stream->in_buffer, NULL, NULL, (const char *)data, len);
+ h2_stream_dispatch(stream, H2_SEV_IN_DATA_PENDING);
}
- else {
- req = h2_req_create(id, pool,
- h2_config_geti(session->config, H2_CONF_SER_HEADERS));
+ return status;
+}
+
+static void prep_output(h2_stream *stream) {
+ conn_rec *c = stream->session->c;
+ if (!stream->out_buffer) {
+ stream->out_buffer = apr_brigade_create(stream->pool, c->bucket_alloc);
}
- stream->request = req;
+}
+
+h2_stream *h2_stream_create(int id, apr_pool_t *pool, h2_session *session,
+ h2_stream_monitor *monitor, int initiated_on)
+{
+ h2_stream *stream = apr_pcalloc(pool, sizeof(h2_stream));
- apr_pool_cleanup_register(pool, stream, stream_pool_cleanup,
- apr_pool_cleanup_null);
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03082)
- "h2_stream(%ld-%d): opened", session->id, stream->id);
+ stream->id = id;
+ stream->initiated_on = initiated_on;
+ stream->created = apr_time_now();
+ stream->state = H2_SS_IDLE;
+ stream->pool = pool;
+ stream->session = session;
+ stream->monitor = monitor;
+ stream->max_mem = session->max_stream_mem;
+
+#ifdef H2_NG2_LOCAL_WIN_SIZE
+ stream->in_window_size =
+ nghttp2_session_get_stream_local_window_size(
+ stream->session->ngh2, stream->id);
+#endif
+
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
+ H2_STRM_LOG(APLOGNO(03082), stream, "created"));
+ on_state_enter(stream);
return stream;
}
void h2_stream_cleanup(h2_stream *stream)
{
- AP_DEBUG_ASSERT(stream);
- if (stream->buffer) {
- apr_brigade_cleanup(stream->buffer);
+ apr_status_t status;
+
+ ap_assert(stream);
+ if (stream->out_buffer) {
+ /* remove any left over output buckets that may still have
+ * references into request pools */
+ apr_brigade_cleanup(stream->out_buffer);
}
if (stream->input) {
- apr_status_t status;
- status = h2_beam_shutdown(stream->input, APR_NONBLOCK_READ, 1);
+ h2_beam_abort(stream->input);
+ status = h2_beam_wait_empty(stream->input, APR_NONBLOCK_READ);
if (status == APR_EAGAIN) {
ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c,
- "h2_stream(%ld-%d): wait on input shutdown",
- stream->session->id, stream->id);
- status = h2_beam_shutdown(stream->input, APR_BLOCK_READ, 1);
+ H2_STRM_MSG(stream, "wait on input drain"));
+ status = h2_beam_wait_empty(stream->input, APR_BLOCK_READ);
ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, stream->session->c,
- "h2_stream(%ld-%d): input shutdown returned",
- stream->session->id, stream->id);
+ H2_STRM_MSG(stream, "input drain returned"));
}
}
}
void h2_stream_destroy(h2_stream *stream)
{
- AP_DEBUG_ASSERT(stream);
+ ap_assert(stream);
ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, stream->session->c,
- "h2_stream(%ld-%d): destroy",
- stream->session->id, stream->id);
+ H2_STRM_MSG(stream, "destroy"));
if (stream->pool) {
apr_pool_destroy(stream->pool);
+ stream->pool = NULL;
}
}
-void h2_stream_eos_destroy(h2_stream *stream)
-{
- h2_session_stream_done(stream->session, stream);
- /* stream possibly destroyed */
-}
-
apr_pool_t *h2_stream_detach_pool(h2_stream *stream)
{
apr_pool_t *pool = stream->pool;
@@ -250,374 +590,352 @@ apr_pool_t *h2_stream_detach_pool(h2_str
return pool;
}
-void h2_stream_rst(h2_stream *stream, int error_code)
+apr_status_t h2_stream_prep_processing(h2_stream *stream)
{
- stream->rst_error = error_code;
- close_input(stream);
- close_output(stream);
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
- "h2_stream(%ld-%d): reset, error=%d",
- stream->session->id, stream->id, error_code);
+ if (stream->request) {
+ const h2_request *r = stream->request;
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
+ H2_STRM_MSG(stream, "schedule %s %s://%s%s chunked=%d"),
+ r->method, r->scheme, r->authority, r->path, r->chunked);
+ setup_input(stream);
+ stream->scheduled = 1;
+ return APR_SUCCESS;
+ }
+ return APR_EINVAL;
}
-struct h2_response *h2_stream_get_response(h2_stream *stream)
+void h2_stream_rst(h2_stream *stream, int error_code)
{
- return stream->response;
+ stream->rst_error = error_code;
+ if (stream->input) {
+ h2_beam_abort(stream->input);
+ }
+ if (stream->output) {
+ h2_beam_leave(stream->output);
+ }
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
+ H2_STRM_MSG(stream, "reset, error=%d"), error_code);
+ h2_stream_dispatch(stream, H2_SEV_CANCELLED);
}
-apr_status_t h2_stream_set_request(h2_stream *stream, request_rec *r)
+apr_status_t h2_stream_set_request_rec(h2_stream *stream,
+ request_rec *r, int eos)
{
+ h2_request *req;
apr_status_t status;
- AP_DEBUG_ASSERT(stream);
+
+ ap_assert(stream->request == NULL);
+ ap_assert(stream->rtmp == NULL);
if (stream->rst_error) {
return APR_ECONNRESET;
}
- set_state(stream, H2_STREAM_ST_OPEN);
- status = h2_request_rwrite(stream->request, stream->pool, r);
- stream->request->serialize = h2_config_geti(h2_config_rget(r),
- H2_CONF_SER_HEADERS);
- ap_log_rerror(APLOG_MARK, APLOG_DEBUG, status, r, APLOGNO(03058)
- "h2_request(%d): rwrite %s host=%s://%s%s",
- stream->request->id, stream->request->method,
- stream->request->scheme, stream->request->authority,
- stream->request->path);
-
+ status = h2_request_rcreate(&req, stream->pool, r);
+ if (status == APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, status, r,
+ H2_STRM_LOG(APLOGNO(03058), stream,
+ "set_request_rec %s host=%s://%s%s"),
+ req->method, req->scheme, req->authority, req->path);
+ stream->rtmp = req;
+ /* simulate the frames that led to this */
+ return h2_stream_recv_frame(stream, NGHTTP2_HEADERS,
+ NGHTTP2_FLAG_END_STREAM);
+ }
return status;
}
+void h2_stream_set_request(h2_stream *stream, const h2_request *r)
+{
+ ap_assert(stream->request == NULL);
+ ap_assert(stream->rtmp == NULL);
+ stream->rtmp = h2_request_clone(stream->pool, r);
+}
+
+static void set_error_response(h2_stream *stream, int http_status)
+{
+ if (!h2_stream_is_ready(stream)) {
+ conn_rec *c = stream->session->c;
+ apr_bucket *b;
+ h2_headers *response;
+
+ response = h2_headers_die(http_status, stream->request, stream->pool);
+ prep_output(stream);
+ b = apr_bucket_eos_create(c->bucket_alloc);
+ APR_BRIGADE_INSERT_HEAD(stream->out_buffer, b);
+ b = h2_bucket_headers_create(c->bucket_alloc, response);
+ APR_BRIGADE_INSERT_HEAD(stream->out_buffer, b);
+ }
+}
+
+static apr_status_t add_trailer(h2_stream *stream,
+ const char *name, size_t nlen,
+ const char *value, size_t vlen)
+{
+ conn_rec *c = stream->session->c;
+ char *hname, *hvalue;
+
+ if (nlen == 0 || name[0] == ':') {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, APR_EINVAL, c,
+ H2_STRM_LOG(APLOGNO(03060), stream,
+ "pseudo header in trailer"));
+ return APR_EINVAL;
+ }
+ if (h2_req_ignore_trailer(name, nlen)) {
+ return APR_SUCCESS;
+ }
+ if (!stream->trailers) {
+ stream->trailers = apr_table_make(stream->pool, 5);
+ }
+ hname = apr_pstrndup(stream->pool, name, nlen);
+ hvalue = apr_pstrndup(stream->pool, value, vlen);
+ h2_util_camel_case_header(hname, nlen);
+ apr_table_mergen(stream->trailers, hname, hvalue);
+
+ return APR_SUCCESS;
+}
+
apr_status_t h2_stream_add_header(h2_stream *stream,
const char *name, size_t nlen,
const char *value, size_t vlen)
{
+ h2_session *session = stream->session;
int error = 0;
- AP_DEBUG_ASSERT(stream);
- if (stream->response) {
+ apr_status_t status;
+
+ if (stream->has_response) {
return APR_EINVAL;
}
++stream->request_headers_added;
if (name[0] == ':') {
- if ((vlen) > stream->session->s->limit_req_line) {
+ if ((vlen) > session->s->limit_req_line) {
/* pseudo header: approximation of request line size check */
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
- "h2_stream(%ld-%d): pseudo header %s too long",
- stream->session->id, stream->id, name);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
+ H2_STRM_MSG(stream, "pseudo %s too long"), name);
error = HTTP_REQUEST_URI_TOO_LARGE;
}
}
- else if ((nlen + 2 + vlen) > stream->session->s->limit_req_fieldsize) {
+ else if ((nlen + 2 + vlen) > session->s->limit_req_fieldsize) {
/* header too long */
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
- "h2_stream(%ld-%d): header %s too long",
- stream->session->id, stream->id, name);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
+ H2_STRM_MSG(stream, "header %s too long"), name);
error = HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE;
}
- if (stream->request_headers_added
- > stream->session->s->limit_req_fields + 4) {
+ if (stream->request_headers_added > session->s->limit_req_fields + 4) {
/* too many header lines, include 4 pseudo headers */
if (stream->request_headers_added
- > stream->session->s->limit_req_fields + 4 + 100) {
+ > session->s->limit_req_fields + 4 + 100) {
/* yeah, right */
+ h2_stream_rst(stream, H2_ERR_ENHANCE_YOUR_CALM);
return APR_ECONNRESET;
}
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
- "h2_stream(%ld-%d): too many header lines",
- stream->session->id, stream->id);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
+ H2_STRM_MSG(stream, "too many header lines"));
error = HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE;
}
- if (h2_stream_is_scheduled(stream)) {
- return h2_request_add_trailer(stream->request, stream->pool,
- name, nlen, value, vlen);
- }
- else if (error) {
- return h2_stream_set_error(stream, error);
- }
- else {
- if (!input_open(stream)) {
- return APR_ECONNRESET;
+ if (error) {
+ set_error_response(stream, error);
+ return APR_EINVAL;
+ }
+ else if (H2_SS_IDLE == stream->state) {
+ if (!stream->rtmp) {
+ stream->rtmp = h2_req_create(stream->id, stream->pool,
+ NULL, NULL, NULL, NULL, NULL, 0);
}
- return h2_request_add_header(stream->request, stream->pool,
- name, nlen, value, vlen);
- }
-}
-
-apr_status_t h2_stream_schedule(h2_stream *stream, int eos, int push_enabled,
- h2_stream_pri_cmp *cmp, void *ctx)
-{
- apr_status_t status;
- AP_DEBUG_ASSERT(stream);
- AP_DEBUG_ASSERT(stream->session);
- AP_DEBUG_ASSERT(stream->session->mplx);
-
- if (!output_open(stream)) {
- return APR_ECONNRESET;
- }
- if (stream->scheduled) {
- return APR_EINVAL;
- }
- if (eos) {
- close_input(stream);
+ status = h2_request_add_header(stream->rtmp, stream->pool,
+ name, nlen, value, vlen);
}
-
- if (stream->response) {
- /* already have a resonse, probably a HTTP error code */
- return h2_mplx_process(stream->session->mplx, stream, cmp, ctx);
- }
-
- /* Seeing the end-of-headers, we have everything we need to
- * start processing it.
- */
- status = h2_request_end_headers(stream->request, stream->pool,
- eos, push_enabled);
- if (status == APR_SUCCESS) {
- stream->request->body = !eos;
- stream->scheduled = 1;
- stream->input_remaining = stream->request->content_length;
-
- status = h2_mplx_process(stream->session->mplx, stream, cmp, ctx);
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
- "h2_stream(%ld-%d): scheduled %s %s://%s%s",
- stream->session->id, stream->id,
- stream->request->method, stream->request->scheme,
- stream->request->authority, stream->request->path);
+ else if (H2_SS_OPEN == stream->state) {
+ status = add_trailer(stream, name, nlen, value, vlen);
}
else {
- h2_stream_rst(stream, H2_ERR_INTERNAL_ERROR);
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, stream->session->c,
- "h2_stream(%ld-%d): RST=2 (internal err) %s %s://%s%s",
- stream->session->id, stream->id,
- stream->request->method, stream->request->scheme,
- stream->request->authority, stream->request->path);
- }
-
- return status;
-}
-
-int h2_stream_is_scheduled(const h2_stream *stream)
-{
- return stream->scheduled;
-}
-
-apr_status_t h2_stream_close_input(h2_stream *stream)
-{
- apr_status_t status = APR_SUCCESS;
-
- AP_DEBUG_ASSERT(stream);
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
- "h2_stream(%ld-%d): closing input",
- stream->session->id, stream->id);
-
- if (stream->rst_error) {
- return APR_ECONNRESET;
+ status = APR_EINVAL;
}
- if (close_input(stream) && stream->input) {
- status = h2_beam_close(stream->input);
+ if (status != APR_SUCCESS) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
+ H2_STRM_MSG(stream, "header %s not accepted"), name);
+ h2_stream_dispatch(stream, H2_SEV_CANCELLED);
}
return status;
}
-apr_status_t h2_stream_write_data(h2_stream *stream,
- const char *data, size_t len, int eos)
+static apr_bucket *get_first_headers_bucket(apr_bucket_brigade *bb)
{
- conn_rec *c = stream->session->c;
- apr_status_t status = APR_SUCCESS;
-
- AP_DEBUG_ASSERT(stream);
- if (!stream->input) {
- return APR_EOF;
- }
- if (input_closed(stream) || !stream->request->eoh) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
- "h2_stream(%ld-%d): writing denied, closed=%d, eoh=%d",
- stream->session->id, stream->id, input_closed(stream),
- stream->request->eoh);
- return APR_EINVAL;
- }
-
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
- "h2_stream(%ld-%d): add %ld input bytes",
- stream->session->id, stream->id, (long)len);
-
- if (!stream->request->chunked) {
- stream->input_remaining -= len;
- if (stream->input_remaining < 0) {
- ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, c,
- APLOGNO(02961)
- "h2_stream(%ld-%d): got %ld more content bytes than announced "
- "in content-length header: %ld",
- stream->session->id, stream->id,
- (long)stream->request->content_length,
- -(long)stream->input_remaining);
- h2_stream_rst(stream, H2_ERR_PROTOCOL_ERROR);
- return APR_ECONNABORTED;
+ if (bb) {
+ apr_bucket *b = APR_BRIGADE_FIRST(bb);
+ while (b != APR_BRIGADE_SENTINEL(bb)) {
+ if (H2_BUCKET_IS_HEADERS(b)) {
+ return b;
+ }
+ b = APR_BUCKET_NEXT(b);
}
}
-
- if (!stream->tmp) {
- stream->tmp = apr_brigade_create(stream->pool, c->bucket_alloc);
- }
- apr_brigade_write(stream->tmp, NULL, NULL, data, len);
- if (eos) {
- APR_BRIGADE_INSERT_TAIL(stream->tmp,
- apr_bucket_eos_create(c->bucket_alloc));
- close_input(stream);
- }
-
- status = h2_beam_send(stream->input, stream->tmp, APR_BLOCK_READ);
- apr_brigade_cleanup(stream->tmp);
- return status;
-}
-
-void h2_stream_set_suspended(h2_stream *stream, int suspended)
-{
- AP_DEBUG_ASSERT(stream);
- stream->suspended = !!suspended;
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c,
- "h2_stream(%ld-%d): suspended=%d",
- stream->session->id, stream->id, stream->suspended);
-}
-
-int h2_stream_is_suspended(const h2_stream *stream)
-{
- AP_DEBUG_ASSERT(stream);
- return stream->suspended;
+ return NULL;
}
-static apr_status_t fill_buffer(h2_stream *stream, apr_size_t amount)
-{
- conn_rec *c = stream->session->c;
- apr_bucket *b;
- apr_status_t status;
-
- if (!stream->output) {
- return APR_EOF;
- }
- status = h2_beam_receive(stream->output, stream->buffer,
- APR_NONBLOCK_READ, amount);
- /* The buckets we reveive are using the stream->buffer pool as
- * lifetime which is exactly what we want since this is stream->pool.
- *
- * However: when we send these buckets down the core output filters, the
- * filter might decide to setaside them into a pool of its own. And it
- * might decide, after having sent the buckets, to clear its pool.
- *
- * This is problematic for file buckets because it then closed the contained
- * file. Any split off buckets we sent afterwards will result in a
- * APR_EBADF.
- */
- for (b = APR_BRIGADE_FIRST(stream->buffer);
- b != APR_BRIGADE_SENTINEL(stream->buffer);
- b = APR_BUCKET_NEXT(b)) {
- if (APR_BUCKET_IS_FILE(b)) {
- apr_bucket_file *f = (apr_bucket_file *)b->data;
- apr_pool_t *fpool = apr_file_pool_get(f->fd);
- if (fpool != c->pool) {
- apr_bucket_setaside(b, c->pool);
- if (!stream->files) {
- stream->files = apr_array_make(stream->pool,
- 5, sizeof(apr_file_t*));
+static apr_status_t add_buffered_data(h2_stream *stream, apr_off_t requested,
+ apr_off_t *plen, int *peos, int *is_all,
+ h2_headers **pheaders)
+{
+ apr_bucket *b, *e;
+
+ *peos = 0;
+ *plen = 0;
+ *is_all = 0;
+ if (pheaders) {
+ *pheaders = NULL;
+ }
+
+ H2_STREAM_OUT_LOG(APLOG_TRACE2, stream, "add_buffered_data");
+ b = APR_BRIGADE_FIRST(stream->out_buffer);
+ while (b != APR_BRIGADE_SENTINEL(stream->out_buffer)) {
+ e = APR_BUCKET_NEXT(b);
+ if (APR_BUCKET_IS_METADATA(b)) {
+ if (APR_BUCKET_IS_FLUSH(b)) {
+ APR_BUCKET_REMOVE(b);
+ apr_bucket_destroy(b);
+ }
+ else if (APR_BUCKET_IS_EOS(b)) {
+ *peos = 1;
+ return APR_SUCCESS;
+ }
+ else if (H2_BUCKET_IS_HEADERS(b)) {
+ if (*plen > 0) {
+ /* data before the response, can only return up to here */
+ return APR_SUCCESS;
+ }
+ else if (pheaders) {
+ *pheaders = h2_bucket_headers_get(b);
+ APR_BUCKET_REMOVE(b);
+ apr_bucket_destroy(b);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
+ H2_STRM_MSG(stream, "prep, -> response %d"),
+ (*pheaders)->status);
+ return APR_SUCCESS;
+ }
+ else {
+ return APR_EAGAIN;
}
- APR_ARRAY_PUSH(stream->files, apr_file_t*) = f->fd;
}
}
+ else if (b->length == 0) {
+ APR_BUCKET_REMOVE(b);
+ apr_bucket_destroy(b);
+ }
+ else {
+ ap_assert(b->length != (apr_size_t)-1);
+ *plen += b->length;
+ if (*plen >= requested) {
+ *plen = requested;
+ return APR_SUCCESS;
+ }
+ }
+ b = e;
}
- return status;
+ *is_all = 1;
+ return APR_SUCCESS;
}
-apr_status_t h2_stream_set_response(h2_stream *stream, h2_response *response,
- h2_bucket_beam *output)
+apr_status_t h2_stream_out_prepare(h2_stream *stream, apr_off_t *plen,
+ int *peos, h2_headers **pheaders)
{
apr_status_t status = APR_SUCCESS;
- conn_rec *c = stream->session->c;
-
- if (!output_open(stream)) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
- "h2_stream(%ld-%d): output closed",
- stream->session->id, stream->id);
- return APR_ECONNRESET;
- }
-
- stream->response = response;
- stream->output = output;
- stream->buffer = apr_brigade_create(stream->pool, c->bucket_alloc);
-
- h2_stream_filter(stream);
- if (stream->output) {
- status = fill_buffer(stream, 0);
- }
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, c,
- "h2_stream(%ld-%d): set_response(%d)",
- stream->session->id, stream->id,
- stream->response->http_status);
- return status;
-}
+ apr_off_t requested, missing, max_chunk = H2_DATA_CHUNK_SIZE;
+ conn_rec *c;
+ int complete;
-apr_status_t h2_stream_set_error(h2_stream *stream, int http_status)
-{
- h2_response *response;
+ ap_assert(stream);
- if (stream->submitted) {
- return APR_EINVAL;
- }
- response = h2_response_die(stream->id, http_status, stream->request,
- stream->pool);
- return h2_stream_set_response(stream, response, NULL);
-}
-
-static const apr_size_t DATA_CHUNK_SIZE = ((16*1024) - 100 - 9);
-
-apr_status_t h2_stream_out_prepare(h2_stream *stream,
- apr_off_t *plen, int *peos)
-{
- conn_rec *c = stream->session->c;
- apr_status_t status = APR_SUCCESS;
- apr_off_t requested;
-
if (stream->rst_error) {
*plen = 0;
*peos = 1;
return APR_ECONNRESET;
}
+
+ c = stream->session->c;
+ prep_output(stream);
- if (*plen > 0) {
- requested = H2MIN(*plen, DATA_CHUNK_SIZE);
+ /* determine how much we'd like to send. We cannot send more than
+ * is requested. But we can reduce the size in case the master
+     * connection operates in smaller chunks. (TLS warmup) */
+ if (stream->session->io.write_size > 0) {
+ max_chunk = stream->session->io.write_size - 9; /* header bits */
+ }
+ requested = (*plen > 0)? H2MIN(*plen, max_chunk) : max_chunk;
+
+ /* count the buffered data until eos or a headers bucket */
+ status = add_buffered_data(stream, requested, plen, peos, &complete, pheaders);
+
+ if (status == APR_EAGAIN) {
+ /* TODO: ugly, someone needs to retrieve the response first */
+ h2_mplx_keep_active(stream->session->mplx, stream);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ H2_STRM_MSG(stream, "prep, response eagain"));
+ return status;
}
- else {
- requested = DATA_CHUNK_SIZE;
+ else if (status != APR_SUCCESS) {
+ return status;
+ }
+
+ if (pheaders && *pheaders) {
+ return APR_SUCCESS;
}
- *plen = requested;
- H2_STREAM_OUT_LOG(APLOG_TRACE2, stream, "h2_stream_out_prepare_pre");
- h2_util_bb_avail(stream->buffer, plen, peos);
- if (!*peos && *plen < requested) {
- /* try to get more data */
- status = fill_buffer(stream, (requested - *plen) + DATA_CHUNK_SIZE);
- if (APR_STATUS_IS_EOF(status)) {
+    /* If we do not have enough buffered data to satisfy the requested
+ * length *and* we counted the _complete_ buffer (and did not stop in the middle
+ * because of meta data there), lets see if we can read more from the
+ * output beam */
+ missing = H2MIN(requested, stream->max_mem) - *plen;
+ if (complete && !*peos && missing > 0) {
+ apr_status_t rv = APR_EOF;
+
+ if (stream->output) {
+ H2_STREAM_OUT_LOG(APLOG_TRACE2, stream, "pre");
+ rv = h2_beam_receive(stream->output, stream->out_buffer,
+ APR_NONBLOCK_READ, stream->max_mem - *plen);
+ H2_STREAM_OUT_LOG(APLOG_TRACE2, stream, "post");
+ }
+
+ if (rv == APR_SUCCESS) {
+ /* count the buffer again, now that we have read output */
+ status = add_buffered_data(stream, requested, plen, peos, &complete, pheaders);
+ }
+ else if (APR_STATUS_IS_EOF(rv)) {
apr_bucket *eos = apr_bucket_eos_create(c->bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(stream->buffer, eos);
- status = APR_SUCCESS;
+ APR_BRIGADE_INSERT_TAIL(stream->out_buffer, eos);
+ *peos = 1;
+ }
+ else if (APR_STATUS_IS_EAGAIN(rv)) {
+            /* we set this as the status of this call only if there
+             * is no buffered data, see check below */
+ }
+ else {
+ /* real error reading. Give this back directly, even though
+ * we may have something buffered. */
+ status = rv;
+ }
+ }
+
+ if (status == APR_SUCCESS) {
+ if (*peos || *plen) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ H2_STRM_MSG(stream, "prepare, len=%ld eos=%d"),
+ (long)*plen, *peos);
+ }
+ else {
+ status = APR_EAGAIN;
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ H2_STRM_MSG(stream, "prepare, no data"));
}
- else if (status == APR_EAGAIN) {
- /* did not receive more, it's ok */
- status = APR_SUCCESS;
- }
- *plen = requested;
- h2_util_bb_avail(stream->buffer, plen, peos);
- }
- H2_STREAM_OUT_LOG(APLOG_TRACE2, stream, "h2_stream_out_prepare_post");
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, c,
- "h2_stream(%ld-%d): prepare, len=%ld eos=%d, trailers=%s",
- c->id, stream->id, (long)*plen, *peos,
- (stream->response && stream->response->trailers)?
- "yes" : "no");
- if (!*peos && !*plen && status == APR_SUCCESS) {
- return APR_EAGAIN;
}
return status;
}
+static int is_not_headers(apr_bucket *b)
+{
+ return !H2_BUCKET_IS_HEADERS(b);
+}
apr_status_t h2_stream_read_to(h2_stream *stream, apr_bucket_brigade *bb,
apr_off_t *plen, int *peos)
@@ -628,47 +946,28 @@ apr_status_t h2_stream_read_to(h2_stream
if (stream->rst_error) {
return APR_ECONNRESET;
}
- status = h2_append_brigade(bb, stream->buffer, plen, peos);
+ status = h2_append_brigade(bb, stream->out_buffer, plen, peos, is_not_headers);
if (status == APR_SUCCESS && !*peos && !*plen) {
status = APR_EAGAIN;
}
ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, c,
- "h2_stream(%ld-%d): read_to, len=%ld eos=%d",
- c->id, stream->id, (long)*plen, *peos);
+ H2_STRM_MSG(stream, "read_to, len=%ld eos=%d"),
+ (long)*plen, *peos);
return status;
}
-int h2_stream_input_is_open(const h2_stream *stream)
-{
- return input_open(stream);
-}
-
-int h2_stream_needs_submit(const h2_stream *stream)
-{
- switch (stream->state) {
- case H2_STREAM_ST_OPEN:
- case H2_STREAM_ST_CLOSED_INPUT:
- case H2_STREAM_ST_CLOSED_OUTPUT:
- case H2_STREAM_ST_CLOSED:
- return !stream->submitted;
- default:
- return 0;
- }
-}
-
-apr_status_t h2_stream_submit_pushes(h2_stream *stream)
+apr_status_t h2_stream_submit_pushes(h2_stream *stream, h2_headers *response)
{
apr_status_t status = APR_SUCCESS;
apr_array_header_t *pushes;
int i;
- pushes = h2_push_collect_update(stream, stream->request,
- h2_stream_get_response(stream));
+ pushes = h2_push_collect_update(stream, stream->request, response);
if (pushes && !apr_is_empty_array(pushes)) {
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
- "h2_stream(%ld-%d): found %d push candidates",
- stream->session->id, stream->id, pushes->nelts);
+ H2_STRM_MSG(stream, "found %d push candidates"),
+ pushes->nelts);
for (i = 0; i < pushes->nelts; ++i) {
h2_push *push = APR_ARRAY_IDX(pushes, i, h2_push*);
h2_stream *s = h2_session_push(stream->session, stream, push);
@@ -683,14 +982,13 @@ apr_status_t h2_stream_submit_pushes(h2_
apr_table_t *h2_stream_get_trailers(h2_stream *stream)
{
- return stream->response? stream->response->trailers : NULL;
+ return NULL;
}
-const h2_priority *h2_stream_get_priority(h2_stream *stream)
+const h2_priority *h2_stream_get_priority(h2_stream *stream,
+ h2_headers *response)
{
- h2_response *response = h2_stream_get_response(stream);
-
- if (response && stream->request && stream->request->initiated_on) {
+ if (response && stream->initiated_on) {
const char *ctype = apr_table_get(response->headers, "content-type");
if (ctype) {
/* FIXME: Not good enough, config needs to come from request->server */
@@ -700,3 +998,86 @@ const h2_priority *h2_stream_get_priorit
return NULL;
}
+int h2_stream_is_ready(h2_stream *stream)
+{
+ if (stream->has_response) {
+ return 1;
+ }
+ else if (stream->out_buffer && get_first_headers_bucket(stream->out_buffer)) {
+ return 1;
+ }
+ return 0;
+}
+
+int h2_stream_was_closed(const h2_stream *stream)
+{
+ switch (stream->state) {
+ case H2_SS_CLOSED:
+ case H2_SS_CLEANUP:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+apr_status_t h2_stream_in_consumed(h2_stream *stream, apr_off_t amount)
+{
+ h2_session *session = stream->session;
+
+ if (amount > 0) {
+ apr_off_t consumed = amount;
+
+ while (consumed > 0) {
+ int len = (consumed > INT_MAX)? INT_MAX : (int)consumed;
+ nghttp2_session_consume(session->ngh2, stream->id, len);
+ consumed -= len;
+ }
+
+#ifdef H2_NG2_LOCAL_WIN_SIZE
+ if (1) {
+ int cur_size = nghttp2_session_get_stream_local_window_size(
+ session->ngh2, stream->id);
+ int win = stream->in_window_size;
+ int thigh = win * 8/10;
+ int tlow = win * 2/10;
+ const int win_max = 2*1024*1024;
+ const int win_min = 32*1024;
+
+ /* Work in progress, probably should add directives for these
+ * values once this stabilizes somewhat. The general idea is
+ * to adapt stream window sizes if the input window changes
+ * a) very quickly (< good RTT) from full to empty
+ * b) only a little bit (> bad RTT)
+ * where in a) it grows and in b) it shrinks again.
+ */
+ if (cur_size > thigh && amount > thigh && win < win_max) {
+ /* almost empty again with one reported consumption, how
+ * long did this take? */
+ long ms = apr_time_msec(apr_time_now() - stream->in_last_write);
+ if (ms < 40) {
+ win = H2MIN(win_max, win + (64*1024));
+ }
+ }
+ else if (cur_size < tlow && amount < tlow && win > win_min) {
+ /* staying full, for how long already? */
+ long ms = apr_time_msec(apr_time_now() - stream->in_last_write);
+ if (ms > 700) {
+ win = H2MAX(win_min, win - (32*1024));
+ }
+ }
+
+ if (win != stream->in_window_size) {
+ stream->in_window_size = win;
+ nghttp2_session_set_local_window_size(session->ngh2,
+ NGHTTP2_FLAG_NONE, stream->id, win);
+ }
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
+ "h2_stream(%ld-%d): consumed %ld bytes, window now %d/%d",
+ session->id, stream->id, (long)amount,
+ cur_size, stream->in_window_size);
+ }
+#endif
+ }
+ return APR_SUCCESS;
+}
+
diff -up --new-file httpd-2.4.23/modules/http2/h2_stream.h /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_stream.h
--- httpd-2.4.23/modules/http2/h2_stream.h 2016-05-30 21:58:14.000000000 +0200
+++ /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_stream.h 2017-06-06 11:08:25.000000000 +0200
@@ -25,74 +25,132 @@
* connection to the client. The h2_session writes to the h2_stream,
* adding HEADERS and DATA and finally an EOS. When headers are done,
* h2_stream is scheduled for handling, which is expected to produce
- * a h2_response.
+ * a response h2_headers at least.
*
- * The h2_response gives the HEADER frames to sent to the client, followed
- * by DATA frames read from the h2_stream until EOS is reached.
+ * The h2_headers may be followed by more h2_headers (interim responses) and
+ * by DATA frames read from the h2_stream until EOS is reached. Trailers
+ * are sent when a last h2_headers is received. This always closes the stream
+ * output.
*/
struct h2_mplx;
struct h2_priority;
struct h2_request;
-struct h2_response;
+struct h2_headers;
struct h2_session;
-struct h2_sos;
+struct h2_task;
struct h2_bucket_beam;
typedef struct h2_stream h2_stream;
+typedef void h2_stream_state_cb(void *ctx, h2_stream *stream);
+typedef void h2_stream_event_cb(void *ctx, h2_stream *stream,
+ h2_stream_event_t ev);
+
+/**
+ * Callback structure for events and stream state transisitions
+ */
+typedef struct h2_stream_monitor {
+ void *ctx;
+ h2_stream_state_cb *on_state_enter; /* called when a state is entered */
+ h2_stream_state_cb *on_state_invalid; /* called when an invalid state change
+ was detected */
+ h2_stream_event_cb *on_state_event; /* called right before the given event
+ result in a new stream state */
+ h2_stream_event_cb *on_event; /* called for events that do not
+ trigger a state change */
+} h2_stream_monitor;
+
struct h2_stream {
- int id; /* http2 stream id */
- h2_stream_state_t state; /* http/2 state of this stream */
+ int id; /* http2 stream identifier */
+ int initiated_on; /* initiating stream id (PUSH) or 0 */
+ apr_pool_t *pool; /* the memory pool for this stream */
struct h2_session *session; /* the session this stream belongs to */
+ h2_stream_state_t state; /* state of this stream */
- apr_pool_t *pool; /* the memory pool for this stream */
- struct h2_request *request; /* the request made in this stream */
- struct h2_bucket_beam *input;
+ apr_time_t created; /* when stream was created */
+
+ const struct h2_request *request; /* the request made in this stream */
+ struct h2_request *rtmp; /* request being assembled */
+ apr_table_t *trailers; /* optional incoming trailers */
int request_headers_added; /* number of request headers added */
- struct h2_response *response;
+ struct h2_bucket_beam *input;
+ apr_bucket_brigade *in_buffer;
+ int in_window_size;
+ apr_time_t in_last_write;
+
struct h2_bucket_beam *output;
- apr_bucket_brigade *buffer;
- apr_bucket_brigade *tmp;
- apr_array_header_t *files; /* apr_file_t* we collected during I/O */
+ apr_bucket_brigade *out_buffer;
+ apr_size_t max_mem; /* maximum amount of data buffered */
int rst_error; /* stream error for RST_STREAM */
unsigned int aborted : 1; /* was aborted */
- unsigned int suspended : 1; /* DATA sending has been suspended */
unsigned int scheduled : 1; /* stream has been scheduled */
- unsigned int started : 1; /* stream has started processing */
- unsigned int submitted : 1; /* response HEADER has been sent */
+ unsigned int has_response : 1; /* response headers are known */
+ unsigned int input_eof : 1; /* no more request data coming */
+ unsigned int out_checked : 1; /* output eof was double checked */
+ unsigned int push_policy; /* which push policy to use for this request */
+
+ struct h2_task *task; /* assigned task to fulfill request */
- apr_off_t input_remaining; /* remaining bytes on input as advertised via content-length */
- apr_off_t data_frames_sent; /* # of DATA frames sent out for this stream */
+ const h2_priority *pref_priority; /* preferred priority for this stream */
+ apr_off_t out_data_frames; /* # of DATA frames sent */
+ apr_off_t out_data_octets; /* # of DATA octets (payload) sent */
+ apr_off_t in_data_frames; /* # of DATA frames received */
+ apr_off_t in_data_octets; /* # of DATA octets (payload) received */
+
+ h2_stream_monitor *monitor; /* optional monitor for stream states */
};
#define H2_STREAM_RST(s, def) (s->rst_error? s->rst_error : (def))
/**
- * Create a stream in OPEN state.
+ * Create a stream in H2_SS_IDLE state.
* @param id the stream identifier
* @param pool the memory pool to use for this stream
* @param session the session this stream belongs to
+ * @param monitor an optional monitor to be called for events and
+ * state transitions
+ * @param initiated_on the id of the stream this one was initiated on (PUSH)
+ *
* @return the newly opened stream
*/
-h2_stream *h2_stream_open(int id, apr_pool_t *pool, struct h2_session *session,
- int initiated_on, const struct h2_request *req);
+h2_stream *h2_stream_create(int id, apr_pool_t *pool,
+ struct h2_session *session,
+ h2_stream_monitor *monitor,
+ int initiated_on);
/**
- * Cleanup any resources still held by the stream, called by last bucket.
+ * Destroy memory pool if still owned by the stream.
*/
-void h2_stream_eos_destroy(h2_stream *stream);
+void h2_stream_destroy(h2_stream *stream);
/**
- * Destroy memory pool if still owned by the stream.
+ * Prepare the stream so that processing may start.
+ *
+ * This is the time to allocate resources not needed before.
+ *
+ * @param stream the stream to prep
*/
-void h2_stream_destroy(h2_stream *stream);
+apr_status_t h2_stream_prep_processing(h2_stream *stream);
+
+/*
+ * Set a new monitor for this stream, replacing any existing one. Can
+ * be called with NULL to have no monitor installed.
+ */
+void h2_stream_set_monitor(h2_stream *stream, h2_stream_monitor *monitor);
+
+/**
+ * Dispatch (handle) an event on the given stream.
+ * @param stream the stream the event happened on
+ * @param ev the type of event
+ */
+void h2_stream_dispatch(h2_stream *stream, h2_stream_event_t ev);
/**
- * Removes stream from h2_session and destroys it.
+ * Cleanup references into request processing.
*
* @param stream the stream to cleanup
*/
@@ -107,14 +165,30 @@ void h2_stream_cleanup(h2_stream *stream
*/
apr_pool_t *h2_stream_detach_pool(h2_stream *stream);
+/**
+ * Notify the stream that amount bytes have been consumed of its input
+ * since the last invocation of this method (delta amount).
+ */
+apr_status_t h2_stream_in_consumed(h2_stream *stream, apr_off_t amount);
+
+/**
+ * Set complete stream headers from given h2_request.
+ *
+ * @param stream stream to write request to
+ * @param r the request with all the meta data
+ *
+ */
+void h2_stream_set_request(h2_stream *stream, const h2_request *r);
/**
- * Initialize stream->request with the given request_rec.
+ * Set complete stream header from given request_rec.
*
* @param stream stream to write request to
* @param r the request with all the meta data
+ * @param eos != 0 iff stream input is closed
*/
-apr_status_t h2_stream_set_request(h2_stream *stream, request_rec *r);
+apr_status_t h2_stream_set_request_rec(h2_stream *stream,
+ request_rec *r, int eos);
/*
* Add a HTTP/2 header (including pseudo headers) or trailer
@@ -130,22 +204,21 @@ apr_status_t h2_stream_add_header(h2_str
const char *name, size_t nlen,
const char *value, size_t vlen);
-/**
- * Closes the stream's input.
- *
- * @param stream stream to close intput of
- */
-apr_status_t h2_stream_close_input(h2_stream *stream);
+apr_status_t h2_stream_send_frame(h2_stream *stream, int frame_type, int flags);
+apr_status_t h2_stream_recv_frame(h2_stream *stream, int frame_type, int flags);
/*
- * Write a chunk of DATA to the stream.
+ * Process a frame of received DATA.
*
* @param stream stream to write the data to
+ * @param flags the frame flags
* @param data the beginning of the bytes to write
* @param len the number of bytes to write
*/
-apr_status_t h2_stream_write_data(h2_stream *stream,
- const char *data, size_t len, int eos);
+apr_status_t h2_stream_recv_DATA(h2_stream *stream, uint8_t flags,
+ const uint8_t *data, size_t len);
+
+apr_status_t h2_stream_flush_input(h2_stream *stream);
/**
* Reset the stream. Stream write/reads will return errors afterwards.
@@ -156,44 +229,14 @@ apr_status_t h2_stream_write_data(h2_str
void h2_stream_rst(h2_stream *stream, int error_code);
/**
- * Schedule the stream for execution. All header information must be
- * present. Use the given priority comparision callback to determine
- * order in queued streams.
- *
- * @param stream the stream to schedule
- * @param eos != 0 iff no more input will arrive
- * @param cmp priority comparision
- * @param ctx context for comparision
- */
-apr_status_t h2_stream_schedule(h2_stream *stream, int eos, int push_enabled,
- h2_stream_pri_cmp *cmp, void *ctx);
-
-/**
- * Determine if stream has been scheduled already.
+ * Determine if stream was closed already. This is true for
+ * states H2_SS_CLOSED, H2_SS_CLEANUP. But not true
+ * for H2_SS_CLOSED_L and H2_SS_CLOSED_R.
+ *
* @param stream the stream to check on
- * @return != 0 iff stream has been scheduled
- */
-int h2_stream_is_scheduled(const h2_stream *stream);
-
-struct h2_response *h2_stream_get_response(h2_stream *stream);
-
-/**
- * Set the response for this stream. Invoked when all meta data for
- * the stream response has been collected.
- *
- * @param stream the stream to set the response for
- * @param response the response data for the stream
- * @param bb bucket brigade with output data for the stream. Optional,
- * may be incomplete.
+ * @return != 0 iff stream has been closed
*/
-apr_status_t h2_stream_set_response(h2_stream *stream,
- struct h2_response *response,
- struct h2_bucket_beam *output);
-
-/**
- * Set the HTTP error status as response.
- */
-apr_status_t h2_stream_set_error(h2_stream *stream, int http_status);
+int h2_stream_was_closed(const h2_stream *stream);
/**
* Do a speculative read on the stream output to determine the
@@ -204,12 +247,13 @@ apr_status_t h2_stream_set_error(h2_stre
* may be read without blocking
* @param peos (out) != 0 iff end of stream will be reached when reading plen
* bytes (out value).
+ * @param presponse (out) the response if one became available
* @return APR_SUCCESS if out information was computed successfully.
* APR_EAGAIN if not data is available and end of stream has not been
* reached yet.
*/
-apr_status_t h2_stream_out_prepare(h2_stream *stream,
- apr_off_t *plen, int *peos);
+apr_status_t h2_stream_out_prepare(h2_stream *stream, apr_off_t *plen,
+ int *peos, h2_headers **presponse);
/**
* Read a maximum number of bytes into the bucket brigade.
@@ -237,44 +281,34 @@ apr_status_t h2_stream_read_to(h2_stream
apr_table_t *h2_stream_get_trailers(h2_stream *stream);
/**
- * Set the suspended state of the stream.
- * @param stream the stream to change state on
- * @param suspended boolean value if stream is suspended
+ * Submit any server push promises on this stream and schedule
+ * the tasks connected with these.
+ *
+ * @param stream the stream for which to submit
*/
-void h2_stream_set_suspended(h2_stream *stream, int suspended);
+apr_status_t h2_stream_submit_pushes(h2_stream *stream, h2_headers *response);
/**
- * Check if the stream has been suspended.
- * @param stream the stream to check
- * @return != 0 iff stream is suspended.
+ * Get priority information set for this stream.
*/
-int h2_stream_is_suspended(const h2_stream *stream);
+const struct h2_priority *h2_stream_get_priority(h2_stream *stream,
+ h2_headers *response);
/**
- * Check if the stream has open input.
- * @param stream the stream to check
- * @return != 0 iff stream has open input.
+ * Return a textual representation of the stream state as in RFC 7540
+ * nomenclature, all caps, underscores.
*/
-int h2_stream_input_is_open(const h2_stream *stream);
+const char *h2_stream_state_str(h2_stream *stream);
/**
- * Check if the stream has not submitted a response or RST yet.
+ * Determine if stream is ready for submitting a response or a RST
* @param stream the stream to check
- * @return != 0 iff stream has not submitted a response or RST.
*/
-int h2_stream_needs_submit(const h2_stream *stream);
+int h2_stream_is_ready(h2_stream *stream);
-/**
- * Submit any server push promises on this stream and schedule
- * the tasks connection with these.
- *
- * @param stream the stream for which to submit
- */
-apr_status_t h2_stream_submit_pushes(h2_stream *stream);
+#define H2_STRM_MSG(s, msg) \
+ "h2_stream(%ld-%d,%s): "msg, s->session->id, s->id, h2_stream_state_str(s)
-/**
- * Get priority information set for this stream.
- */
-const struct h2_priority *h2_stream_get_priority(h2_stream *stream);
+#define H2_STRM_LOG(aplogno, s, msg) aplogno H2_STRM_MSG(s, msg)
#endif /* defined(__mod_h2__h2_stream__) */
diff -up --new-file httpd-2.4.23/modules/http2/h2_stream_set.c /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_stream_set.c
--- httpd-2.4.23/modules/http2/h2_stream_set.c 2015-11-20 16:13:11.000000000 +0100
+++ /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_stream_set.c 1970-01-01 01:00:00.000000000 +0100
@@ -1,145 +0,0 @@
-/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
-
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <assert.h>
-#include <stddef.h>
-
-#include <apr_hash.h>
-#include <apr_strings.h>
-
-#include <httpd.h>
-#include <http_log.h>
-
-#include "h2_private.h"
-#include "h2_stream.h"
-#include "h2_stream_set.h"
-
-
-struct h2_stream_set {
- apr_hash_t *hash;
-};
-
-static unsigned int stream_hash(const char *key, apr_ssize_t *klen)
-{
- return (unsigned int)(*((int*)key));
-}
-
-h2_stream_set *h2_stream_set_create(apr_pool_t *pool, int max)
-{
- h2_stream_set *sp = apr_pcalloc(pool, sizeof(h2_stream_set));
- sp->hash = apr_hash_make_custom(pool, stream_hash);
-
- return sp;
-}
-
-void h2_stream_set_destroy(h2_stream_set *sp)
-{
- (void)sp;
-}
-
-h2_stream *h2_stream_set_get(h2_stream_set *sp, int stream_id)
-{
- return apr_hash_get(sp->hash, &stream_id, sizeof(stream_id));
-}
-
-void h2_stream_set_add(h2_stream_set *sp, h2_stream *stream)
-{
- apr_hash_set(sp->hash, &stream->id, sizeof(stream->id), stream);
-}
-
-void h2_stream_set_remove(h2_stream_set *sp, int stream_id)
-{
- apr_hash_set(sp->hash, &stream_id, sizeof(stream_id), NULL);
-}
-
-int h2_stream_set_is_empty(h2_stream_set *sp)
-{
- return apr_hash_count(sp->hash) == 0;
-}
-
-apr_size_t h2_stream_set_size(h2_stream_set *sp)
-{
- return apr_hash_count(sp->hash);
-}
-
-typedef struct {
- h2_stream_set_iter_fn *iter;
- void *ctx;
-} iter_ctx;
-
-static int hash_iter(void *ctx, const void *key, apr_ssize_t klen,
- const void *val)
-{
- iter_ctx *ictx = ctx;
- return ictx->iter(ictx->ctx, (h2_stream*)val);
-}
-
-void h2_stream_set_iter(h2_stream_set *sp,
- h2_stream_set_iter_fn *iter, void *ctx)
-{
- iter_ctx ictx;
- ictx.iter = iter;
- ictx.ctx = ctx;
- apr_hash_do(hash_iter, &ictx, sp->hash);
-}
-
-static int unsubmitted_iter(void *ctx, h2_stream *stream)
-{
- if (h2_stream_needs_submit(stream)) {
- *((int *)ctx) = 1;
- return 0;
- }
- return 1;
-}
-
-int h2_stream_set_has_unsubmitted(h2_stream_set *sp)
-{
- int has_unsubmitted = 0;
- h2_stream_set_iter(sp, unsubmitted_iter, &has_unsubmitted);
- return has_unsubmitted;
-}
-
-static int input_open_iter(void *ctx, h2_stream *stream)
-{
- if (h2_stream_input_is_open(stream)) {
- *((int *)ctx) = 1;
- return 0;
- }
- return 1;
-}
-
-int h2_stream_set_has_open_input(h2_stream_set *sp)
-{
- int has_input_open = 0;
- h2_stream_set_iter(sp, input_open_iter, &has_input_open);
- return has_input_open;
-}
-
-static int suspended_iter(void *ctx, h2_stream *stream)
-{
- if (h2_stream_is_suspended(stream)) {
- *((int *)ctx) = 1;
- return 0;
- }
- return 1;
-}
-
-int h2_stream_set_has_suspended(h2_stream_set *sp)
-{
- int has_suspended = 0;
- h2_stream_set_iter(sp, suspended_iter, &has_suspended);
- return has_suspended;
-}
-
diff -up --new-file httpd-2.4.23/modules/http2/h2_stream_set.h /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_stream_set.h
--- httpd-2.4.23/modules/http2/h2_stream_set.h 2015-11-20 16:13:11.000000000 +0100
+++ /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_stream_set.h 1970-01-01 01:00:00.000000000 +0100
@@ -1,51 +0,0 @@
-/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
-
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __mod_h2__h2_stream_set__
-#define __mod_h2__h2_stream_set__
-
-/**
- * A set of h2_stream instances. Allows lookup by stream id
- * and other criteria.
- */
-
-typedef h2_stream *h2_stream_set_match_fn(void *ctx, h2_stream *stream);
-typedef int h2_stream_set_iter_fn(void *ctx, h2_stream *stream);
-
-typedef struct h2_stream_set h2_stream_set;
-
-
-h2_stream_set *h2_stream_set_create(apr_pool_t *pool, int max);
-
-void h2_stream_set_destroy(h2_stream_set *sp);
-
-void h2_stream_set_add(h2_stream_set *sp, h2_stream *stream);
-
-h2_stream *h2_stream_set_get(h2_stream_set *sp, int stream_id);
-
-void h2_stream_set_remove(h2_stream_set *sp, int stream_id);
-
-void h2_stream_set_iter(h2_stream_set *sp,
- h2_stream_set_iter_fn *iter, void *ctx);
-
-int h2_stream_set_is_empty(h2_stream_set *sp);
-
-apr_size_t h2_stream_set_size(h2_stream_set *sp);
-
-int h2_stream_set_has_unsubmitted(h2_stream_set *sp);
-int h2_stream_set_has_open_input(h2_stream_set *sp);
-int h2_stream_set_has_suspended(h2_stream_set *sp);
-
-#endif /* defined(__mod_h2__h2_stream_set__) */
diff -up --new-file httpd-2.4.23/modules/http2/h2_switch.c /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_switch.c
--- httpd-2.4.23/modules/http2/h2_switch.c 2016-02-10 00:09:24.000000000 +0100
+++ /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_switch.c 2017-07-04 14:34:15.000000000 +0200
@@ -55,6 +55,10 @@ static int h2_protocol_propose(conn_rec
const char **protos = is_tls? h2_tls_protos : h2_clear_protos;
(void)s;
+ if (!h2_mpm_supported()) {
+ return DECLINED;
+ }
+
if (strcmp(AP_PROTOCOL_HTTP1, ap_get_protocol(c))) {
/* We do not know how to switch from anything else but http/1.1.
*/
@@ -127,6 +131,10 @@ static int h2_protocol_switch(conn_rec *
const char **p = protos;
(void)s;
+ if (!h2_mpm_supported()) {
+ return DECLINED;
+ }
+
while (*p) {
if (!strcmp(*p, protocol)) {
found = 1;
@@ -160,6 +168,7 @@ static int h2_protocol_switch(conn_rec *
if (status != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, status, r, APLOGNO(03088)
"session setup");
+ h2_ctx_clear(c);
return status;
}
diff -up --new-file httpd-2.4.23/modules/http2/h2_switch.h /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_switch.h
--- httpd-2.4.23/modules/http2/h2_switch.h 2015-09-07 19:37:19.000000000 +0200
+++ /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_switch.h 2016-08-25 14:48:18.000000000 +0200
@@ -17,7 +17,7 @@
#define __mod_h2__h2_switch__
/*
- * One time, post config intialization.
+ * One time, post config initialization.
*/
apr_status_t h2_switch_init(apr_pool_t *pool, server_rec *s);
diff -up --new-file httpd-2.4.23/modules/http2/h2_task.c /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_task.c
--- httpd-2.4.23/modules/http2/h2_task.c 2016-06-22 15:18:13.000000000 +0200
+++ /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_task.c 2017-10-13 10:37:45.000000000 +0200
@@ -17,7 +17,6 @@
#include <stddef.h>
#include <apr_atomic.h>
-#include <apr_thread_cond.h>
#include <apr_strings.h>
#include <httpd.h>
@@ -42,12 +41,27 @@
#include "h2_h2.h"
#include "h2_mplx.h"
#include "h2_request.h"
+#include "h2_headers.h"
#include "h2_session.h"
#include "h2_stream.h"
#include "h2_task.h"
-#include "h2_worker.h"
#include "h2_util.h"
+static void H2_TASK_OUT_LOG(int lvl, h2_task *task, apr_bucket_brigade *bb,
+ const char *tag)
+{
+ if (APLOG_C_IS_LEVEL(task->c, lvl)) {
+ conn_rec *c = task->c;
+ char buffer[4 * 1024];
+ const char *line = "(null)";
+ apr_size_t len, bmax = sizeof(buffer)/sizeof(buffer[0]);
+
+ len = h2_util_bb_print(buffer, bmax, tag, "", bb);
+ ap_log_cerror(APLOG_MARK, lvl, 0, c, "bb_dump(%s): %s",
+ task->id, len? buffer : line);
+ }
+}
+
/*******************************************************************************
* task input handling
******************************************************************************/
@@ -59,107 +73,186 @@ static int input_ser_header(void *ctx, c
return 1;
}
-static void make_chunk(h2_task *task, apr_bucket_brigade *bb,
- apr_bucket *first, apr_uint64_t chunk_len,
- apr_bucket *tail)
-{
- /* Surround the buckets [first, tail[ with new buckets carrying the
- * HTTP/1.1 chunked encoding format. If tail is NULL, the chunk extends
- * to the end of the brigade. */
- char buffer[128];
- apr_bucket *c;
- int len;
-
- len = apr_snprintf(buffer, H2_ALEN(buffer),
- "%"APR_UINT64_T_HEX_FMT"\r\n", chunk_len);
- c = apr_bucket_heap_create(buffer, len, NULL, bb->bucket_alloc);
- APR_BUCKET_INSERT_BEFORE(first, c);
- c = apr_bucket_heap_create("\r\n", 2, NULL, bb->bucket_alloc);
- if (tail) {
- APR_BUCKET_INSERT_BEFORE(tail, c);
- }
- else {
- APR_BRIGADE_INSERT_TAIL(bb, c);
- }
+/*******************************************************************************
+ * task output handling
+ ******************************************************************************/
+
+static apr_status_t open_output(h2_task *task)
+{
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, task->c, APLOGNO(03348)
+ "h2_task(%s): open output to %s %s %s",
+ task->id, task->request->method,
+ task->request->authority,
+ task->request->path);
+ task->output.opened = 1;
+ return h2_mplx_out_open(task->mplx, task->stream_id, task->output.beam);
}
-static apr_status_t input_handle_eos(h2_task *task, request_rec *r,
- apr_bucket *b)
+static apr_status_t send_out(h2_task *task, apr_bucket_brigade* bb, int block)
{
- apr_status_t status = APR_SUCCESS;
- apr_bucket_brigade *bb = task->input.bb;
- apr_table_t *t = task->request? task->request->trailers : NULL;
+ apr_off_t written, left;
+ apr_status_t status;
- if (task->input.chunked) {
- task->input.tmp = apr_brigade_split_ex(bb, b, task->input.tmp);
- if (t && !apr_is_empty_table(t)) {
- status = apr_brigade_puts(bb, NULL, NULL, "0\r\n");
- apr_table_do(input_ser_header, task, t, NULL);
- status = apr_brigade_puts(bb, NULL, NULL, "\r\n");
- }
- else {
- status = apr_brigade_puts(bb, NULL, NULL, "0\r\n\r\n");
+ apr_brigade_length(bb, 0, &written);
+ H2_TASK_OUT_LOG(APLOG_TRACE2, task, bb, "h2_task send_out");
+ h2_beam_log(task->output.beam, task->c, APLOG_TRACE2, "send_out(before)");
+ /* engines send unblocking */
+ status = h2_beam_send(task->output.beam, bb,
+ block? APR_BLOCK_READ : APR_NONBLOCK_READ);
+ h2_beam_log(task->output.beam, task->c, APLOG_TRACE2, "send_out(after)");
+
+ if (APR_STATUS_IS_EAGAIN(status)) {
+ apr_brigade_length(bb, 0, &left);
+ written -= left;
+ status = APR_SUCCESS;
+ }
+ if (status == APR_SUCCESS) {
+ if (h2_task_logio_add_bytes_out) {
+ h2_task_logio_add_bytes_out(task->c, written);
}
- APR_BRIGADE_CONCAT(bb, task->input.tmp);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, task->c,
+ "h2_task(%s): send_out done", task->id);
}
- else if (r && t && !apr_is_empty_table(t)){
- /* trailers passed in directly. */
- apr_table_overlap(r->trailers_in, t, APR_OVERLAP_TABLES_SET);
+ else {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, task->c,
+ "h2_task(%s): send_out (%ld bytes)",
+ task->id, (long)written);
}
- task->input.eos_written = 1;
return status;
}
-static apr_status_t input_append_eos(h2_task *task, request_rec *r)
+/* Bring the data from the brigade (which represents the result of the
+ * request_rec out filter chain) into the h2_mplx for further sending
+ * on the master connection.
+ */
+static apr_status_t slave_out(h2_task *task, ap_filter_t* f,
+ apr_bucket_brigade* bb)
{
- apr_status_t status = APR_SUCCESS;
- apr_bucket_brigade *bb = task->input.bb;
- apr_table_t *t = task->request? task->request->trailers : NULL;
+ apr_bucket *b;
+ apr_status_t rv = APR_SUCCESS;
+ int flush = 0, blocking;
+
+ if (task->frozen) {
+ h2_util_bb_log(task->c, task->stream_id, APLOG_TRACE2,
+ "frozen task output write, ignored", bb);
+ while (!APR_BRIGADE_EMPTY(bb)) {
+ b = APR_BRIGADE_FIRST(bb);
+ if (AP_BUCKET_IS_EOR(b)) {
+ APR_BUCKET_REMOVE(b);
+ task->eor = b;
+ }
+ else {
+ apr_bucket_delete(b);
+ }
+ }
+ return APR_SUCCESS;
+ }
- if (task->input.chunked) {
- if (t && !apr_is_empty_table(t)) {
- status = apr_brigade_puts(bb, NULL, NULL, "0\r\n");
- apr_table_do(input_ser_header, task, t, NULL);
- status = apr_brigade_puts(bb, NULL, NULL, "\r\n");
+send:
+ /* we send block once we opened the output, so someone is there
+ * reading it *and* the task is not assigned to a h2_req_engine */
+ blocking = (!task->assigned && task->output.opened);
+ for (b = APR_BRIGADE_FIRST(bb);
+ b != APR_BRIGADE_SENTINEL(bb);
+ b = APR_BUCKET_NEXT(b)) {
+ if (APR_BUCKET_IS_FLUSH(b) || APR_BUCKET_IS_EOS(b) || AP_BUCKET_IS_EOR(b)) {
+ flush = 1;
+ break;
}
- else {
- status = apr_brigade_puts(bb, NULL, NULL, "0\r\n\r\n");
+ }
+
+ if (task->output.bb && !APR_BRIGADE_EMPTY(task->output.bb)) {
+ /* still have data buffered from previous attempt.
+ * setaside and append new data and try to pass the complete data */
+ if (!APR_BRIGADE_EMPTY(bb)) {
+ if (APR_SUCCESS != (rv = ap_save_brigade(f, &task->output.bb, &bb, task->pool))) {
+ goto out;
+ }
}
+ rv = send_out(task, task->output.bb, blocking);
}
- else if (r && t && !apr_is_empty_table(t)){
- /* trailers passed in directly. */
- apr_table_overlap(r->trailers_in, t, APR_OVERLAP_TABLES_SET);
+ else {
+ /* no data buffered previously, pass brigade directly */
+ rv = send_out(task, bb, blocking);
+
+ if (APR_SUCCESS == rv && !APR_BRIGADE_EMPTY(bb)) {
+ /* output refused to buffer it all, time to open? */
+ if (!task->output.opened && APR_SUCCESS == (rv = open_output(task))) {
+ /* Make another attempt to send the data. With the output open,
+ * the call might be blocking and send all data, so we do not need
+ * to save the brigade */
+ goto send;
+ }
+ else if (blocking && flush) {
+ /* Need to keep on doing this. */
+ goto send;
+ }
+
+ if (APR_SUCCESS == rv) {
+ /* could not write all, buffer the rest */
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, rv, task->c, APLOGNO(03405)
+ "h2_slave_out(%s): saving brigade", task->id);
+ ap_assert(NULL);
+ rv = ap_save_brigade(f, &task->output.bb, &bb, task->pool);
+ flush = 1;
+ }
+ }
}
- APR_BRIGADE_INSERT_TAIL(bb, apr_bucket_eos_create(bb->bucket_alloc));
- task->input.eos_written = 1;
- return status;
+
+ if (APR_SUCCESS == rv && !task->output.opened && flush) {
+ /* got a flush or could not write all, time to tell someone to read */
+ rv = open_output(task);
+ }
+out:
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, rv, task->c,
+ "h2_slave_out(%s): slave_out leave", task->id);
+ return rv;
+}
+
+static apr_status_t output_finish(h2_task *task)
+{
+ if (!task->output.opened) {
+ return open_output(task);
+ }
+ return APR_SUCCESS;
}
-static apr_status_t input_read(h2_task *task, ap_filter_t* f,
- apr_bucket_brigade* bb, ap_input_mode_t mode,
- apr_read_type_e block, apr_off_t readbytes)
+/*******************************************************************************
+ * task slave connection filters
+ ******************************************************************************/
+
+static apr_status_t h2_filter_slave_in(ap_filter_t* f,
+ apr_bucket_brigade* bb,
+ ap_input_mode_t mode,
+ apr_read_type_e block,
+ apr_off_t readbytes)
{
+ h2_task *task;
apr_status_t status = APR_SUCCESS;
- apr_bucket *b, *next, *first_data;
- apr_off_t bblen = 0;
+ apr_bucket *b, *next;
+ apr_off_t bblen;
+ const int trace1 = APLOGctrace1(f->c);
+ apr_size_t rmax = ((readbytes <= APR_SIZE_MAX)?
+ (apr_size_t)readbytes : APR_SIZE_MAX);
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c,
- "h2_task(%s): read, mode=%d, block=%d, readbytes=%ld",
- task->id, mode, block, (long)readbytes);
+ task = h2_ctx_cget_task(f->c);
+ ap_assert(task);
+
+ if (trace1) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c,
+ "h2_slave_in(%s): read, mode=%d, block=%d, readbytes=%ld",
+ task->id, mode, block, (long)readbytes);
+ }
if (mode == AP_MODE_INIT) {
return ap_get_brigade(f->c->input_filters, bb, mode, block, readbytes);
}
- if (f->c->aborted || !task->request) {
+ if (f->c->aborted) {
return APR_ECONNABORTED;
}
if (!task->input.bb) {
- if (!task->input.eos_written) {
- input_append_eos(task, f->r);
- return APR_SUCCESS;
- }
return APR_EOF;
}
@@ -173,25 +266,25 @@ static apr_status_t input_read(h2_task *
}
}
- while (APR_BRIGADE_EMPTY(task->input.bb) && !task->input.eos) {
+ while (APR_BRIGADE_EMPTY(task->input.bb)) {
/* Get more input data for our request. */
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c,
- "h2_task(%s): get more data from mplx, block=%d, "
- "readbytes=%ld, queued=%ld",
- task->id, block, (long)readbytes, (long)bblen);
-
- /* Override the block mode we get called with depending on the input's
- * setting. */
+ if (trace1) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c,
+ "h2_slave_in(%s): get more data from mplx, block=%d, "
+ "readbytes=%ld", task->id, block, (long)readbytes);
+ }
if (task->input.beam) {
status = h2_beam_receive(task->input.beam, task->input.bb, block,
- H2MIN(readbytes, 32*1024));
+ 128*1024);
}
else {
status = APR_EOF;
}
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, f->c,
- "h2_task(%s): read returned", task->id);
+ if (trace1) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, f->c,
+ "h2_slave_in(%s): read returned", task->id);
+ }
if (APR_STATUS_IS_EAGAIN(status)
&& (mode == AP_MODE_GETLINE || block == APR_BLOCK_READ)) {
/* chunked input handling does not seem to like it if we
@@ -200,69 +293,39 @@ static apr_status_t input_read(h2_task *
status = APR_SUCCESS;
}
else if (APR_STATUS_IS_EOF(status)) {
- task->input.eos = 1;
+ break;
}
else if (status != APR_SUCCESS) {
return status;
}
-
- /* Inspect the buckets received, detect EOS and apply
- * chunked encoding if necessary */
- h2_util_bb_log(f->c, task->stream_id, APLOG_TRACE2,
- "input.beam recv raw", task->input.bb);
- first_data = NULL;
- bblen = 0;
- for (b = APR_BRIGADE_FIRST(task->input.bb);
- b != APR_BRIGADE_SENTINEL(task->input.bb); b = next) {
- next = APR_BUCKET_NEXT(b);
- if (APR_BUCKET_IS_METADATA(b)) {
- if (first_data && task->input.chunked) {
- make_chunk(task, task->input.bb, first_data, bblen, b);
- first_data = NULL;
- bblen = 0;
- }
- if (APR_BUCKET_IS_EOS(b)) {
- task->input.eos = 1;
- input_handle_eos(task, f->r, b);
- h2_util_bb_log(f->c, task->stream_id, APLOG_TRACE2,
- "input.bb after handle eos",
- task->input.bb);
- }
- }
- else if (b->length == 0) {
- apr_bucket_delete(b);
- }
- else {
- if (!first_data) {
- first_data = b;
- }
- bblen += b->length;
- }
- }
- if (first_data && task->input.chunked) {
- make_chunk(task, task->input.bb, first_data, bblen, NULL);
- }
-
+
+ if (trace1) {
+ h2_util_bb_log(f->c, task->stream_id, APLOG_TRACE2,
+ "input.beam recv raw", task->input.bb);
+ }
if (h2_task_logio_add_bytes_in) {
+ apr_brigade_length(bb, 0, &bblen);
h2_task_logio_add_bytes_in(f->c, bblen);
}
}
- if (task->input.eos) {
- if (!task->input.eos_written) {
- input_append_eos(task, f->r);
- }
- if (APR_BRIGADE_EMPTY(task->input.bb)) {
- return APR_EOF;
- }
+ /* Nothing there, no more data to get. Return APR_EAGAIN on
+ * speculative reads, this is ap_check_pipeline()'s trick to
+ * see if the connection needs closing. */
+ if (status == APR_EOF && APR_BRIGADE_EMPTY(task->input.bb)) {
+ return (mode == AP_MODE_SPECULATIVE)? APR_EAGAIN : APR_EOF;
}
- h2_util_bb_log(f->c, task->stream_id, APLOG_TRACE2,
- "task_input.bb", task->input.bb);
+ if (trace1) {
+ h2_util_bb_log(f->c, task->stream_id, APLOG_TRACE2,
+ "task_input.bb", task->input.bb);
+ }
if (APR_BRIGADE_EMPTY(task->input.bb)) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c,
- "h2_task(%s): no data", task->id);
+ if (trace1) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c,
+ "h2_slave_in(%s): no data", task->id);
+ }
return (block == APR_NONBLOCK_READ)? APR_EAGAIN : APR_EOF;
}
@@ -271,10 +334,10 @@ static apr_status_t input_read(h2_task *
APR_BRIGADE_CONCAT(bb, task->input.bb);
}
else if (mode == AP_MODE_READBYTES) {
- status = h2_brigade_concat_length(bb, task->input.bb, readbytes);
+ status = h2_brigade_concat_length(bb, task->input.bb, rmax);
}
else if (mode == AP_MODE_SPECULATIVE) {
- status = h2_brigade_copy_length(bb, task->input.bb, readbytes);
+ status = h2_brigade_copy_length(bb, task->input.bb, rmax);
}
else if (mode == AP_MODE_GETLINE) {
/* we are reading a single LF line, e.g. the HTTP headers.
@@ -287,249 +350,72 @@ static apr_status_t input_read(h2_task *
apr_size_t len = sizeof(buffer)-1;
apr_brigade_flatten(bb, buffer, &len);
buffer[len] = 0;
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c,
- "h2_task(%s): getline: %s",
- task->id, buffer);
+ if (trace1) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c,
+ "h2_slave_in(%s): getline: %s",
+ task->id, buffer);
+ }
}
}
else {
/* Hmm, well. There is mode AP_MODE_EATCRLF, but we chose not
* to support it. Seems to work. */
ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_ENOTIMPL, f->c,
- APLOGNO(02942)
- "h2_task, unsupported READ mode %d", mode);
+ APLOGNO(03472)
+ "h2_slave_in(%s), unsupported READ mode %d",
+ task->id, mode);
status = APR_ENOTIMPL;
}
- if (APLOGctrace1(f->c)) {
+ if (trace1) {
apr_brigade_length(bb, 0, &bblen);
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c,
- "h2_task(%s): return %ld data bytes",
- task->id, (long)bblen);
+ "h2_slave_in(%s): %ld data bytes", task->id, (long)bblen);
}
return status;
}
-/*******************************************************************************
- * task output handling
- ******************************************************************************/
-
-static apr_status_t open_response(h2_task *task)
+static apr_status_t h2_filter_slave_output(ap_filter_t* filter,
+ apr_bucket_brigade* brigade)
{
- h2_response *response;
- response = h2_from_h1_get_response(task->output.from_h1);
- if (!response) {
- /* This happens currently when ap_die(status, r) is invoked
- * by a read request filter. */
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, task->c, APLOGNO(03204)
- "h2_task(%s): write without response for %s %s %s",
- task->id,
- task->request->method,
- task->request->authority,
- task->request->path);
- task->c->aborted = 1;
- return APR_ECONNABORTED;
- }
-
- if (h2_task_logio_add_bytes_out) {
- /* count headers as if we'd do a HTTP/1.1 serialization */
- task->output.written = h2_util_table_bytes(response->headers, 3)+1;
- h2_task_logio_add_bytes_out(task->c, task->output.written);
- }
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, task->c, APLOGNO(03348)
- "h2_task(%s): open response to %s %s %s",
- task->id, task->request->method,
- task->request->authority,
- task->request->path);
- return h2_mplx_out_open(task->mplx, task->stream_id, response);
-}
-
-static apr_status_t send_out(h2_task *task, apr_bucket_brigade* bb)
-{
- apr_off_t written, left;
+ h2_task *task = h2_ctx_cget_task(filter->c);
apr_status_t status;
-
- apr_brigade_length(bb, 0, &written);
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c,
- "h2_task(%s): write response body (%ld bytes)",
- task->id, (long)written);
- status = h2_beam_send(task->output.beam, bb,
- task->blocking? APR_BLOCK_READ
- : APR_NONBLOCK_READ);
- if (APR_STATUS_IS_EAGAIN(status)) {
- apr_brigade_length(bb, 0, &left);
- written -= left;
- status = APR_SUCCESS;
- }
- if (status == APR_SUCCESS) {
- task->output.written += written;
- if (h2_task_logio_add_bytes_out) {
- h2_task_logio_add_bytes_out(task->c, written);
- }
+ ap_assert(task);
+ status = slave_out(task, filter, brigade);
+ if (status != APR_SUCCESS) {
+ h2_task_rst(task, H2_ERR_INTERNAL_ERROR);
}
return status;
}
-/* Bring the data from the brigade (which represents the result of the
- * request_rec out filter chain) into the h2_mplx for further sending
- * on the master connection.
- */
-static apr_status_t output_write(h2_task *task, ap_filter_t* f,
- apr_bucket_brigade* bb)
+static apr_status_t h2_filter_parse_h1(ap_filter_t* f, apr_bucket_brigade* bb)
{
- apr_bucket *b;
- apr_status_t status = APR_SUCCESS;
- int flush = 0;
-
- if (APR_BRIGADE_EMPTY(bb)) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c,
- "h2_task(%s): empty write", task->id);
- return APR_SUCCESS;
- }
-
- if (task->frozen) {
- h2_util_bb_log(task->c, task->stream_id, APLOG_TRACE2,
- "frozen task output write, ignored", bb);
- while (!APR_BRIGADE_EMPTY(bb)) {
- b = APR_BRIGADE_FIRST(bb);
- if (AP_BUCKET_IS_EOR(b)) {
- APR_BUCKET_REMOVE(b);
- task->eor = b;
- }
- else {
- apr_bucket_delete(b);
- }
- }
- return APR_SUCCESS;
- }
-
- if (!task->output.beam) {
- h2_beam_create(&task->output.beam, task->pool,
- task->stream_id, "output", 0);
- }
-
- /* Attempt to write saved brigade first */
- if (task->output.bb && !APR_BRIGADE_EMPTY(task->output.bb)) {
- status = send_out(task, task->output.bb);
- if (status != APR_SUCCESS) {
- return status;
- }
- }
+ h2_task *task = h2_ctx_cget_task(f->c);
+ apr_status_t status;
- /* If there is nothing saved (anymore), try to write the brigade passed */
- if ((!task->output.bb || APR_BRIGADE_EMPTY(task->output.bb))
- && !APR_BRIGADE_EMPTY(bb)) {
- /* check if we have a flush before the end-of-request */
- if (!task->output.response_open) {
- for (b = APR_BRIGADE_FIRST(bb);
- b != APR_BRIGADE_SENTINEL(bb);
- b = APR_BUCKET_NEXT(b)) {
- if (AP_BUCKET_IS_EOR(b)) {
- break;
- }
- else if (APR_BUCKET_IS_FLUSH(b)) {
- flush = 1;
- }
- }
- }
-
- status = send_out(task, bb);
- if (status != APR_SUCCESS) {
+ ap_assert(task);
+ /* There are cases where we need to parse a serialized http/1.1
+ * response. One example is a 100-continue answer in serialized mode
+ * or via a mod_proxy setup */
+ while (bb && !task->output.sent_response) {
+ status = h2_from_h1_parse_response(task, f, bb);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, f->c,
+ "h2_task(%s): parsed response", task->id);
+ if (APR_BRIGADE_EMPTY(bb) || status != APR_SUCCESS) {
return status;
}
}
- /* If the passed brigade is not empty, save it before return */
- if (!APR_BRIGADE_EMPTY(bb)) {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, task->c, APLOGNO(03405)
- "h2_task(%s): could not write all, saving brigade",
- task->id);
- if (!task->output.bb) {
- task->output.bb = apr_brigade_create(task->pool,
- task->c->bucket_alloc);
- }
- return ap_save_brigade(f, &task->output.bb, &bb, task->pool);
- }
-
- if (!task->output.response_open
- && (flush || h2_beam_get_mem_used(task->output.beam) > (32*1024))) {
- /* if we have enough buffered or we got a flush bucket, open
- * the response now. */
- status = open_response(task);
- task->output.response_open = 1;
- }
-
- return status;
-}
-
-static apr_status_t output_finish(h2_task *task)
-{
- apr_status_t status = APR_SUCCESS;
-
- if (!task->output.response_open) {
- status = open_response(task);
- task->output.response_open = 1;
- }
- return status;
-}
-
-/*******************************************************************************
- * task slave connection filters
- ******************************************************************************/
-
-static apr_status_t h2_filter_stream_input(ap_filter_t* filter,
- apr_bucket_brigade* brigade,
- ap_input_mode_t mode,
- apr_read_type_e block,
- apr_off_t readbytes)
-{
- h2_task *task = h2_ctx_cget_task(filter->c);
- AP_DEBUG_ASSERT(task);
- return input_read(task, filter, brigade, mode, block, readbytes);
-}
-
-static apr_status_t h2_filter_stream_output(ap_filter_t* filter,
- apr_bucket_brigade* brigade)
-{
- h2_task *task = h2_ctx_cget_task(filter->c);
- AP_DEBUG_ASSERT(task);
- return output_write(task, filter, brigade);
-}
-
-static apr_status_t h2_filter_read_response(ap_filter_t* filter,
- apr_bucket_brigade* bb)
-{
- h2_task *task = h2_ctx_cget_task(filter->c);
- AP_DEBUG_ASSERT(task);
- if (!task->output.from_h1) {
- return APR_ECONNABORTED;
- }
- return h2_from_h1_read_response(task->output.from_h1, filter, bb);
+ return ap_pass_brigade(f->next, bb);
}
/*******************************************************************************
* task things
******************************************************************************/
-void h2_task_set_response(h2_task *task, h2_response *response)
-{
- AP_DEBUG_ASSERT(response);
- AP_DEBUG_ASSERT(!task->response);
- /* we used to clone the response into out own pool. But
- * we have much tighter control over the EOR bucket nowadays,
- * so just use the instance given */
- task->response = response;
- if (response->rst_error) {
- h2_task_rst(task, response->rst_error);
- }
-}
-
-
int h2_task_can_redo(h2_task *task) {
- if (task->submitted
- || (task->input.beam && h2_beam_was_received(task->input.beam))
- || !task->request) {
+ if (task->input.beam && h2_beam_was_received(task->input.beam)) {
/* cannot repeat that. */
return 0;
}
@@ -540,7 +426,6 @@ int h2_task_can_redo(h2_task *task) {
void h2_task_redo(h2_task *task)
{
- task->response = NULL;
task->rst_error = 0;
}
@@ -548,9 +433,9 @@ void h2_task_rst(h2_task *task, int erro
{
task->rst_error = error;
if (task->input.beam) {
- h2_beam_abort(task->input.beam);
+ h2_beam_leave(task->input.beam);
}
- if (task->output.beam) {
+ if (!task->worker_done) {
h2_beam_abort(task->output.beam);
}
if (task->c) {
@@ -582,15 +467,18 @@ void h2_task_register_hooks(void)
ap_hook_process_connection(h2_task_process_conn,
NULL, NULL, APR_HOOK_FIRST);
- ap_register_output_filter("H2_RESPONSE", h2_response_output_filter,
- NULL, AP_FTYPE_PROTOCOL);
- ap_register_input_filter("H2_TO_H1", h2_filter_stream_input,
+ ap_register_input_filter("H2_SLAVE_IN", h2_filter_slave_in,
NULL, AP_FTYPE_NETWORK);
- ap_register_output_filter("H1_TO_H2", h2_filter_stream_output,
+ ap_register_output_filter("H2_SLAVE_OUT", h2_filter_slave_output,
+ NULL, AP_FTYPE_NETWORK);
+ ap_register_output_filter("H2_PARSE_H1", h2_filter_parse_h1,
NULL, AP_FTYPE_NETWORK);
- ap_register_output_filter("H1_TO_H2_RESP", h2_filter_read_response,
+
+ ap_register_input_filter("H2_REQUEST", h2_filter_request_in,
+ NULL, AP_FTYPE_PROTOCOL);
+ ap_register_output_filter("H2_RESPONSE", h2_filter_headers_out,
NULL, AP_FTYPE_PROTOCOL);
- ap_register_output_filter("H2_TRAILERS", h2_response_trailers_filter,
+ ap_register_output_filter("H2_TRAILERS_OUT", h2_filter_trailers_out,
NULL, AP_FTYPE_PROTOCOL);
}
@@ -616,53 +504,51 @@ static int h2_task_pre_conn(conn_rec* c,
if (h2_ctx_is_task(ctx)) {
ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c,
"h2_h2, pre_connection, found stream task");
-
- /* Add our own, network level in- and output filters.
- */
- ap_add_input_filter("H2_TO_H1", NULL, NULL, c);
- ap_add_output_filter("H1_TO_H2", NULL, NULL, c);
+ ap_add_input_filter("H2_SLAVE_IN", NULL, NULL, c);
+ ap_add_output_filter("H2_PARSE_H1", NULL, NULL, c);
+ ap_add_output_filter("H2_SLAVE_OUT", NULL, NULL, c);
}
return OK;
}
-h2_task *h2_task_create(conn_rec *c, const h2_request *req,
- h2_bucket_beam *input, h2_mplx *mplx)
+h2_task *h2_task_create(conn_rec *slave, int stream_id,
+ const h2_request *req, h2_mplx *m,
+ h2_bucket_beam *input,
+ apr_interval_time_t timeout,
+ apr_size_t output_max_mem)
{
apr_pool_t *pool;
h2_task *task;
- apr_pool_create(&pool, c->pool);
+ ap_assert(slave);
+ ap_assert(req);
+
+ apr_pool_create(&pool, slave->pool);
task = apr_pcalloc(pool, sizeof(h2_task));
if (task == NULL) {
- ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_ENOMEM, c,
- APLOGNO(02941) "h2_task(%ld-%d): create stream task",
- c->id, req->id);
return NULL;
}
-
- task->id = apr_psprintf(pool, "%ld-%d", c->id, req->id);
- task->stream_id = req->id;
- task->c = c;
- task->mplx = mplx;
- task->c->keepalives = mplx->c->keepalives;
+ task->id = "000";
+ task->stream_id = stream_id;
+ task->c = slave;
+ task->mplx = m;
task->pool = pool;
task->request = req;
- task->ser_headers = req->serialize;
- task->blocking = 1;
+ task->timeout = timeout;
task->input.beam = input;
-
- apr_thread_cond_create(&task->cond, pool);
+ task->output.max_buffer = output_max_mem;
- h2_ctx_create_for(c, task);
return task;
}
void h2_task_destroy(h2_task *task)
{
if (task->output.beam) {
+ h2_beam_log(task->output.beam, task->c, APLOG_TRACE2, "task_destroy");
h2_beam_destroy(task->output.beam);
task->output.beam = NULL;
}
+
if (task->eor) {
apr_bucket_destroy(task->eor);
}
@@ -671,57 +557,91 @@ void h2_task_destroy(h2_task *task)
}
}
-void h2_task_set_io_blocking(h2_task *task, int blocking)
-{
- task->blocking = blocking;
-}
-
-apr_status_t h2_task_do(h2_task *task, apr_thread_t *thread)
+apr_status_t h2_task_do(h2_task *task, apr_thread_t *thread, int worker_id)
{
- AP_DEBUG_ASSERT(task);
+ conn_rec *c;
- task->input.block = APR_BLOCK_READ;
- task->input.chunked = task->request->chunked;
- task->input.eos = !task->request->body;
- if (task->input.eos && !task->input.chunked && !task->ser_headers) {
- /* We do not serialize/chunk and have eos already, no need to
- * create a bucket brigade. */
- task->input.bb = NULL;
- task->input.eos_written = 1;
- }
- else {
- task->input.bb = apr_brigade_create(task->pool, task->c->bucket_alloc);
- if (task->ser_headers) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c,
- "h2_task(%s): serialize request %s %s",
- task->id, task->request->method, task->request->path);
- apr_brigade_printf(task->input.bb, NULL,
- NULL, "%s %s HTTP/1.1\r\n",
- task->request->method, task->request->path);
- apr_table_do(input_ser_header, task, task->request->headers, NULL);
- apr_brigade_puts(task->input.bb, NULL, NULL, "\r\n");
+ ap_assert(task);
+ c = task->c;
+ task->worker_started = 1;
+ task->started_at = apr_time_now();
+
+ if (c->master) {
+ /* Each conn_rec->id is supposed to be unique at a point in time. Since
+ * some modules (and maybe external code) uses this id as an identifier
+ * for the request_rec they handle, it needs to be unique for slave
+ * connections also.
+ * The connection id is generated by the MPM and most MPMs use the formula
+ * id := (child_num * max_threads) + thread_num
+ * which means that there is a maximum id of about
+ * idmax := max_child_count * max_threads
+ * If we assume 2024 child processes with 2048 threads max, we get
+ * idmax ~= 2024 * 2048 = 2 ** 22
+ * On 32 bit systems, we have not much space left, but on 64 bit systems
+ * (and higher?) we can use the upper 32 bits without fear of collision.
+ * 32 bits is just what we need, since a connection can only handle so
+ * many streams.
+ */
+ int slave_id, free_bits;
+
+ task->id = apr_psprintf(task->pool, "%ld-%d", c->master->id,
+ task->stream_id);
+ if (sizeof(unsigned long) >= 8) {
+ free_bits = 32;
+ slave_id = task->stream_id;
}
- if (task->input.eos) {
- input_append_eos(task, NULL);
+ else {
+ /* Assume we have a more limited number of threads/processes
+ * and h2 workers on a 32-bit system. Use the worker instead
+ * of the stream id. */
+ free_bits = 8;
+ slave_id = worker_id;
}
+ task->c->id = (c->master->id << free_bits)^slave_id;
+ c->keepalive = AP_CONN_KEEPALIVE;
+ }
+
+ h2_beam_create(&task->output.beam, c->pool, task->stream_id, "output",
+ H2_BEAM_OWNER_SEND, 0, task->timeout);
+ if (!task->output.beam) {
+ return APR_ENOMEM;
}
- task->output.from_h1 = h2_from_h1_create(task->stream_id, task->pool);
+ h2_beam_buffer_size_set(task->output.beam, task->output.max_buffer);
+ h2_beam_send_from(task->output.beam, task->pool);
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c,
+ h2_ctx_create_for(c, task);
+ apr_table_setn(c->notes, H2_TASK_ID_NOTE, task->id);
+
+ h2_slave_run_pre_connection(c, ap_get_conn_socket(c));
+
+ task->input.bb = apr_brigade_create(task->pool, c->bucket_alloc);
+ if (task->request->serialize) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "h2_task(%s): serialize request %s %s",
+ task->id, task->request->method, task->request->path);
+ apr_brigade_printf(task->input.bb, NULL,
+ NULL, "%s %s HTTP/1.1\r\n",
+ task->request->method, task->request->path);
+ apr_table_do(input_ser_header, task, task->request->headers, NULL);
+ apr_brigade_puts(task->input.bb, NULL, NULL, "\r\n");
+ }
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
"h2_task(%s): process connection", task->id);
+
task->c->current_thread = thread;
- ap_run_process_connection(task->c);
+ ap_run_process_connection(c);
if (task->frozen) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
"h2_task(%s): process_conn returned frozen task",
task->id);
/* cleanup delayed */
return APR_EAGAIN;
}
else {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
"h2_task(%s): processing done", task->id);
return output_finish(task);
}
@@ -737,6 +657,15 @@ static apr_status_t h2_task_process_requ
"h2_task(%s): create request_rec", task->id);
r = h2_request_create_rec(req, c);
if (r && (r->status == HTTP_OK)) {
+ /* set timeouts for virtual host of request */
+ if (task->timeout != r->server->timeout) {
+ task->timeout = r->server->timeout;
+ h2_beam_timeout_set(task->output.beam, task->timeout);
+ if (task->input.beam) {
+ h2_beam_timeout_set(task->input.beam, task->timeout);
+ }
+ }
+
ap_update_child_status(c->sbh, SERVER_BUSY_WRITE, r);
if (cs) {
@@ -744,16 +673,19 @@ static apr_status_t h2_task_process_requ
}
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
"h2_task(%s): start process_request", task->id);
+
ap_process_request(r);
if (task->frozen) {
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
"h2_task(%s): process_request frozen", task->id);
}
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
- "h2_task(%s): process_request done", task->id);
+ else {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "h2_task(%s): process_request done", task->id);
+ }
/* After the call to ap_process_request, the
- * request pool will have been deleted. We set
+ * request pool may have been deleted. We set
* r=NULL here to ensure that any dereference
* of r that might be added later in this function
* will result in a segfault immediately instead
@@ -786,7 +718,7 @@ static int h2_task_process_conn(conn_rec
ctx = h2_ctx_get(c, 0);
if (h2_ctx_is_task(ctx)) {
- if (!ctx->task->ser_headers) {
+ if (!ctx->task->request->serialize) {
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
"h2_h2, processing request directly");
h2_task_process_request(ctx->task, c);
@@ -819,11 +751,11 @@ apr_status_t h2_task_thaw(h2_task *task)
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, task->c, APLOGNO(03407)
"h2_task(%s), thawed", task->id);
}
- task->detached = 1;
+ task->thawed = 1;
return APR_SUCCESS;
}
-int h2_task_is_detached(h2_task *task)
+int h2_task_has_thawed(h2_task *task)
{
- return task->detached;
+ return task->thawed;
}
diff -up --new-file httpd-2.4.23/modules/http2/h2_task.h /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_task.h
--- httpd-2.4.23/modules/http2/h2_task.h 2016-06-07 13:29:51.000000000 +0200
+++ /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_task.h 2017-03-31 21:41:01.000000000 +0200
@@ -37,14 +37,14 @@
* of our own to disble those.
*/
-struct apr_thread_cond_t;
struct h2_bucket_beam;
struct h2_conn;
struct h2_mplx;
struct h2_task;
struct h2_req_engine;
struct h2_request;
-struct h2_response;
+struct h2_response_parser;
+struct h2_stream;
struct h2_worker;
typedef struct h2_task h2_task;
@@ -56,37 +56,33 @@ struct h2_task {
apr_pool_t *pool;
const struct h2_request *request;
- struct h2_response *response;
+ apr_interval_time_t timeout;
+ int rst_error; /* h2 related stream abort error */
struct {
struct h2_bucket_beam *beam;
- apr_bucket_brigade *bb;
- apr_bucket_brigade *tmp;
- apr_read_type_e block;
- unsigned int chunked : 1;
unsigned int eos : 1;
- unsigned int eos_written : 1;
+ apr_bucket_brigade *bb;
+ apr_bucket_brigade *bbchunk;
+ apr_off_t chunked_total;
} input;
struct {
struct h2_bucket_beam *beam;
- struct h2_from_h1 *from_h1;
- unsigned int response_open : 1;
- apr_off_t written;
+ unsigned int opened : 1;
+ unsigned int sent_response : 1;
+ unsigned int copy_files : 1;
+ struct h2_response_parser *rparser;
apr_bucket_brigade *bb;
+ apr_size_t max_buffer;
} output;
struct h2_mplx *mplx;
- struct apr_thread_cond_t *cond;
- int rst_error; /* h2 related stream abort error */
unsigned int filters_set : 1;
- unsigned int ser_headers : 1;
unsigned int frozen : 1;
- unsigned int blocking : 1;
- unsigned int detached : 1;
- unsigned int submitted : 1; /* response has been submitted to client */
- unsigned int worker_started : 1; /* h2_worker started processing for this io */
- unsigned int worker_done : 1; /* h2_worker finished for this io */
+ unsigned int thawed : 1;
+ unsigned int worker_started : 1; /* h2_worker started processing */
+ unsigned int worker_done : 1; /* h2_worker finished */
apr_time_t started_at; /* when processing started */
apr_time_t done_at; /* when processing was done */
@@ -94,17 +90,17 @@ struct h2_task {
struct h2_req_engine *engine; /* engine hosted by this task */
struct h2_req_engine *assigned; /* engine that task has been assigned to */
- request_rec *r; /* request being processed in this task */
};
-h2_task *h2_task_create(conn_rec *c, const struct h2_request *req,
- struct h2_bucket_beam *input, struct h2_mplx *mplx);
+h2_task *h2_task_create(conn_rec *slave, int stream_id,
+ const h2_request *req, struct h2_mplx *m,
+ struct h2_bucket_beam *input,
+ apr_interval_time_t timeout,
+ apr_size_t output_max_mem);
void h2_task_destroy(h2_task *task);
-apr_status_t h2_task_do(h2_task *task, apr_thread_t *thread);
-
-void h2_task_set_response(h2_task *task, struct h2_response *response);
+apr_status_t h2_task_do(h2_task *task, apr_thread_t *thread, int worker_id);
void h2_task_redo(h2_task *task);
int h2_task_can_redo(h2_task *task);
@@ -125,8 +121,6 @@ extern APR_OPTIONAL_FN_TYPE(ap_logio_add
apr_status_t h2_task_freeze(h2_task *task);
apr_status_t h2_task_thaw(h2_task *task);
-int h2_task_is_detached(h2_task *task);
-
-void h2_task_set_io_blocking(h2_task *task, int blocking);
+int h2_task_has_thawed(h2_task *task);
#endif /* defined(__mod_h2__h2_task__) */
diff -up --new-file httpd-2.4.23/modules/http2/h2_util.c /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_util.c
--- httpd-2.4.23/modules/http2/h2_util.c 2016-06-14 10:51:31.000000000 +0200
+++ /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_util.c 2017-10-13 10:37:45.000000000 +0200
@@ -15,6 +15,8 @@
#include <assert.h>
#include <apr_strings.h>
+#include <apr_thread_mutex.h>
+#include <apr_thread_cond.h>
#include <httpd.h>
#include <http_core.h>
@@ -27,7 +29,7 @@
#include "h2_util.h"
/* h2_log2(n) iff n is a power of 2 */
-unsigned char h2_log2(apr_uint32_t n)
+unsigned char h2_log2(int n)
{
int lz = 0;
if (!n) {
@@ -110,6 +112,8 @@ void h2_util_camel_case_header(char *s,
}
}
+/* base64 url encoding ****************************************************************************/
+
static const int BASE64URL_UINT6[] = {
/* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, /* 0 */
@@ -172,6 +176,7 @@ apr_size_t h2_util_base64url_decode(cons
n = ((BASE64URL_UINT6[ e[mlen+0] ] << 18) +
(BASE64URL_UINT6[ e[mlen+1] ] << 12));
*d++ = n >> 16;
+ remain = 1;
break;
case 3:
n = ((BASE64URL_UINT6[ e[mlen+0] ] << 18) +
@@ -179,6 +184,7 @@ apr_size_t h2_util_base64url_decode(cons
(BASE64URL_UINT6[ e[mlen+2] ] << 6));
*d++ = n >> 16;
*d++ = n >> 8 & 0xffu;
+ remain = 2;
break;
default: /* do nothing */
break;
@@ -187,78 +193,35 @@ apr_size_t h2_util_base64url_decode(cons
}
const char *h2_util_base64url_encode(const char *data,
- apr_size_t len, apr_pool_t *pool)
+ apr_size_t dlen, apr_pool_t *pool)
{
- apr_size_t mlen = ((len+2)/3)*3;
- apr_size_t slen = (mlen/3)*4;
- apr_size_t i;
+ long i, len = (int)dlen;
+ apr_size_t slen = ((dlen+2)/3)*4 + 1; /* 0 terminated */
const unsigned char *udata = (const unsigned char*)data;
- char *enc, *p = apr_pcalloc(pool, slen+1); /* 0 terminated */
+ char *enc, *p = apr_pcalloc(pool, slen);
enc = p;
- for (i = 0; i < mlen; i+= 3) {
+ for (i = 0; i < len-2; i+= 3) {
*p++ = BASE64URL_CHARS[ (udata[i] >> 2) & 0x3fu ];
- *p++ = BASE64URL_CHARS[ ((udata[i] << 4) +
- ((i+1 < len)? (udata[i+1] >> 4) : 0)) & 0x3fu ];
- *p++ = BASE64URL_CHARS[ ((udata[i+1] << 2) +
- ((i+2 < len)? (udata[i+2] >> 6) : 0)) & 0x3fu ];
- if (i+2 < len) {
- *p++ = BASE64URL_CHARS[ udata[i+2] & 0x3fu ];
- }
+ *p++ = BASE64URL_CHARS[ ((udata[i] << 4) + (udata[i+1] >> 4)) & 0x3fu ];
+ *p++ = BASE64URL_CHARS[ ((udata[i+1] << 2) + (udata[i+2] >> 6)) & 0x3fu ];
+ *p++ = BASE64URL_CHARS[ udata[i+2] & 0x3fu ];
}
- return enc;
-}
-
-int h2_util_contains_token(apr_pool_t *pool, const char *s, const char *token)
-{
- char *c;
- if (s) {
- if (!apr_strnatcasecmp(s, token)) { /* the simple life */
- return 1;
- }
-
- for (c = ap_get_token(pool, &s, 0); c && *c;
- c = *s? ap_get_token(pool, &s, 0) : NULL) {
- if (!apr_strnatcasecmp(c, token)) { /* seeing the token? */
- return 1;
- }
- while (*s++ == ';') { /* skip parameters */
- ap_get_token(pool, &s, 0);
- }
- if (*s++ != ',') { /* need comma separation */
- return 0;
- }
+ if (i < len) {
+ *p++ = BASE64URL_CHARS[ (udata[i] >> 2) & 0x3fu ];
+ if (i == (len - 1)) {
+ *p++ = BASE64URL_CHARS[ (udata[i] << 4) & 0x3fu ];
}
- }
- return 0;
-}
-
-const char *h2_util_first_token_match(apr_pool_t *pool, const char *s,
- const char *tokens[], apr_size_t len)
-{
- char *c;
- apr_size_t i;
- if (s && *s) {
- for (c = ap_get_token(pool, &s, 0); c && *c;
- c = *s? ap_get_token(pool, &s, 0) : NULL) {
- for (i = 0; i < len; ++i) {
- if (!apr_strnatcasecmp(c, tokens[i])) {
- return tokens[i];
- }
- }
- while (*s++ == ';') { /* skip parameters */
- ap_get_token(pool, &s, 0);
- }
- if (*s++ != ',') { /* need comma separation */
- return 0;
- }
+ else {
+ *p++ = BASE64URL_CHARS[ ((udata[i] << 4) + (udata[i+1] >> 4)) & 0x3fu ];
+ *p++ = BASE64URL_CHARS[ (udata[i+1] << 2) & 0x3fu ];
}
}
- return NULL;
+ *p++ = '\0';
+ return enc;
}
-
/*******************************************************************************
* ihash - hash for structs with int identifier
******************************************************************************/
@@ -370,39 +333,6 @@ size_t h2_ihash_shift(h2_ihash_t *ih, vo
return ctx.len;
}
-typedef struct {
- h2_ihash_t *ih;
- int *buffer;
- size_t max;
- size_t len;
-} icollect_ctx;
-
-static int icollect_iter(void *x, void *val)
-{
- icollect_ctx *ctx = x;
- if (ctx->len < ctx->max) {
- ctx->buffer[ctx->len++] = *((int*)((char *)val + ctx->ih->ioff));
- return 1;
- }
- return 0;
-}
-
-size_t h2_ihash_ishift(h2_ihash_t *ih, int *buffer, size_t max)
-{
- icollect_ctx ctx;
- size_t i;
-
- ctx.ih = ih;
- ctx.buffer = buffer;
- ctx.max = max;
- ctx.len = 0;
- h2_ihash_iter(ih, icollect_iter, &ctx);
- for (i = 0; i < ctx.len; ++i) {
- h2_ihash_remove(ih, buffer[i]);
- }
- return ctx.len;
-}
-
/*******************************************************************************
* iqueue - sorted list of int
******************************************************************************/
@@ -436,14 +366,16 @@ int h2_iq_count(h2_iqueue *q)
}
-void h2_iq_add(h2_iqueue *q, int sid, h2_iq_cmp *cmp, void *ctx)
+int h2_iq_add(h2_iqueue *q, int sid, h2_iq_cmp *cmp, void *ctx)
{
int i;
+ if (h2_iq_contains(q, sid)) {
+ return 0;
+ }
if (q->nelts >= q->nalloc) {
iq_grow(q, q->nalloc * 2);
}
-
i = (q->head + q->nelts) % q->nalloc;
q->elts[i] = sid;
++q->nelts;
@@ -452,6 +384,12 @@ void h2_iq_add(h2_iqueue *q, int sid, h2
/* bubble it to the front of the queue */
iq_bubble_up(q, i, q->head, cmp, ctx);
}
+ return 1;
+}
+
+int h2_iq_append(h2_iqueue *q, int sid)
+{
+ return h2_iq_add(q, sid, NULL, NULL);
}
int h2_iq_remove(h2_iqueue *q, int sid)
@@ -522,6 +460,18 @@ int h2_iq_shift(h2_iqueue *q)
return sid;
}
+size_t h2_iq_mshift(h2_iqueue *q, int *pint, size_t max)
+{
+ int i;
+ for (i = 0; i < max; ++i) {
+ pint[i] = h2_iq_shift(q);
+ if (pint[i] == 0) {
+ break;
+ }
+ }
+ return i;
+}
+
static void iq_grow(h2_iqueue *q, int nlen)
{
if (nlen > q->nalloc) {
@@ -573,6 +523,633 @@ static int iq_bubble_down(h2_iqueue *q,
return i;
}
+int h2_iq_contains(h2_iqueue *q, int sid)
+{
+ int i;
+ for (i = 0; i < q->nelts; ++i) {
+ if (sid == q->elts[(q->head + i) % q->nalloc]) {
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/*******************************************************************************
+ * FIFO queue
+ ******************************************************************************/
+
+struct h2_fifo {
+ void **elems;
+ int nelems;
+ int set;
+ int head;
+ int count;
+ int aborted;
+ apr_thread_mutex_t *lock;
+ apr_thread_cond_t *not_empty;
+ apr_thread_cond_t *not_full;
+};
+
+static int nth_index(h2_fifo *fifo, int n)
+{
+ return (fifo->head + n) % fifo->nelems;
+}
+
+static apr_status_t fifo_destroy(void *data)
+{
+ h2_fifo *fifo = data;
+
+ apr_thread_cond_destroy(fifo->not_empty);
+ apr_thread_cond_destroy(fifo->not_full);
+ apr_thread_mutex_destroy(fifo->lock);
+
+ return APR_SUCCESS;
+}
+
+static int index_of(h2_fifo *fifo, void *elem)
+{
+ int i;
+
+ for (i = 0; i < fifo->count; ++i) {
+ if (elem == fifo->elems[nth_index(fifo, i)]) {
+ return i;
+ }
+ }
+ return -1;
+}
+
+static apr_status_t create_int(h2_fifo **pfifo, apr_pool_t *pool,
+ int capacity, int as_set)
+{
+ apr_status_t rv;
+ h2_fifo *fifo;
+
+ fifo = apr_pcalloc(pool, sizeof(*fifo));
+ if (fifo == NULL) {
+ return APR_ENOMEM;
+ }
+
+ rv = apr_thread_mutex_create(&fifo->lock,
+ APR_THREAD_MUTEX_UNNESTED, pool);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+
+ rv = apr_thread_cond_create(&fifo->not_empty, pool);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+
+ rv = apr_thread_cond_create(&fifo->not_full, pool);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+
+ fifo->elems = apr_pcalloc(pool, capacity * sizeof(void*));
+ if (fifo->elems == NULL) {
+ return APR_ENOMEM;
+ }
+ fifo->nelems = capacity;
+ fifo->set = as_set;
+
+ *pfifo = fifo;
+ apr_pool_cleanup_register(pool, fifo, fifo_destroy, apr_pool_cleanup_null);
+
+ return APR_SUCCESS;
+}
+
+apr_status_t h2_fifo_create(h2_fifo **pfifo, apr_pool_t *pool, int capacity)
+{
+ return create_int(pfifo, pool, capacity, 0);
+}
+
+apr_status_t h2_fifo_set_create(h2_fifo **pfifo, apr_pool_t *pool, int capacity)
+{
+ return create_int(pfifo, pool, capacity, 1);
+}
+
+apr_status_t h2_fifo_term(h2_fifo *fifo)
+{
+ apr_status_t rv;
+ if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
+ fifo->aborted = 1;
+ apr_thread_mutex_unlock(fifo->lock);
+ }
+ return rv;
+}
+
+apr_status_t h2_fifo_interrupt(h2_fifo *fifo)
+{
+ apr_status_t rv;
+ if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
+ apr_thread_cond_broadcast(fifo->not_empty);
+ apr_thread_cond_broadcast(fifo->not_full);
+ apr_thread_mutex_unlock(fifo->lock);
+ }
+ return rv;
+}
+
+int h2_fifo_count(h2_fifo *fifo)
+{
+ return fifo->count;
+}
+
+static apr_status_t check_not_empty(h2_fifo *fifo, int block)
+{
+ while (fifo->count == 0) {
+ if (!block) {
+ return APR_EAGAIN;
+ }
+ if (fifo->aborted) {
+ return APR_EOF;
+ }
+ apr_thread_cond_wait(fifo->not_empty, fifo->lock);
+ }
+ return APR_SUCCESS;
+}
+
+static apr_status_t fifo_push_int(h2_fifo *fifo, void *elem, int block)
+{
+ if (fifo->aborted) {
+ return APR_EOF;
+ }
+
+ if (fifo->set && index_of(fifo, elem) >= 0) {
+ /* set mode, elem already member */
+ return APR_EEXIST;
+ }
+ else if (fifo->count == fifo->nelems) {
+ if (block) {
+ while (fifo->count == fifo->nelems) {
+ if (fifo->aborted) {
+ return APR_EOF;
+ }
+ apr_thread_cond_wait(fifo->not_full, fifo->lock);
+ }
+ }
+ else {
+ return APR_EAGAIN;
+ }
+ }
+
+ ap_assert(fifo->count < fifo->nelems);
+ fifo->elems[nth_index(fifo, fifo->count)] = elem;
+ ++fifo->count;
+ if (fifo->count == 1) {
+ apr_thread_cond_broadcast(fifo->not_empty);
+ }
+ return APR_SUCCESS;
+}
+
+static apr_status_t fifo_push(h2_fifo *fifo, void *elem, int block)
+{
+ apr_status_t rv;
+
+ if (fifo->aborted) {
+ return APR_EOF;
+ }
+
+ if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
+ rv = fifo_push_int(fifo, elem, block);
+ apr_thread_mutex_unlock(fifo->lock);
+ }
+ return rv;
+}
+
+apr_status_t h2_fifo_push(h2_fifo *fifo, void *elem)
+{
+ return fifo_push(fifo, elem, 1);
+}
+
+apr_status_t h2_fifo_try_push(h2_fifo *fifo, void *elem)
+{
+ return fifo_push(fifo, elem, 0);
+}
+
+static apr_status_t pull_head(h2_fifo *fifo, void **pelem, int block)
+{
+ apr_status_t rv;
+
+ if ((rv = check_not_empty(fifo, block)) != APR_SUCCESS) {
+ *pelem = NULL;
+ return rv;
+ }
+ *pelem = fifo->elems[fifo->head];
+ --fifo->count;
+ if (fifo->count > 0) {
+ fifo->head = nth_index(fifo, 1);
+ if (fifo->count+1 == fifo->nelems) {
+ apr_thread_cond_broadcast(fifo->not_full);
+ }
+ }
+ return APR_SUCCESS;
+}
+
+static apr_status_t fifo_pull(h2_fifo *fifo, void **pelem, int block)
+{
+ apr_status_t rv;
+
+ if (fifo->aborted) {
+ return APR_EOF;
+ }
+
+ if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
+ rv = pull_head(fifo, pelem, block);
+ apr_thread_mutex_unlock(fifo->lock);
+ }
+ return rv;
+}
+
+apr_status_t h2_fifo_pull(h2_fifo *fifo, void **pelem)
+{
+ return fifo_pull(fifo, pelem, 1);
+}
+
+apr_status_t h2_fifo_try_pull(h2_fifo *fifo, void **pelem)
+{
+ return fifo_pull(fifo, pelem, 0);
+}
+
+static apr_status_t fifo_peek(h2_fifo *fifo, h2_fifo_peek_fn *fn, void *ctx, int block)
+{
+ apr_status_t rv;
+ void *elem;
+
+ if (fifo->aborted) {
+ return APR_EOF;
+ }
+
+ if (APR_SUCCESS == (rv = apr_thread_mutex_lock(fifo->lock))) {
+ if (APR_SUCCESS == (rv = pull_head(fifo, &elem, block))) {
+ switch (fn(elem, ctx)) {
+ case H2_FIFO_OP_PULL:
+ break;
+ case H2_FIFO_OP_REPUSH:
+ rv = fifo_push_int(fifo, elem, block);
+ break;
+ }
+ }
+ apr_thread_mutex_unlock(fifo->lock);
+ }
+ return rv;
+}
+
+apr_status_t h2_fifo_peek(h2_fifo *fifo, h2_fifo_peek_fn *fn, void *ctx)
+{
+ return fifo_peek(fifo, fn, ctx, 1);
+}
+
+apr_status_t h2_fifo_try_peek(h2_fifo *fifo, h2_fifo_peek_fn *fn, void *ctx)
+{
+ return fifo_peek(fifo, fn, ctx, 0);
+}
+
+apr_status_t h2_fifo_remove(h2_fifo *fifo, void *elem)
+{
+ apr_status_t rv;
+
+ if (fifo->aborted) {
+ return APR_EOF;
+ }
+
+ if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
+ int i, rc;
+ void *e;
+
+ rc = 0;
+ for (i = 0; i < fifo->count; ++i) {
+ e = fifo->elems[nth_index(fifo, i)];
+ if (e == elem) {
+ ++rc;
+ }
+ else if (rc) {
+ fifo->elems[nth_index(fifo, i-rc)] = e;
+ }
+ }
+ if (rc) {
+ fifo->count -= rc;
+ if (fifo->count + rc == fifo->nelems) {
+ apr_thread_cond_broadcast(fifo->not_full);
+ }
+ rv = APR_SUCCESS;
+ }
+ else {
+ rv = APR_EAGAIN;
+ }
+
+ apr_thread_mutex_unlock(fifo->lock);
+ }
+ return rv;
+}
+
+/*******************************************************************************
+ * FIFO int queue
+ ******************************************************************************/
+
+struct h2_ififo {
+ int *elems;
+ int nelems;
+ int set;
+ int head;
+ int count;
+ int aborted;
+ apr_thread_mutex_t *lock;
+ apr_thread_cond_t *not_empty;
+ apr_thread_cond_t *not_full;
+};
+
+static int inth_index(h2_ififo *fifo, int n)
+{
+ return (fifo->head + n) % fifo->nelems;
+}
+
+static apr_status_t ififo_destroy(void *data)
+{
+ h2_ififo *fifo = data;
+
+ apr_thread_cond_destroy(fifo->not_empty);
+ apr_thread_cond_destroy(fifo->not_full);
+ apr_thread_mutex_destroy(fifo->lock);
+
+ return APR_SUCCESS;
+}
+
+static int iindex_of(h2_ififo *fifo, int id)
+{
+ int i;
+
+ for (i = 0; i < fifo->count; ++i) {
+ if (id == fifo->elems[inth_index(fifo, i)]) {
+ return i;
+ }
+ }
+ return -1;
+}
+
+static apr_status_t icreate_int(h2_ififo **pfifo, apr_pool_t *pool,
+ int capacity, int as_set)
+{
+ apr_status_t rv;
+ h2_ififo *fifo;
+
+ fifo = apr_pcalloc(pool, sizeof(*fifo));
+ if (fifo == NULL) {
+ return APR_ENOMEM;
+ }
+
+ rv = apr_thread_mutex_create(&fifo->lock,
+ APR_THREAD_MUTEX_UNNESTED, pool);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+
+ rv = apr_thread_cond_create(&fifo->not_empty, pool);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+
+ rv = apr_thread_cond_create(&fifo->not_full, pool);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+
+ fifo->elems = apr_pcalloc(pool, capacity * sizeof(int));
+ if (fifo->elems == NULL) {
+ return APR_ENOMEM;
+ }
+ fifo->nelems = capacity;
+ fifo->set = as_set;
+
+ *pfifo = fifo;
+ apr_pool_cleanup_register(pool, fifo, ififo_destroy, apr_pool_cleanup_null);
+
+ return APR_SUCCESS;
+}
+
+apr_status_t h2_ififo_create(h2_ififo **pfifo, apr_pool_t *pool, int capacity)
+{
+ return icreate_int(pfifo, pool, capacity, 0);
+}
+
+apr_status_t h2_ififo_set_create(h2_ififo **pfifo, apr_pool_t *pool, int capacity)
+{
+ return icreate_int(pfifo, pool, capacity, 1);
+}
+
+apr_status_t h2_ififo_term(h2_ififo *fifo)
+{
+ apr_status_t rv;
+ if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
+ fifo->aborted = 1;
+ apr_thread_mutex_unlock(fifo->lock);
+ }
+ return rv;
+}
+
+apr_status_t h2_ififo_interrupt(h2_ififo *fifo)
+{
+ apr_status_t rv;
+ if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
+ apr_thread_cond_broadcast(fifo->not_empty);
+ apr_thread_cond_broadcast(fifo->not_full);
+ apr_thread_mutex_unlock(fifo->lock);
+ }
+ return rv;
+}
+
+int h2_ififo_count(h2_ififo *fifo)
+{
+ return fifo->count;
+}
+
+static apr_status_t icheck_not_empty(h2_ififo *fifo, int block)
+{
+ while (fifo->count == 0) {
+ if (!block) {
+ return APR_EAGAIN;
+ }
+ if (fifo->aborted) {
+ return APR_EOF;
+ }
+ apr_thread_cond_wait(fifo->not_empty, fifo->lock);
+ }
+ return APR_SUCCESS;
+}
+
+static apr_status_t ififo_push_int(h2_ififo *fifo, int id, int block)
+{
+ if (fifo->aborted) {
+ return APR_EOF;
+ }
+
+ if (fifo->set && iindex_of(fifo, id) >= 0) {
+ /* set mode, elem already member */
+ return APR_EEXIST;
+ }
+ else if (fifo->count == fifo->nelems) {
+ if (block) {
+ while (fifo->count == fifo->nelems) {
+ if (fifo->aborted) {
+ return APR_EOF;
+ }
+ apr_thread_cond_wait(fifo->not_full, fifo->lock);
+ }
+ }
+ else {
+ return APR_EAGAIN;
+ }
+ }
+
+ ap_assert(fifo->count < fifo->nelems);
+ fifo->elems[inth_index(fifo, fifo->count)] = id;
+ ++fifo->count;
+ if (fifo->count == 1) {
+ apr_thread_cond_broadcast(fifo->not_empty);
+ }
+ return APR_SUCCESS;
+}
+
+static apr_status_t ififo_push(h2_ififo *fifo, int id, int block)
+{
+ apr_status_t rv;
+
+ if (fifo->aborted) {
+ return APR_EOF;
+ }
+
+ if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
+ rv = ififo_push_int(fifo, id, block);
+ apr_thread_mutex_unlock(fifo->lock);
+ }
+ return rv;
+}
+
+apr_status_t h2_ififo_push(h2_ififo *fifo, int id)
+{
+ return ififo_push(fifo, id, 1);
+}
+
+apr_status_t h2_ififo_try_push(h2_ififo *fifo, int id)
+{
+ return ififo_push(fifo, id, 0);
+}
+
+static apr_status_t ipull_head(h2_ififo *fifo, int *pi, int block)
+{
+ apr_status_t rv;
+
+ if ((rv = icheck_not_empty(fifo, block)) != APR_SUCCESS) {
+ *pi = 0;
+ return rv;
+ }
+ *pi = fifo->elems[fifo->head];
+ --fifo->count;
+ if (fifo->count > 0) {
+ fifo->head = inth_index(fifo, 1);
+ if (fifo->count+1 == fifo->nelems) {
+ apr_thread_cond_broadcast(fifo->not_full);
+ }
+ }
+ return APR_SUCCESS;
+}
+
+static apr_status_t ififo_pull(h2_ififo *fifo, int *pi, int block)
+{
+ apr_status_t rv;
+
+ if (fifo->aborted) {
+ return APR_EOF;
+ }
+
+ if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
+ rv = ipull_head(fifo, pi, block);
+ apr_thread_mutex_unlock(fifo->lock);
+ }
+ return rv;
+}
+
+apr_status_t h2_ififo_pull(h2_ififo *fifo, int *pi)
+{
+ return ififo_pull(fifo, pi, 1);
+}
+
+apr_status_t h2_ififo_try_pull(h2_ififo *fifo, int *pi)
+{
+ return ififo_pull(fifo, pi, 0);
+}
+
+static apr_status_t ififo_peek(h2_ififo *fifo, h2_ififo_peek_fn *fn, void *ctx, int block)
+{
+ apr_status_t rv;
+ int id;
+
+ if (fifo->aborted) {
+ return APR_EOF;
+ }
+
+ if (APR_SUCCESS == (rv = apr_thread_mutex_lock(fifo->lock))) {
+ if (APR_SUCCESS == (rv = ipull_head(fifo, &id, block))) {
+ switch (fn(id, ctx)) {
+ case H2_FIFO_OP_PULL:
+ break;
+ case H2_FIFO_OP_REPUSH:
+ rv = ififo_push_int(fifo, id, block);
+ break;
+ }
+ }
+ apr_thread_mutex_unlock(fifo->lock);
+ }
+ return rv;
+}
+
+apr_status_t h2_ififo_peek(h2_ififo *fifo, h2_ififo_peek_fn *fn, void *ctx)
+{
+ return ififo_peek(fifo, fn, ctx, 1);
+}
+
+apr_status_t h2_ififo_try_peek(h2_ififo *fifo, h2_ififo_peek_fn *fn, void *ctx)
+{
+ return ififo_peek(fifo, fn, ctx, 0);
+}
+
+apr_status_t h2_ififo_remove(h2_ififo *fifo, int id)
+{
+ apr_status_t rv;
+
+ if (fifo->aborted) {
+ return APR_EOF;
+ }
+
+ if ((rv = apr_thread_mutex_lock(fifo->lock)) == APR_SUCCESS) {
+ int i, rc;
+ int e;
+
+ rc = 0;
+ for (i = 0; i < fifo->count; ++i) {
+ e = fifo->elems[inth_index(fifo, i)];
+ if (e == id) {
+ ++rc;
+ }
+ else if (rc) {
+ fifo->elems[inth_index(fifo, i-rc)] = e;
+ }
+ }
+ if (rc) {
+ fifo->count -= rc;
+ if (fifo->count + rc == fifo->nelems) {
+ apr_thread_cond_broadcast(fifo->not_full);
+ }
+ rv = APR_SUCCESS;
+ }
+ else {
+ rv = APR_EAGAIN;
+ }
+
+ apr_thread_mutex_unlock(fifo->lock);
+ }
+ return rv;
+}
+
/*******************************************************************************
* h2_util for apt_table_t
******************************************************************************/
@@ -618,7 +1195,7 @@ static apr_status_t last_not_included(ap
{
apr_bucket *b;
apr_status_t status = APR_SUCCESS;
- int files_allowed = pfile_buckets_allowed? *pfile_buckets_allowed : 0;
+ int files_allowed = pfile_buckets_allowed? (int)*pfile_buckets_allowed : 0;
if (maxlen >= 0) {
/* Find the bucket, up to which we reach maxlen/mem bytes */
@@ -653,8 +1230,8 @@ static apr_status_t last_not_included(ap
* unless we do not move the file buckets */
--files_allowed;
}
- else if (maxlen < b->length) {
- apr_bucket_split(b, maxlen);
+ else if (maxlen < (apr_off_t)b->length) {
+ apr_bucket_split(b, (apr_size_t)maxlen);
maxlen = 0;
}
else {
@@ -671,17 +1248,16 @@ apr_status_t h2_brigade_concat_length(ap
apr_bucket_brigade *src,
apr_off_t length)
{
- apr_bucket *b, *next;
+ apr_bucket *b;
apr_off_t remain = length;
apr_status_t status = APR_SUCCESS;
- for (b = APR_BRIGADE_FIRST(src);
- b != APR_BRIGADE_SENTINEL(src);
- b = next) {
- next = APR_BUCKET_NEXT(b);
+ while (!APR_BRIGADE_EMPTY(src)) {
+ b = APR_BRIGADE_FIRST(src);
if (APR_BUCKET_IS_METADATA(b)) {
- /* fall through */
+ APR_BUCKET_REMOVE(b);
+ APR_BRIGADE_INSERT_TAIL(dest, b);
}
else {
if (remain == b->length) {
@@ -704,10 +1280,10 @@ apr_status_t h2_brigade_concat_length(ap
apr_bucket_split(b, remain);
}
}
+ APR_BUCKET_REMOVE(b);
+ APR_BRIGADE_INSERT_TAIL(dest, b);
+ remain -= b->length;
}
- APR_BUCKET_REMOVE(b);
- APR_BRIGADE_INSERT_TAIL(dest, b);
- remain -= b->length;
}
return status;
}
@@ -852,7 +1428,7 @@ apr_status_t h2_util_bb_readx(apr_bucket
if (data_len > avail) {
apr_bucket_split(b, avail);
- data_len = avail;
+ data_len = (apr_size_t)avail;
}
if (consume) {
@@ -892,51 +1468,15 @@ apr_size_t h2_util_bucket_print(char *bu
off += apr_snprintf(buffer+off, bmax-off, "%s", sep);
}
- if (APR_BUCKET_IS_METADATA(b)) {
- if (APR_BUCKET_IS_EOS(b)) {
- off += apr_snprintf(buffer+off, bmax-off, "eos");
- }
- else if (APR_BUCKET_IS_FLUSH(b)) {
- off += apr_snprintf(buffer+off, bmax-off, "flush");
- }
- else if (AP_BUCKET_IS_EOR(b)) {
- off += apr_snprintf(buffer+off, bmax-off, "eor");
- }
- else {
- off += apr_snprintf(buffer+off, bmax-off, "meta(unknown)");
- }
+ if (bmax <= off) {
+ return off;
}
- else {
- const char *btype = "data";
- if (APR_BUCKET_IS_FILE(b)) {
- btype = "file";
- }
- else if (APR_BUCKET_IS_PIPE(b)) {
- btype = "pipe";
- }
- else if (APR_BUCKET_IS_SOCKET(b)) {
- btype = "socket";
- }
- else if (APR_BUCKET_IS_HEAP(b)) {
- btype = "heap";
- }
- else if (APR_BUCKET_IS_TRANSIENT(b)) {
- btype = "transient";
- }
- else if (APR_BUCKET_IS_IMMORTAL(b)) {
- btype = "immortal";
- }
-#if APR_HAS_MMAP
- else if (APR_BUCKET_IS_MMAP(b)) {
- btype = "mmap";
- }
-#endif
- else if (APR_BUCKET_IS_POOL(b)) {
- btype = "pool";
- }
-
+ else if (APR_BUCKET_IS_METADATA(b)) {
+ off += apr_snprintf(buffer+off, bmax-off, "%s", b->type->name);
+ }
+ else if (bmax > off) {
off += apr_snprintf(buffer+off, bmax-off, "%s[%ld]",
- btype,
+ b->type->name,
(long)(b->length == ((apr_size_t)-1)?
-1 : b->length));
}
@@ -951,20 +1491,24 @@ apr_size_t h2_util_bb_print(char *buffer
const char *sp = "";
apr_bucket *b;
- if (bb) {
- memset(buffer, 0, bmax--);
- off += apr_snprintf(buffer+off, bmax-off, "%s(", tag);
- for (b = APR_BRIGADE_FIRST(bb);
- bmax && (b != APR_BRIGADE_SENTINEL(bb));
- b = APR_BUCKET_NEXT(b)) {
-
- off += h2_util_bucket_print(buffer+off, bmax-off, b, sp);
- sp = " ";
+ if (bmax > 1) {
+ if (bb) {
+ memset(buffer, 0, bmax--);
+ off += apr_snprintf(buffer+off, bmax-off, "%s(", tag);
+ for (b = APR_BRIGADE_FIRST(bb);
+ (bmax > off) && (b != APR_BRIGADE_SENTINEL(bb));
+ b = APR_BUCKET_NEXT(b)) {
+
+ off += h2_util_bucket_print(buffer+off, bmax-off, b, sp);
+ sp = " ";
+ }
+ if (bmax > off) {
+ off += apr_snprintf(buffer+off, bmax-off, ")%s", sep);
+ }
+ }
+ else {
+ off += apr_snprintf(buffer+off, bmax-off, "%s(null)%s", tag, sep);
}
- off += apr_snprintf(buffer+off, bmax-off, ")%s", sep);
- }
- else {
- off += apr_snprintf(buffer+off, bmax-off, "%s(null)%s", tag, sep);
}
return off;
}
@@ -972,7 +1516,8 @@ apr_size_t h2_util_bb_print(char *buffer
apr_status_t h2_append_brigade(apr_bucket_brigade *to,
apr_bucket_brigade *from,
apr_off_t *plen,
- int *peos)
+ int *peos,
+ h2_bucket_gate *should_append)
{
apr_bucket *e;
apr_off_t len = 0, remain = *plen;
@@ -983,7 +1528,10 @@ apr_status_t h2_append_brigade(apr_bucke
while (!APR_BRIGADE_EMPTY(from)) {
e = APR_BRIGADE_FIRST(from);
- if (APR_BUCKET_IS_METADATA(e)) {
+ if (!should_append(e)) {
+ goto leave;
+ }
+ else if (APR_BUCKET_IS_METADATA(e)) {
if (APR_BUCKET_IS_EOS(e)) {
*peos = 1;
apr_bucket_delete(e);
@@ -1002,9 +1550,9 @@ apr_status_t h2_append_brigade(apr_bucke
if (remain < e->length) {
if (remain <= 0) {
- return APR_SUCCESS;
+ goto leave;
}
- apr_bucket_split(e, remain);
+ apr_bucket_split(e, (apr_size_t)remain);
}
}
@@ -1013,7 +1561,7 @@ apr_status_t h2_append_brigade(apr_bucke
len += e->length;
remain -= e->length;
}
-
+leave:
*plen = len;
return APR_SUCCESS;
}
@@ -1061,89 +1609,150 @@ static int count_header(void *ctx, const
return 1;
}
-#define NV_ADD_LIT_CS(nv, k, v) add_header(nv, k, sizeof(k) - 1, v, strlen(v))
-#define NV_ADD_CS_CS(nv, k, v) add_header(nv, k, strlen(k), v, strlen(v))
+static const char *inv_field_name_chr(const char *token)
+{
+ const char *p = ap_scan_http_token(token);
+ if (p == token && *p == ':') {
+ p = ap_scan_http_token(++p);
+ }
+ return (p && *p)? p : NULL;
+}
-static int add_header(h2_ngheader *ngh,
- const char *key, size_t key_len,
- const char *value, size_t val_len)
+static const char *inv_field_value_chr(const char *token)
{
- nghttp2_nv *nv = &ngh->nv[ngh->nvlen++];
-
+ const char *p = ap_scan_http_field_content(token);
+ return (p && *p)? p : NULL;
+}
+
+typedef struct ngh_ctx {
+ apr_pool_t *p;
+ int unsafe;
+ h2_ngheader *ngh;
+ apr_status_t status;
+} ngh_ctx;
+
+static int add_header(ngh_ctx *ctx, const char *key, const char *value)
+{
+ nghttp2_nv *nv = &(ctx->ngh)->nv[(ctx->ngh)->nvlen++];
+ const char *p;
+
+ if (!ctx->unsafe) {
+ if ((p = inv_field_name_chr(key))) {
+ ap_log_perror(APLOG_MARK, APLOG_TRACE1, APR_EINVAL, ctx->p,
+ "h2_request: head field '%s: %s' has invalid char %s",
+ key, value, p);
+ ctx->status = APR_EINVAL;
+ return 0;
+ }
+ if ((p = inv_field_value_chr(value))) {
+ ap_log_perror(APLOG_MARK, APLOG_TRACE1, APR_EINVAL, ctx->p,
+ "h2_request: head field '%s: %s' has invalid char %s",
+ key, value, p);
+ ctx->status = APR_EINVAL;
+ return 0;
+ }
+ }
nv->name = (uint8_t*)key;
- nv->namelen = key_len;
+ nv->namelen = strlen(key);
nv->value = (uint8_t*)value;
- nv->valuelen = val_len;
+ nv->valuelen = strlen(value);
+
return 1;
}
static int add_table_header(void *ctx, const char *key, const char *value)
{
if (!h2_util_ignore_header(key)) {
- add_header(ctx, key, strlen(key), value, strlen(value));
+ add_header(ctx, key, value);
}
return 1;
}
-
-h2_ngheader *h2_util_ngheader_make(apr_pool_t *p, apr_table_t *header)
+static apr_status_t ngheader_create(h2_ngheader **ph, apr_pool_t *p,
+ int unsafe, size_t key_count,
+ const char *keys[], const char *values[],
+ apr_table_t *headers)
{
- h2_ngheader *ngh;
- size_t n;
+ ngh_ctx ctx;
+ size_t n, i;
- n = 0;
- apr_table_do(count_header, &n, header, NULL);
+ ctx.p = p;
+ ctx.unsafe = unsafe;
+
+ n = key_count;
+ apr_table_do(count_header, &n, headers, NULL);
+
+ *ph = ctx.ngh = apr_pcalloc(p, sizeof(h2_ngheader));
+ if (!ctx.ngh) {
+ return APR_ENOMEM;
+ }
+
+ ctx.ngh->nv = apr_pcalloc(p, n * sizeof(nghttp2_nv));
+ if (!ctx.ngh->nv) {
+ return APR_ENOMEM;
+ }
+
+ ctx.status = APR_SUCCESS;
+ for (i = 0; i < key_count; ++i) {
+ if (!add_header(&ctx, keys[i], values[i])) {
+ return ctx.status;
+ }
+ }
- ngh = apr_pcalloc(p, sizeof(h2_ngheader));
- ngh->nv = apr_pcalloc(p, n * sizeof(nghttp2_nv));
- apr_table_do(add_table_header, ngh, header, NULL);
+ apr_table_do(add_table_header, &ctx, headers, NULL);
- return ngh;
+ return ctx.status;
}
-h2_ngheader *h2_util_ngheader_make_res(apr_pool_t *p,
- int http_status,
- apr_table_t *header)
+static int is_unsafe(h2_headers *h)
{
- h2_ngheader *ngh;
- size_t n;
-
- n = 1;
- apr_table_do(count_header, &n, header, NULL);
-
- ngh = apr_pcalloc(p, sizeof(h2_ngheader));
- ngh->nv = apr_pcalloc(p, n * sizeof(nghttp2_nv));
- NV_ADD_LIT_CS(ngh, ":status", apr_psprintf(p, "%d", http_status));
- apr_table_do(add_table_header, ngh, header, NULL);
+ const char *v = apr_table_get(h->notes, H2_HDR_CONFORMANCE);
+ return (v && !strcmp(v, H2_HDR_CONFORMANCE_UNSAFE));
+}
- return ngh;
+apr_status_t h2_res_create_ngtrailer(h2_ngheader **ph, apr_pool_t *p,
+ h2_headers *headers)
+{
+ return ngheader_create(ph, p, is_unsafe(headers),
+ 0, NULL, NULL, headers->headers);
+}
+
+apr_status_t h2_res_create_ngheader(h2_ngheader **ph, apr_pool_t *p,
+ h2_headers *headers)
+{
+ const char *keys[] = {
+ ":status"
+ };
+ const char *values[] = {
+ apr_psprintf(p, "%d", headers->status)
+ };
+ return ngheader_create(ph, p, is_unsafe(headers),
+ H2_ALEN(keys), keys, values, headers->headers);
}
-h2_ngheader *h2_util_ngheader_make_req(apr_pool_t *p,
- const struct h2_request *req)
+apr_status_t h2_req_create_ngheader(h2_ngheader **ph, apr_pool_t *p,
+ const struct h2_request *req)
{
- h2_ngheader *ngh;
- size_t n;
+ const char *keys[] = {
+ ":scheme",
+ ":authority",
+ ":path",
+ ":method",
+ };
+ const char *values[] = {
+ req->scheme,
+ req->authority,
+ req->path,
+ req->method,
+ };
- AP_DEBUG_ASSERT(req);
- AP_DEBUG_ASSERT(req->scheme);
- AP_DEBUG_ASSERT(req->authority);
- AP_DEBUG_ASSERT(req->path);
- AP_DEBUG_ASSERT(req->method);
-
- n = 4;
- apr_table_do(count_header, &n, req->headers, NULL);
-
- ngh = apr_pcalloc(p, sizeof(h2_ngheader));
- ngh->nv = apr_pcalloc(p, n * sizeof(nghttp2_nv));
- NV_ADD_LIT_CS(ngh, ":scheme", req->scheme);
- NV_ADD_LIT_CS(ngh, ":authority", req->authority);
- NV_ADD_LIT_CS(ngh, ":path", req->path);
- NV_ADD_LIT_CS(ngh, ":method", req->method);
- apr_table_do(add_table_header, ngh, req->headers, NULL);
+ ap_assert(req->scheme);
+ ap_assert(req->authority);
+ ap_assert(req->path);
+ ap_assert(req->method);
- return ngh;
+ return ngheader_create(ph, p, 0, H2_ALEN(keys), keys, values, req->headers);
}
/*******************************************************************************
@@ -1160,7 +1769,6 @@ typedef struct {
#define H2_LIT_ARGS(a) (a),H2_ALEN(a)
static literal IgnoredRequestHeaders[] = {
- H2_DEF_LITERAL("expect"),
H2_DEF_LITERAL("upgrade"),
H2_DEF_LITERAL("connection"),
H2_DEF_LITERAL("keep-alive"),
@@ -1194,15 +1802,12 @@ static literal IgnoredResponseTrailers[]
H2_DEF_LITERAL("www-authenticate"),
H2_DEF_LITERAL("proxy-authenticate"),
};
-static literal IgnoredProxyRespHds[] = {
- H2_DEF_LITERAL("alt-svc"),
-};
static int ignore_header(const literal *lits, size_t llen,
const char *name, size_t nlen)
{
const literal *lit;
- int i;
+ size_t i;
for (i = 0; i < llen; ++i) {
lit = &lits[i];
@@ -1229,13 +1834,7 @@ int h2_res_ignore_trailer(const char *na
return ignore_header(H2_LIT_ARGS(IgnoredResponseTrailers), name, len);
}
-int h2_proxy_res_ignore_header(const char *name, size_t len)
-{
- return (h2_req_ignore_header(name, len)
- || ignore_header(H2_LIT_ARGS(IgnoredProxyRespHds), name, len));
-}
-
-apr_status_t h2_headers_add_h1(apr_table_t *headers, apr_pool_t *pool,
+apr_status_t h2_req_add_header(apr_table_t *headers, apr_pool_t *pool,
const char *name, size_t nlen,
const char *value, size_t vlen)
{
@@ -1276,13 +1875,12 @@ apr_status_t h2_headers_add_h1(apr_table
* h2 request handling
******************************************************************************/
-h2_request *h2_req_createn(int id, apr_pool_t *pool, const char *method,
- const char *scheme, const char *authority,
- const char *path, apr_table_t *header, int serialize)
+h2_request *h2_req_create(int id, apr_pool_t *pool, const char *method,
+ const char *scheme, const char *authority,
+ const char *path, apr_table_t *header, int serialize)
{
h2_request *req = apr_pcalloc(pool, sizeof(h2_request));
- req->id = id;
req->method = method;
req->scheme = scheme;
req->authority = authority;
@@ -1294,49 +1892,6 @@ h2_request *h2_req_createn(int id, apr_p
return req;
}
-h2_request *h2_req_create(int id, apr_pool_t *pool, int serialize)
-{
- return h2_req_createn(id, pool, NULL, NULL, NULL, NULL, NULL, serialize);
-}
-
-typedef struct {
- apr_table_t *headers;
- apr_pool_t *pool;
-} h1_ctx;
-
-static int set_h1_header(void *ctx, const char *key, const char *value)
-{
- h1_ctx *x = ctx;
- size_t klen = strlen(key);
- if (!h2_req_ignore_header(key, klen)) {
- h2_headers_add_h1(x->headers, x->pool, key, klen, value, strlen(value));
- }
- return 1;
-}
-
-apr_status_t h2_req_make(h2_request *req, apr_pool_t *pool,
- const char *method, const char *scheme,
- const char *authority, const char *path,
- apr_table_t *headers)
-{
- h1_ctx x;
-
- req->method = method;
- req->scheme = scheme;
- req->authority = authority;
- req->path = path;
-
- AP_DEBUG_ASSERT(req->scheme);
- AP_DEBUG_ASSERT(req->authority);
- AP_DEBUG_ASSERT(req->path);
- AP_DEBUG_ASSERT(req->method);
-
- x.pool = pool;
- x.headers = req->headers;
- apr_table_do(set_h1_header, &x, headers, NULL);
- return APR_SUCCESS;
-}
-
/*******************************************************************************
* frame logging
******************************************************************************/
@@ -1423,11 +1978,11 @@ int h2_util_frame_print(const nghttp2_fr
/*******************************************************************************
* push policy
******************************************************************************/
-void h2_push_policy_determine(struct h2_request *req, apr_pool_t *p, int push_enabled)
+int h2_push_policy_determine(apr_table_t *headers, apr_pool_t *p, int push_enabled)
{
h2_push_policy policy = H2_PUSH_NONE;
if (push_enabled) {
- const char *val = apr_table_get(req->headers, "accept-push-policy");
+ const char *val = apr_table_get(headers, "accept-push-policy");
if (val) {
if (ap_find_token(p, val, "fast-load")) {
policy = H2_PUSH_FAST_LOAD;
@@ -1450,6 +2005,6 @@ void h2_push_policy_determine(struct h2_
policy = H2_PUSH_DEFAULT;
}
}
- req->push_policy = policy;
+ return policy;
}
diff -up --new-file httpd-2.4.23/modules/http2/h2_util.h /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_util.h
--- httpd-2.4.23/modules/http2/h2_util.h 2016-06-14 10:51:31.000000000 +0200
+++ /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_util.h 2017-10-13 10:37:45.000000000 +0200
@@ -68,7 +68,6 @@ void h2_ihash_remove_val(h2_ihash_t *ih,
void h2_ihash_clear(h2_ihash_t *ih);
size_t h2_ihash_shift(h2_ihash_t *ih, void **buffer, size_t max);
-size_t h2_ihash_ishift(h2_ihash_t *ih, int *buffer, size_t max);
/*******************************************************************************
* iqueue - sorted list of int with user defined ordering
@@ -116,12 +115,22 @@ int h2_iq_count(h2_iqueue *q);
/**
* Add a stream id to the queue.
*
- * @param q the queue to append the task to
+ * @param q the queue to append the id to
* @param sid the stream id to add
* @param cmp the comparator for sorting
- * @param ctx user data for comparator
+ * @param ctx user data for comparator
+ * @return != 0 iff id was not already there
*/
-void h2_iq_add(h2_iqueue *q, int sid, h2_iq_cmp *cmp, void *ctx);
+int h2_iq_add(h2_iqueue *q, int sid, h2_iq_cmp *cmp, void *ctx);
+
+/**
+ * Append the id to the queue if not already present.
+ *
+ * @param q the queue to append the id to
+ * @param sid the id to append
+ * @return != 0 iff id was not already there
+ */
+int h2_iq_append(h2_iqueue *q, int sid);
/**
* Remove the stream id from the queue. Return != 0 iff task
@@ -148,19 +157,175 @@ void h2_iq_clear(h2_iqueue *q);
void h2_iq_sort(h2_iqueue *q, h2_iq_cmp *cmp, void *ctx);
/**
- * Get the first stream id from the queue or NULL if the queue is empty.
- * The task will be removed.
+ * Get the first id from the queue or 0 if the queue is empty.
+ * The id is being removed.
*
- * @param q the queue to get the first task from
- * @return the first stream id of the queue, 0 if empty
+ * @param q the queue to get the first id from
+ * @return the first id of the queue, 0 if empty
*/
int h2_iq_shift(h2_iqueue *q);
+/**
+ * Get the first max ids from the queue. All these ids will be removed.
+ *
+ * @param q the queue to get the ids from
+ * @param pint the int array to receive the values
+ * @param max the maximum number of ids to shift
+ * @return the actual number of ids shifted
+ */
+size_t h2_iq_mshift(h2_iqueue *q, int *pint, size_t max);
+
+/**
+ * Determine if int is in the queue already
+ *
+ * @param q the queue
+ * @param sid the integer id to check for
+ * @return != 0 iff sid is already in the queue
+ */
+int h2_iq_contains(h2_iqueue *q, int sid);
+
+/*******************************************************************************
+ * FIFO queue (void* elements)
+ ******************************************************************************/
+
+/**
+ * A thread-safe FIFO queue with some extra bells and whistles, if you
+ * do not need anything special, better use 'apr_queue'.
+ */
+typedef struct h2_fifo h2_fifo;
+
+/**
+ * Create a FIFO queue that can hold up to capacity elements. Elements can
+ * appear several times.
+ */
+apr_status_t h2_fifo_create(h2_fifo **pfifo, apr_pool_t *pool, int capacity);
+
+/**
+ * Create a FIFO set that can hold up to capacity elements. Elements only
+ * appear once. Pushing an element already present does not change the
+ * queue and is successful.
+ */
+apr_status_t h2_fifo_set_create(h2_fifo **pfifo, apr_pool_t *pool, int capacity);
+
+apr_status_t h2_fifo_term(h2_fifo *fifo);
+apr_status_t h2_fifo_interrupt(h2_fifo *fifo);
+
+int h2_fifo_count(h2_fifo *fifo);
+
+/**
+ * Push an element into the queue. Blocks if there is no capacity left.
+ *
+ * @param fifo the FIFO queue
+ * @param elem the element to push
+ * @return APR_SUCCESS on push, APR_EAGAIN on try_push on a full queue,
+ * APR_EEXIST when in set mode and elem already there.
+ */
+apr_status_t h2_fifo_push(h2_fifo *fifo, void *elem);
+apr_status_t h2_fifo_try_push(h2_fifo *fifo, void *elem);
+
+apr_status_t h2_fifo_pull(h2_fifo *fifo, void **pelem);
+apr_status_t h2_fifo_try_pull(h2_fifo *fifo, void **pelem);
+
+typedef enum {
+ H2_FIFO_OP_PULL, /* pull the element from the queue, ie discard it */
+ H2_FIFO_OP_REPUSH, /* pull and immediately re-push it */
+} h2_fifo_op_t;
+
+typedef h2_fifo_op_t h2_fifo_peek_fn(void *head, void *ctx);
+
+/**
+ * Call given function on the head of the queue, once it exists, and
+ * perform the returned operation on it. The queue will hold its lock during
+ * this time, so no other operations on the queue are possible.
+ * @param fifo the queue to peek at
+ * @param fn the function to call on the head, once available
+ * @param ctx context to pass in call to function
+ */
+apr_status_t h2_fifo_peek(h2_fifo *fifo, h2_fifo_peek_fn *fn, void *ctx);
+
+/**
+ * Non-blocking version of h2_fifo_peek.
+ */
+apr_status_t h2_fifo_try_peek(h2_fifo *fifo, h2_fifo_peek_fn *fn, void *ctx);
+
+/**
+ * Remove the elem from the queue, will remove multiple appearances.
+ * @param elem the element to remove
+ * @return APR_SUCCESS iff > 0 elems were removed, APR_EAGAIN otherwise.
+ */
+apr_status_t h2_fifo_remove(h2_fifo *fifo, void *elem);
+
+/*******************************************************************************
+ * iFIFO queue (int elements)
+ ******************************************************************************/
+
+/**
+ * A thread-safe FIFO queue with some extra bells and whistles, if you
+ * do not need anything special, better use 'apr_queue'.
+ */
+typedef struct h2_ififo h2_ififo;
+
+/**
+ * Create a FIFO queue that can hold up to capacity int. ints can
+ * appear several times.
+ */
+apr_status_t h2_ififo_create(h2_ififo **pfifo, apr_pool_t *pool, int capacity);
+
+/**
+ * Create a FIFO set that can hold up to capacity integers. Ints only
+ * appear once. Pushing an int already present does not change the
+ * queue and is successful.
+ */
+apr_status_t h2_ififo_set_create(h2_ififo **pfifo, apr_pool_t *pool, int capacity);
+
+apr_status_t h2_ififo_term(h2_ififo *fifo);
+apr_status_t h2_ififo_interrupt(h2_ififo *fifo);
+
+int h2_ififo_count(h2_ififo *fifo);
+
+/**
+ * Push an int into the queue. Blocks if there is no capacity left.
+ *
+ * @param fifo the FIFO queue
+ * @param id the int to push
+ * @return APR_SUCCESS on push, APR_EAGAIN on try_push on a full queue,
+ * APR_EEXIST when in set mode and elem already there.
+ */
+apr_status_t h2_ififo_push(h2_ififo *fifo, int id);
+apr_status_t h2_ififo_try_push(h2_ififo *fifo, int id);
+
+apr_status_t h2_ififo_pull(h2_ififo *fifo, int *pi);
+apr_status_t h2_ififo_try_pull(h2_ififo *fifo, int *pi);
+
+typedef h2_fifo_op_t h2_ififo_peek_fn(int head, void *ctx);
+
+/**
+ * Call given function on the head of the queue, once it exists, and
+ * perform the returned operation on it. The queue will hold its lock during
+ * this time, so no other operations on the queue are possible.
+ * @param fifo the queue to peek at
+ * @param fn the function to call on the head, once available
+ * @param ctx context to pass in call to function
+ */
+apr_status_t h2_ififo_peek(h2_ififo *fifo, h2_ififo_peek_fn *fn, void *ctx);
+
+/**
+ * Non-blocking version of h2_ififo_peek.
+ */
+apr_status_t h2_ififo_try_peek(h2_ififo *fifo, h2_ififo_peek_fn *fn, void *ctx);
+
+/**
+ * Remove the integer from the queue, will remove multiple appearances.
+ * @param id the integer to remove
+ * @return APR_SUCCESS iff > 0 ints were removed, APR_EAGAIN otherwise.
+ */
+apr_status_t h2_ififo_remove(h2_ififo *fifo, int id);
+
/*******************************************************************************
* common helpers
******************************************************************************/
/* h2_log2(n) iff n is a power of 2 */
-unsigned char h2_log2(apr_uint32_t n);
+unsigned char h2_log2(int n);
/**
* Count the bytes that all key/value pairs in a table have
@@ -172,15 +337,6 @@ unsigned char h2_log2(apr_uint32_t n);
*/
apr_size_t h2_util_table_bytes(apr_table_t *t, apr_size_t pair_extra);
-/**
- * Return != 0 iff the string s contains the token, as specified in
- * HTTP header syntax, rfc7230.
- */
-int h2_util_contains_token(apr_pool_t *pool, const char *s, const char *token);
-
-const char *h2_util_first_token_match(apr_pool_t *pool, const char *s,
- const char *tokens[], apr_size_t len);
-
/** Match a header value against a string constance, case insensitive */
#define H2_HD_MATCH_LIT(l, name, nlen) \
((nlen == sizeof(l) - 1) && !apr_strnatcasecmp(l, name))
@@ -191,18 +347,18 @@ const char *h2_util_first_token_match(ap
int h2_req_ignore_header(const char *name, size_t len);
int h2_req_ignore_trailer(const char *name, size_t len);
int h2_res_ignore_trailer(const char *name, size_t len);
-int h2_proxy_res_ignore_header(const char *name, size_t len);
/**
* Set the push policy for the given request. Takes request headers into
* account, see draft https://tools.ietf.org/html/draft-ruellan-http-accept-push-policy-00
* for details.
*
- * @param req the request to determine the policy for
+ * @param headers the http headers to inspect
* @param p the pool to use
* @param push_enabled if HTTP/2 server push is generally enabled for this request
+ * @return the push policy desired
*/
-void h2_push_policy_determine(struct h2_request *req, apr_pool_t *p, int push_enabled);
+int h2_push_policy_determine(apr_table_t *headers, apr_pool_t *p, int push_enabled);
/*******************************************************************************
* base64 url encoding, different table from normal base64
@@ -241,19 +397,21 @@ const char *h2_util_base64url_encode(con
int h2_util_ignore_header(const char *name);
+struct h2_headers;
+
typedef struct h2_ngheader {
nghttp2_nv *nv;
apr_size_t nvlen;
} h2_ngheader;
-h2_ngheader *h2_util_ngheader_make(apr_pool_t *p, apr_table_t *header);
-h2_ngheader *h2_util_ngheader_make_res(apr_pool_t *p,
- int http_status,
- apr_table_t *header);
-h2_ngheader *h2_util_ngheader_make_req(apr_pool_t *p,
- const struct h2_request *req);
+apr_status_t h2_res_create_ngtrailer(h2_ngheader **ph, apr_pool_t *p,
+ struct h2_headers *headers);
+apr_status_t h2_res_create_ngheader(h2_ngheader **ph, apr_pool_t *p,
+ struct h2_headers *headers);
+apr_status_t h2_req_create_ngheader(h2_ngheader **ph, apr_pool_t *p,
+ const struct h2_request *req);
-apr_status_t h2_headers_add_h1(apr_table_t *headers, apr_pool_t *pool,
+apr_status_t h2_req_add_header(apr_table_t *headers, apr_pool_t *pool,
const char *name, size_t nlen,
const char *value, size_t vlen);
@@ -261,16 +419,10 @@ apr_status_t h2_headers_add_h1(apr_table
* h2_request helpers
******************************************************************************/
-struct h2_request *h2_req_createn(int id, apr_pool_t *pool, const char *method,
- const char *scheme, const char *authority,
- const char *path, apr_table_t *header,
- int serialize);
-struct h2_request *h2_req_create(int id, apr_pool_t *pool, int serialize);
-
-apr_status_t h2_req_make(struct h2_request *req, apr_pool_t *pool,
- const char *method, const char *scheme,
- const char *authority, const char *path,
- apr_table_t *headers);
+struct h2_request *h2_req_create(int id, apr_pool_t *pool, const char *method,
+ const char *scheme, const char *authority,
+ const char *path, apr_table_t *header,
+ int serialize);
/*******************************************************************************
* apr brigade helpers
@@ -358,14 +510,15 @@ do { \
const char *line = "(null)"; \
apr_size_t len, bmax = sizeof(buffer)/sizeof(buffer[0]); \
len = h2_util_bb_print(buffer, bmax, (tag), "", (bb)); \
- ap_log_cerror(APLOG_MARK, level, 0, (c), "bb_dump(%ld-%d): %s", \
- (c)->id, (int)(sid), (len? buffer : line)); \
+ ap_log_cerror(APLOG_MARK, level, 0, (c), "bb_dump(%ld): %s", \
+ ((c)->master? (c)->master->id : (c)->id), (len? buffer : line)); \
} while(0)
+typedef int h2_bucket_gate(apr_bucket *b);
/**
* Transfer buckets from one brigade to another with a limit on the
- * maximum amount of bytes transfered. Does no setaside magic, lifetime
+ * maximum amount of bytes transferred. Does no setaside magic, lifetime
* of brigades must fit.
* @param to brigade to transfer buckets to
* @param from brigades to remove buckets from
@@ -375,7 +528,8 @@ do { \
apr_status_t h2_append_brigade(apr_bucket_brigade *to,
apr_bucket_brigade *from,
apr_off_t *plen,
- int *peos);
+ int *peos,
+ h2_bucket_gate *should_append);
/**
* Get an approximnation of the memory footprint of the given
diff -up --new-file httpd-2.4.23/modules/http2/h2_version.h /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_version.h
--- httpd-2.4.23/modules/http2/h2_version.h 2016-06-15 12:06:21.000000000 +0200
+++ /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_version.h 2017-10-13 10:37:45.000000000 +0200
@@ -26,7 +26,7 @@
* @macro
* Version number of the http2 module as c string
*/
-#define MOD_HTTP2_VERSION "1.5.11"
+#define MOD_HTTP2_VERSION "1.10.12"
/**
* @macro
@@ -34,7 +34,7 @@
* release. This is a 24 bit number with 8 bits for major number, 8 bits
* for minor and 8 bits for patch. Version 1.2.3 becomes 0x010203.
*/
-#define MOD_HTTP2_VERSION_NUM 0x01050b
+#define MOD_HTTP2_VERSION_NUM 0x010a0c
#endif /* mod_h2_h2_version_h */
diff -up --new-file httpd-2.4.23/modules/http2/h2_worker.c /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_worker.c
--- httpd-2.4.23/modules/http2/h2_worker.c 2016-05-18 17:10:20.000000000 +0200
+++ /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_worker.c 1970-01-01 01:00:00.000000000 +0100
@@ -1,103 +0,0 @@
-/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
-
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <assert.h>
-
-#include <apr_thread_cond.h>
-
-#include <mpm_common.h>
-#include <httpd.h>
-#include <http_core.h>
-#include <http_log.h>
-
-#include "h2.h"
-#include "h2_private.h"
-#include "h2_conn.h"
-#include "h2_ctx.h"
-#include "h2_h2.h"
-#include "h2_mplx.h"
-#include "h2_task.h"
-#include "h2_worker.h"
-
-static void* APR_THREAD_FUNC execute(apr_thread_t *thread, void *wctx)
-{
- h2_worker *worker = (h2_worker *)wctx;
- int sticky;
-
- while (!worker->aborted) {
- h2_task *task;
-
- /* Get a h2_task from the main workers queue. */
- worker->get_next(worker, worker->ctx, &task, &sticky);
- while (task) {
-
- h2_task_do(task, thread);
- /* report the task done and maybe get another one from the same
- * mplx (= master connection), if we can be sticky.
- */
- if (sticky && !worker->aborted) {
- h2_mplx_task_done(task->mplx, task, &task);
- }
- else {
- h2_mplx_task_done(task->mplx, task, NULL);
- task = NULL;
- }
- }
- }
-
- worker->worker_done(worker, worker->ctx);
- return NULL;
-}
-
-h2_worker *h2_worker_create(int id,
- apr_pool_t *pool,
- apr_threadattr_t *attr,
- h2_worker_mplx_next_fn *get_next,
- h2_worker_done_fn *worker_done,
- void *ctx)
-{
- h2_worker *w = apr_pcalloc(pool, sizeof(h2_worker));
- if (w) {
- w->id = id;
- APR_RING_ELEM_INIT(w, link);
- w->get_next = get_next;
- w->worker_done = worker_done;
- w->ctx = ctx;
- apr_thread_create(&w->thread, attr, execute, w, pool);
- }
- return w;
-}
-
-apr_status_t h2_worker_destroy(h2_worker *worker)
-{
- if (worker->thread) {
- apr_status_t status;
- apr_thread_join(&status, worker->thread);
- worker->thread = NULL;
- }
- return APR_SUCCESS;
-}
-
-void h2_worker_abort(h2_worker *worker)
-{
- worker->aborted = 1;
-}
-
-int h2_worker_is_aborted(h2_worker *worker)
-{
- return worker->aborted;
-}
-
-
diff -up --new-file httpd-2.4.23/modules/http2/h2_worker.h /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_worker.h
--- httpd-2.4.23/modules/http2/h2_worker.h 2016-04-28 14:43:02.000000000 +0200
+++ /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_worker.h 1970-01-01 01:00:00.000000000 +0100
@@ -1,135 +0,0 @@
-/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
-
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __mod_h2__h2_worker__
-#define __mod_h2__h2_worker__
-
-struct h2_mplx;
-struct h2_request;
-struct h2_task;
-
-/* h2_worker is a basically a apr_thread_t that reads fromt he h2_workers
- * task queue and runs h2_tasks it is given.
- */
-typedef struct h2_worker h2_worker;
-
-/* Invoked when the worker wants a new task to process. Will block
- * until a h2_mplx becomes available or the worker itself
- * gets aborted (idle timeout, for example). */
-typedef apr_status_t h2_worker_mplx_next_fn(h2_worker *worker,
- void *ctx,
- struct h2_task **ptask,
- int *psticky);
-
-/* Invoked just before the worker thread exits. */
-typedef void h2_worker_done_fn(h2_worker *worker, void *ctx);
-
-
-struct h2_worker {
- int id;
- /** Links to the rest of the workers */
- APR_RING_ENTRY(h2_worker) link;
- apr_thread_t *thread;
- h2_worker_mplx_next_fn *get_next;
- h2_worker_done_fn *worker_done;
- void *ctx;
- int aborted;
-};
-
-/**
- * The magic pointer value that indicates the head of a h2_worker list
- * @param b The worker list
- * @return The magic pointer value
- */
-#define H2_WORKER_LIST_SENTINEL(b) APR_RING_SENTINEL((b), h2_worker, link)
-
-/**
- * Determine if the worker list is empty
- * @param b The list to check
- * @return true or false
- */
-#define H2_WORKER_LIST_EMPTY(b) APR_RING_EMPTY((b), h2_worker, link)
-
-/**
- * Return the first worker in a list
- * @param b The list to query
- * @return The first worker in the list
- */
-#define H2_WORKER_LIST_FIRST(b) APR_RING_FIRST(b)
-
-/**
- * Return the last worker in a list
- * @param b The list to query
- * @return The last worker int he list
- */
-#define H2_WORKER_LIST_LAST(b) APR_RING_LAST(b)
-
-/**
- * Insert a single worker at the front of a list
- * @param b The list to add to
- * @param e The worker to insert
- */
-#define H2_WORKER_LIST_INSERT_HEAD(b, e) do { \
- h2_worker *ap__b = (e); \
- APR_RING_INSERT_HEAD((b), ap__b, h2_worker, link); \
- } while (0)
-
-/**
- * Insert a single worker at the end of a list
- * @param b The list to add to
- * @param e The worker to insert
- */
-#define H2_WORKER_LIST_INSERT_TAIL(b, e) do { \
- h2_worker *ap__b = (e); \
- APR_RING_INSERT_TAIL((b), ap__b, h2_worker, link); \
- } while (0)
-
-/**
- * Get the next worker in the list
- * @param e The current worker
- * @return The next worker
- */
-#define H2_WORKER_NEXT(e) APR_RING_NEXT((e), link)
-/**
- * Get the previous worker in the list
- * @param e The current worker
- * @return The previous worker
- */
-#define H2_WORKER_PREV(e) APR_RING_PREV((e), link)
-
-/**
- * Remove a worker from its list
- * @param e The worker to remove
- */
-#define H2_WORKER_REMOVE(e) APR_RING_REMOVE((e), link)
-
-
-/* Create a new worker with given id, pool and attributes, callbacks
- * callback parameter.
- */
-h2_worker *h2_worker_create(int id,
- apr_pool_t *pool,
- apr_threadattr_t *attr,
- h2_worker_mplx_next_fn *get_next,
- h2_worker_done_fn *worker_done,
- void *ctx);
-
-apr_status_t h2_worker_destroy(h2_worker *worker);
-
-void h2_worker_abort(h2_worker *worker);
-
-int h2_worker_is_aborted(h2_worker *worker);
-
-#endif /* defined(__mod_h2__h2_worker__) */
diff -up --new-file httpd-2.4.23/modules/http2/h2_workers.c /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_workers.c
--- httpd-2.4.23/modules/http2/h2_workers.c 2016-04-28 14:43:02.000000000 +0200
+++ /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_workers.c 2017-10-13 10:37:45.000000000 +0200
@@ -27,224 +27,265 @@
#include "h2_private.h"
#include "h2_mplx.h"
#include "h2_task.h"
-#include "h2_worker.h"
#include "h2_workers.h"
+#include "h2_util.h"
+typedef struct h2_slot h2_slot;
+struct h2_slot {
+ int id;
+ h2_slot *next;
+ h2_workers *workers;
+ int aborted;
+ int sticks;
+ h2_task *task;
+ apr_thread_t *thread;
+ apr_thread_mutex_t *lock;
+ apr_thread_cond_t *not_idle;
+};
-static int in_list(h2_workers *workers, h2_mplx *m)
+static h2_slot *pop_slot(h2_slot **phead)
{
- h2_mplx *e;
- for (e = H2_MPLX_LIST_FIRST(&workers->mplxs);
- e != H2_MPLX_LIST_SENTINEL(&workers->mplxs);
- e = H2_MPLX_NEXT(e)) {
- if (e == m) {
- return 1;
+ /* Atomically pop a slot from the list */
+ for (;;) {
+ h2_slot *first = *phead;
+ if (first == NULL) {
+ return NULL;
+ }
+ if (apr_atomic_casptr((void*)phead, first->next, first) == first) {
+ first->next = NULL;
+ return first;
}
}
- return 0;
}
-static void cleanup_zombies(h2_workers *workers, int lock)
+static void push_slot(h2_slot **phead, h2_slot *slot)
{
- if (lock) {
- apr_thread_mutex_lock(workers->lock);
- }
- while (!H2_WORKER_LIST_EMPTY(&workers->zombies)) {
- h2_worker *zombie = H2_WORKER_LIST_FIRST(&workers->zombies);
- H2_WORKER_REMOVE(zombie);
- ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, workers->s,
- "h2_workers: cleanup zombie %d", zombie->id);
- h2_worker_destroy(zombie);
- }
- if (lock) {
- apr_thread_mutex_unlock(workers->lock);
+ /* Atomically push a slot to the list */
+ ap_assert(!slot->next);
+ for (;;) {
+ h2_slot *next = slot->next = *phead;
+ if (apr_atomic_casptr((void*)phead, slot, next) == next) {
+ return;
+ }
}
}
-static h2_task *next_task(h2_workers *workers)
+static void* APR_THREAD_FUNC slot_run(apr_thread_t *thread, void *wctx);
+
+static apr_status_t activate_slot(h2_workers *workers, h2_slot *slot)
{
- h2_task *task = NULL;
- h2_mplx *last = NULL;
- int has_more;
+ apr_status_t status;
- /* Get the next h2_mplx to process that has a task to hand out.
- * If it does, place it at the end of the queu and return the
- * task to the worker.
- * If it (currently) has no tasks, remove it so that it needs
- * to register again for scheduling.
- * If we run out of h2_mplx in the queue, we need to wait for
- * new mplx to arrive. Depending on how many workers do exist,
- * we do a timed wait or block indefinitely.
- */
- while (!task && !H2_MPLX_LIST_EMPTY(&workers->mplxs)) {
- h2_mplx *m = H2_MPLX_LIST_FIRST(&workers->mplxs);
-
- if (last == m) {
- break;
+ slot->workers = workers;
+ slot->aborted = 0;
+ slot->task = NULL;
+
+ if (!slot->lock) {
+ status = apr_thread_mutex_create(&slot->lock,
+ APR_THREAD_MUTEX_DEFAULT,
+ workers->pool);
+ if (status != APR_SUCCESS) {
+ push_slot(&workers->free, slot);
+ return status;
}
- H2_MPLX_REMOVE(m);
- --workers->mplx_count;
-
- task = h2_mplx_pop_task(m, &has_more);
- if (has_more) {
- H2_MPLX_LIST_INSERT_TAIL(&workers->mplxs, m);
- ++workers->mplx_count;
- if (!last) {
- last = m;
- }
+ }
+
+ if (!slot->not_idle) {
+ status = apr_thread_cond_create(&slot->not_idle, workers->pool);
+ if (status != APR_SUCCESS) {
+ push_slot(&workers->free, slot);
+ return status;
}
}
- return task;
+
+ ap_log_error(APLOG_MARK, APLOG_TRACE2, 0, workers->s,
+ "h2_workers: new thread for slot %d", slot->id);
+ /* thread will either immediately start work or add itself
+ * to the idle queue */
+ apr_thread_create(&slot->thread, workers->thread_attr, slot_run, slot,
+ workers->pool);
+ if (!slot->thread) {
+ push_slot(&workers->free, slot);
+ return APR_ENOMEM;
+ }
+
+ apr_atomic_inc32(&workers->worker_count);
+ return APR_SUCCESS;
+}
+
+static apr_status_t add_worker(h2_workers *workers)
+{
+ h2_slot *slot = pop_slot(&workers->free);
+ if (slot) {
+ return activate_slot(workers, slot);
+ }
+ return APR_EAGAIN;
+}
+
+static void wake_idle_worker(h2_workers *workers)
+{
+ h2_slot *slot = pop_slot(&workers->idle);
+ if (slot) {
+ apr_thread_mutex_lock(slot->lock);
+ apr_thread_cond_signal(slot->not_idle);
+ apr_thread_mutex_unlock(slot->lock);
+ }
+ else if (workers->dynamic) {
+ add_worker(workers);
+ }
+}
+
+static void cleanup_zombies(h2_workers *workers)
+{
+ h2_slot *slot;
+ while ((slot = pop_slot(&workers->zombies))) {
+ if (slot->thread) {
+ apr_status_t status;
+ apr_thread_join(&status, slot->thread);
+ slot->thread = NULL;
+ }
+ apr_atomic_dec32(&workers->worker_count);
+ slot->next = NULL;
+ push_slot(&workers->free, slot);
+ }
+}
+
+static apr_status_t slot_pull_task(h2_slot *slot, h2_mplx *m)
+{
+ apr_status_t rv;
+
+ rv = h2_mplx_pop_task(m, &slot->task);
+ if (slot->task) {
+ /* Ok, we got something to give back to the worker for execution.
+ * If we still have idle workers, we let the worker be sticky,
+ * e.g. making it poll the task's h2_mplx instance for more work
+ * before asking back here. */
+ slot->sticks = slot->workers->max_workers;
+ return rv;
+ }
+ slot->sticks = 0;
+ return APR_EOF;
+}
+
+static h2_fifo_op_t mplx_peek(void *head, void *ctx)
+{
+ h2_mplx *m = head;
+ h2_slot *slot = ctx;
+
+ if (slot_pull_task(slot, m) == APR_EAGAIN) {
+ wake_idle_worker(slot->workers);
+ return H2_FIFO_OP_REPUSH;
+ }
+ return H2_FIFO_OP_PULL;
}
/**
* Get the next task for the given worker. Will block until a task arrives
* or the max_wait timer expires and more than min workers exist.
*/
-static apr_status_t get_mplx_next(h2_worker *worker, void *ctx,
- h2_task **ptask, int *psticky)
+static apr_status_t get_next(h2_slot *slot)
{
+ h2_workers *workers = slot->workers;
apr_status_t status;
- apr_time_t wait_until = 0, now;
- h2_workers *workers = ctx;
- h2_task *task = NULL;
- *ptask = NULL;
- *psticky = 0;
-
- status = apr_thread_mutex_lock(workers->lock);
- if (status == APR_SUCCESS) {
- ++workers->idle_workers;
- ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, workers->s,
- "h2_worker(%d): looking for work", worker->id);
-
- while (!h2_worker_is_aborted(worker) && !workers->aborted
- && !(task = next_task(workers))) {
-
- /* Need to wait for a new tasks to arrive. If we are above
- * minimum workers, we do a timed wait. When timeout occurs
- * and we have still more workers, we shut down one after
- * the other. */
- cleanup_zombies(workers, 0);
- if (workers->worker_count > workers->min_workers) {
- now = apr_time_now();
- if (now >= wait_until) {
- wait_until = now + apr_time_from_sec(workers->max_idle_secs);
- }
-
- ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, workers->s,
- "h2_worker(%d): waiting signal, "
- "workers=%d, idle=%d", worker->id,
- (int)workers->worker_count,
- workers->idle_workers);
- status = apr_thread_cond_timedwait(workers->mplx_added,
- workers->lock,
- wait_until - now);
- if (status == APR_TIMEUP
- && workers->worker_count > workers->min_workers) {
- /* waited long enough without getting a task and
- * we are above min workers, abort this one. */
- ap_log_error(APLOG_MARK, APLOG_TRACE3, 0,
- workers->s,
- "h2_workers: aborting idle worker");
- h2_worker_abort(worker);
- break;
- }
- }
- else {
- ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, workers->s,
- "h2_worker(%d): waiting signal (eternal), "
- "worker_count=%d, idle=%d", worker->id,
- (int)workers->worker_count,
- workers->idle_workers);
- apr_thread_cond_wait(workers->mplx_added, workers->lock);
+ slot->task = NULL;
+ while (!slot->aborted) {
+ if (!slot->task) {
+ status = h2_fifo_try_peek(workers->mplxs, mplx_peek, slot);
+ if (status == APR_EOF) {
+ return status;
}
}
- /* Here, we either have gotten task or decided to shut down
- * the calling worker.
- */
- if (task) {
- /* Ok, we got something to give back to the worker for execution.
- * If we have more idle workers than h2_mplx in our queue, then
- * we let the worker be sticky, e.g. making it poll the task's
- * h2_mplx instance for more work before asking back here.
- * This avoids entering our global lock as long as enough idle
- * workers remain. Stickiness of a worker ends when the connection
- * has no new tasks to process, so the worker will get back here
- * eventually.
- */
- *ptask = task;
- *psticky = (workers->max_workers >= workers->mplx_count);
-
- if (workers->mplx_count && workers->idle_workers > 1) {
- apr_thread_cond_signal(workers->mplx_added);
- }
+ if (slot->task) {
+ return APR_SUCCESS;
}
- --workers->idle_workers;
- apr_thread_mutex_unlock(workers->lock);
+ cleanup_zombies(workers);
+
+ apr_thread_mutex_lock(slot->lock);
+ push_slot(&workers->idle, slot);
+ apr_thread_cond_wait(slot->not_idle, slot->lock);
+ apr_thread_mutex_unlock(slot->lock);
}
-
- return *ptask? APR_SUCCESS : APR_EOF;
+ return APR_EOF;
}
-static void worker_done(h2_worker *worker, void *ctx)
+static void slot_done(h2_slot *slot)
{
- h2_workers *workers = ctx;
- apr_status_t status = apr_thread_mutex_lock(workers->lock);
- if (status == APR_SUCCESS) {
- ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, workers->s,
- "h2_worker(%d): done", worker->id);
- H2_WORKER_REMOVE(worker);
- --workers->worker_count;
- H2_WORKER_LIST_INSERT_TAIL(&workers->zombies, worker);
-
- apr_thread_mutex_unlock(workers->lock);
- }
+ push_slot(&(slot->workers->zombies), slot);
}
-static apr_status_t add_worker(h2_workers *workers)
+
+static void* APR_THREAD_FUNC slot_run(apr_thread_t *thread, void *wctx)
{
- h2_worker *w = h2_worker_create(workers->next_worker_id++,
- workers->pool, workers->thread_attr,
- get_mplx_next, worker_done, workers);
- if (!w) {
- return APR_ENOMEM;
+ h2_slot *slot = wctx;
+
+ while (!slot->aborted) {
+
+ /* Get a h2_task from the mplxs queue. */
+ get_next(slot);
+ while (slot->task) {
+
+ h2_task_do(slot->task, thread, slot->id);
+
+ /* Report the task as done. If stickyness is left, offer the
+ * mplx the opportunity to give us back a new task right away.
+ */
+ if (!slot->aborted && (--slot->sticks > 0)) {
+ h2_mplx_task_done(slot->task->mplx, slot->task, &slot->task);
+ }
+ else {
+ h2_mplx_task_done(slot->task->mplx, slot->task, NULL);
+ slot->task = NULL;
+ }
+ }
}
- ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, workers->s,
- "h2_workers: adding worker(%d)", w->id);
- ++workers->worker_count;
- H2_WORKER_LIST_INSERT_TAIL(&workers->workers, w);
- return APR_SUCCESS;
+
+ slot_done(slot);
+ return NULL;
}
-static apr_status_t h2_workers_start(h2_workers *workers)
+static apr_status_t workers_pool_cleanup(void *data)
{
- apr_status_t status = apr_thread_mutex_lock(workers->lock);
- if (status == APR_SUCCESS) {
- ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, workers->s,
- "h2_workers: starting");
-
- while (workers->worker_count < workers->min_workers
- && status == APR_SUCCESS) {
- status = add_worker(workers);
+ h2_workers *workers = data;
+ h2_slot *slot;
+
+ if (!workers->aborted) {
+ workers->aborted = 1;
+ /* abort all idle slots */
+ for (;;) {
+ slot = pop_slot(&workers->idle);
+ if (slot) {
+ apr_thread_mutex_lock(slot->lock);
+ slot->aborted = 1;
+ apr_thread_cond_signal(slot->not_idle);
+ apr_thread_mutex_unlock(slot->lock);
+ }
+ else {
+ break;
+ }
}
- apr_thread_mutex_unlock(workers->lock);
+
+ h2_fifo_term(workers->mplxs);
+ h2_fifo_interrupt(workers->mplxs);
+
+ cleanup_zombies(workers);
}
- return status;
+ return APR_SUCCESS;
}
h2_workers *h2_workers_create(server_rec *s, apr_pool_t *server_pool,
int min_workers, int max_workers,
- apr_size_t max_tx_handles)
+ int idle_secs)
{
apr_status_t status;
h2_workers *workers;
apr_pool_t *pool;
+ int i, n;
- AP_DEBUG_ASSERT(s);
- AP_DEBUG_ASSERT(server_pool);
+ ap_assert(s);
+ ap_assert(server_pool);
/* let's have our own pool that will be parent to all h2_worker
* instances we create. This happens in various threads, but always
@@ -254,163 +295,77 @@ h2_workers *h2_workers_create(server_rec
apr_pool_create(&pool, server_pool);
apr_pool_tag(pool, "h2_workers");
workers = apr_pcalloc(pool, sizeof(h2_workers));
- if (workers) {
- workers->s = s;
- workers->pool = pool;
- workers->min_workers = min_workers;
- workers->max_workers = max_workers;
- workers->max_idle_secs = 10;
-
- workers->max_tx_handles = max_tx_handles;
- workers->spare_tx_handles = workers->max_tx_handles;
-
- apr_threadattr_create(&workers->thread_attr, workers->pool);
- if (ap_thread_stacksize != 0) {
- apr_threadattr_stacksize_set(workers->thread_attr,
- ap_thread_stacksize);
- ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, s,
- "h2_workers: using stacksize=%ld",
- (long)ap_thread_stacksize);
- }
-
- APR_RING_INIT(&workers->workers, h2_worker, link);
- APR_RING_INIT(&workers->zombies, h2_worker, link);
- APR_RING_INIT(&workers->mplxs, h2_mplx, link);
-
- status = apr_thread_mutex_create(&workers->lock,
- APR_THREAD_MUTEX_DEFAULT,
- workers->pool);
- if (status == APR_SUCCESS) {
- status = apr_thread_cond_create(&workers->mplx_added, workers->pool);
- }
-
- if (status == APR_SUCCESS) {
- status = apr_thread_mutex_create(&workers->tx_lock,
- APR_THREAD_MUTEX_DEFAULT,
- workers->pool);
- }
-
- if (status == APR_SUCCESS) {
- status = h2_workers_start(workers);
- }
-
- if (status != APR_SUCCESS) {
- h2_workers_destroy(workers);
- workers = NULL;
- }
+ if (!workers) {
+ return NULL;
}
- return workers;
-}
-
-void h2_workers_destroy(h2_workers *workers)
-{
- /* before we go, cleanup any zombie workers that may have accumulated */
- cleanup_zombies(workers, 1);
- if (workers->mplx_added) {
- apr_thread_cond_destroy(workers->mplx_added);
- workers->mplx_added = NULL;
- }
- if (workers->lock) {
- apr_thread_mutex_destroy(workers->lock);
- workers->lock = NULL;
- }
- while (!H2_MPLX_LIST_EMPTY(&workers->mplxs)) {
- h2_mplx *m = H2_MPLX_LIST_FIRST(&workers->mplxs);
- H2_MPLX_REMOVE(m);
+ workers->s = s;
+ workers->pool = pool;
+ workers->min_workers = min_workers;
+ workers->max_workers = max_workers;
+ workers->max_idle_secs = (idle_secs > 0)? idle_secs : 10;
+
+ status = h2_fifo_create(&workers->mplxs, pool, 2 * workers->max_workers);
+ if (status != APR_SUCCESS) {
+ return NULL;
}
- while (!H2_WORKER_LIST_EMPTY(&workers->workers)) {
- h2_worker *w = H2_WORKER_LIST_FIRST(&workers->workers);
- H2_WORKER_REMOVE(w);
+
+ status = apr_threadattr_create(&workers->thread_attr, workers->pool);
+ if (status != APR_SUCCESS) {
+ return NULL;
}
- if (workers->pool) {
- apr_pool_destroy(workers->pool);
- /* workers is gone */
+
+ if (ap_thread_stacksize != 0) {
+ apr_threadattr_stacksize_set(workers->thread_attr,
+ ap_thread_stacksize);
+ ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, s,
+ "h2_workers: using stacksize=%ld",
+ (long)ap_thread_stacksize);
}
-}
-
-apr_status_t h2_workers_register(h2_workers *workers, struct h2_mplx *m)
-{
- apr_status_t status = apr_thread_mutex_lock(workers->lock);
- if (status == APR_SUCCESS) {
- ap_log_error(APLOG_MARK, APLOG_TRACE3, status, workers->s,
- "h2_workers: register mplx(%ld), idle=%d",
- m->id, workers->idle_workers);
- if (in_list(workers, m)) {
- status = APR_EAGAIN;
- }
- else {
- H2_MPLX_LIST_INSERT_TAIL(&workers->mplxs, m);
- ++workers->mplx_count;
- status = APR_SUCCESS;
- }
-
- if (workers->idle_workers > 0) {
- apr_thread_cond_signal(workers->mplx_added);
+
+ status = apr_thread_mutex_create(&workers->lock,
+ APR_THREAD_MUTEX_DEFAULT,
+ workers->pool);
+ if (status == APR_SUCCESS) {
+ n = workers->nslots = workers->max_workers;
+ workers->slots = apr_pcalloc(workers->pool, n * sizeof(h2_slot));
+ if (workers->slots == NULL) {
+ workers->nslots = 0;
+ status = APR_ENOMEM;
}
- else if (status == APR_SUCCESS
- && workers->worker_count < workers->max_workers) {
- ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, workers->s,
- "h2_workers: got %d worker, adding 1",
- workers->worker_count);
- add_worker(workers);
+ for (i = 0; i < n; ++i) {
+ workers->slots[i].id = i;
}
- apr_thread_mutex_unlock(workers->lock);
}
- return status;
-}
-
-apr_status_t h2_workers_unregister(h2_workers *workers, struct h2_mplx *m)
-{
- apr_status_t status = apr_thread_mutex_lock(workers->lock);
if (status == APR_SUCCESS) {
- status = APR_EAGAIN;
- if (in_list(workers, m)) {
- H2_MPLX_REMOVE(m);
- status = APR_SUCCESS;
+ /* we activate all for now, TODO: support min_workers again.
+ * do this in reverse for vanity reasons so slot 0 will most
+ * likely be at head of idle queue. */
+ n = workers->max_workers;
+ for (i = n-1; i >= 0; --i) {
+ status = activate_slot(workers, &workers->slots[i]);
+ }
+ /* the rest of the slots go on the free list */
+ for(i = n; i < workers->nslots; ++i) {
+ push_slot(&workers->free, &workers->slots[i]);
}
- apr_thread_mutex_unlock(workers->lock);
+ workers->dynamic = (workers->worker_count < workers->max_workers);
}
- return status;
-}
-
-void h2_workers_set_max_idle_secs(h2_workers *workers, int idle_secs)
-{
- if (idle_secs <= 0) {
- ap_log_error(APLOG_MARK, APLOG_WARNING, 0, workers->s,
- APLOGNO(02962) "h2_workers: max_worker_idle_sec value of %d"
- " is not valid, ignored.", idle_secs);
- return;
+ if (status == APR_SUCCESS) {
+ apr_pool_pre_cleanup_register(pool, workers, workers_pool_cleanup);
+ return workers;
}
- workers->max_idle_secs = idle_secs;
+ return NULL;
}
-apr_size_t h2_workers_tx_reserve(h2_workers *workers, apr_size_t count)
+apr_status_t h2_workers_register(h2_workers *workers, struct h2_mplx *m)
{
- apr_status_t status = apr_thread_mutex_lock(workers->tx_lock);
- if (status == APR_SUCCESS) {
- count = H2MIN(workers->spare_tx_handles, count);
- workers->spare_tx_handles -= count;
- ap_log_error(APLOG_MARK, APLOG_TRACE2, 0, workers->s,
- "h2_workers: reserved %d tx handles, %d/%d left",
- (int)count, (int)workers->spare_tx_handles,
- (int)workers->max_tx_handles);
- apr_thread_mutex_unlock(workers->tx_lock);
- return count;
- }
- return 0;
+ apr_status_t status = h2_fifo_push(workers->mplxs, m);
+ wake_idle_worker(workers);
+ return status;
}
-void h2_workers_tx_free(h2_workers *workers, apr_size_t count)
+apr_status_t h2_workers_unregister(h2_workers *workers, struct h2_mplx *m)
{
- apr_status_t status = apr_thread_mutex_lock(workers->tx_lock);
- if (status == APR_SUCCESS) {
- workers->spare_tx_handles += count;
- ap_log_error(APLOG_MARK, APLOG_TRACE2, 0, workers->s,
- "h2_workers: freed %d tx handles, %d/%d left",
- (int)count, (int)workers->spare_tx_handles,
- (int)workers->max_tx_handles);
- apr_thread_mutex_unlock(workers->tx_lock);
- }
+ return h2_fifo_remove(workers->mplxs, m);
}
-
diff -up --new-file httpd-2.4.23/modules/http2/h2_workers.h /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_workers.h
--- httpd-2.4.23/modules/http2/h2_workers.h 2016-03-02 12:21:28.000000000 +0100
+++ /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/h2_workers.h 2017-04-10 17:04:55.000000000 +0200
@@ -27,6 +27,9 @@ struct apr_thread_cond_t;
struct h2_mplx;
struct h2_request;
struct h2_task;
+struct h2_fifo;
+
+struct h2_slot;
typedef struct h2_workers h2_workers;
@@ -37,26 +40,24 @@ struct h2_workers {
int next_worker_id;
int min_workers;
int max_workers;
- int worker_count;
- int idle_workers;
int max_idle_secs;
- apr_size_t max_tx_handles;
- apr_size_t spare_tx_handles;
-
- unsigned int aborted : 1;
+ int aborted;
+ int dynamic;
apr_threadattr_t *thread_attr;
+ int nslots;
+ struct h2_slot *slots;
+
+ volatile apr_uint32_t worker_count;
+
+ struct h2_slot *free;
+ struct h2_slot *idle;
+ struct h2_slot *zombies;
- APR_RING_HEAD(h2_worker_list, h2_worker) workers;
- APR_RING_HEAD(h2_worker_zombies, h2_worker) zombies;
- APR_RING_HEAD(h2_mplx_list, h2_mplx) mplxs;
- int mplx_count;
+ struct h2_fifo *mplxs;
struct apr_thread_mutex_t *lock;
- struct apr_thread_cond_t *mplx_added;
-
- struct apr_thread_mutex_t *tx_lock;
};
@@ -64,12 +65,7 @@ struct h2_workers {
* threads.
*/
h2_workers *h2_workers_create(server_rec *s, apr_pool_t *pool,
- int min_size, int max_size,
- apr_size_t max_tx_handles);
-
-/* Destroy the worker pool and all its threads.
- */
-void h2_workers_destroy(h2_workers *workers);
+ int min_size, int max_size, int idle_secs);
/**
* Registers a h2_mplx for task scheduling. If this h2_mplx runs
@@ -83,38 +79,4 @@ apr_status_t h2_workers_register(h2_work
*/
apr_status_t h2_workers_unregister(h2_workers *workers, struct h2_mplx *m);
-/**
- * Set the amount of seconds a h2_worker should wait for new tasks
- * before shutting down (if there are more than the minimum number of
- * workers).
- */
-void h2_workers_set_max_idle_secs(h2_workers *workers, int idle_secs);
-
-/**
- * Reservation of file handles available for transfer between workers
- * and master connections.
- *
- * When handling output from request processing, file handles are often
- * encountered when static files are served. The most efficient way is then
- * to forward the handle itself to the master connection where it can be
- * read or sendfile'd to the client. But file handles are a scarce resource,
- * so there needs to be a limit on how many handles are transferred this way.
- *
- * h2_workers keeps track of the number of reserved handles and observes a
- * configurable maximum value.
- *
- * @param workers the workers instance
- * @param count how many handles the caller wishes to reserve
- * @return the number of reserved handles, may be 0.
- */
-apr_size_t h2_workers_tx_reserve(h2_workers *workers, apr_size_t count);
-
-/**
- * Return a number of reserved file handles back to the pool. The number
- * overall may not exceed the numbers reserved.
- * @param workers the workers instance
- * @param count how many handles are returned to the pool
- */
-void h2_workers_tx_free(h2_workers *workers, apr_size_t count);
-
#endif /* defined(__mod_h2__h2_workers__) */
diff -up --new-file httpd-2.4.23/modules/http2/mod_http2.c /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/mod_http2.c
--- httpd-2.4.23/modules/http2/mod_http2.c 2016-04-28 14:43:02.000000000 +0200
+++ /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/mod_http2.c 2017-07-04 14:34:15.000000000 +0200
@@ -47,10 +47,10 @@ static void h2_hooks(apr_pool_t *pool);
AP_DECLARE_MODULE(http2) = {
STANDARD20_MODULE_STUFF,
- NULL,
- NULL,
+ h2_config_create_dir, /* func to create per dir config */
+ h2_config_merge_dir, /* func to merge per dir config */
h2_config_create_svr, /* func to create per server config */
- h2_config_merge, /* func to merge per server config */
+ h2_config_merge_svr, /* func to merge per server config */
h2_cmds, /* command handlers */
h2_hooks
};
@@ -60,9 +60,12 @@ static int h2_h2_fixups(request_rec *r);
typedef struct {
unsigned int change_prio : 1;
unsigned int sha256 : 1;
+ unsigned int inv_headers : 1;
+ unsigned int dyn_windows : 1;
} features;
static features myfeats;
+static int mpm_warned;
/* The module initialization. Called once as apache hook, before any multi
* processing (threaded or not) happens. It is typically at least called twice,
@@ -84,16 +87,20 @@ static int h2_post_config(apr_pool_t *p,
const char *mod_h2_init_key = "mod_http2_init_counter";
nghttp2_info *ngh2;
apr_status_t status;
- const char *sep = "";
(void)plog;(void)ptemp;
#ifdef H2_NG2_CHANGE_PRIO
myfeats.change_prio = 1;
- sep = "+";
#endif
#ifdef H2_OPENSSL
myfeats.sha256 = 1;
#endif
+#ifdef H2_NG2_INVALID_HEADER_CB
+ myfeats.inv_headers = 1;
+#endif
+#ifdef H2_NG2_LOCAL_WIN_SIZE
+ myfeats.dyn_windows = 1;
+#endif
apr_pool_userdata_get(&data, mod_h2_init_key, s->process->pool);
if ( data == NULL ) {
@@ -106,10 +113,12 @@ static int h2_post_config(apr_pool_t *p,
ngh2 = nghttp2_version(0);
ap_log_error( APLOG_MARK, APLOG_INFO, 0, s, APLOGNO(03090)
- "mod_http2 (v%s, feats=%s%s%s, nghttp2 %s), initializing...",
+ "mod_http2 (v%s, feats=%s%s%s%s, nghttp2 %s), initializing...",
MOD_HTTP2_VERSION,
- myfeats.change_prio? "CHPRIO" : "", sep,
- myfeats.sha256? "SHA256" : "",
+ myfeats.change_prio? "CHPRIO" : "",
+ myfeats.sha256? "+SHA256" : "",
+ myfeats.inv_headers? "+INVHD" : "",
+ myfeats.dyn_windows? "+DWINS" : "",
ngh2? ngh2->version_str : "unknown");
switch (h2_conn_mpm_type()) {
@@ -133,6 +142,17 @@ static int h2_post_config(apr_pool_t *p,
break;
}
+ if (!h2_mpm_supported() && !mpm_warned) {
+ mpm_warned = 1;
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(10034)
+ "The mpm module (%s) is not supported by mod_http2. The mpm determines "
+ "how things are processed in your server. HTTP/2 has more demands in "
+ "this regard and the currently selected mpm will just not do. "
+ "This is an advisory warning. Your server will continue to work, but "
+ "the HTTP/2 protocol will be inactive.",
+ h2_conn_mpm_name());
+ }
+
status = h2_h2_init(p, s);
if (status == APR_SUCCESS) {
status = h2_switch_init(p, s);
@@ -157,15 +177,16 @@ static apr_status_t http2_req_engine_pus
static apr_status_t http2_req_engine_pull(h2_req_engine *ngn,
apr_read_type_e block,
- apr_uint32_t capacity,
+ int capacity,
request_rec **pr)
{
return h2_mplx_req_engine_pull(ngn, block, capacity, pr);
}
-static void http2_req_engine_done(h2_req_engine *ngn, conn_rec *r_conn)
+static void http2_req_engine_done(h2_req_engine *ngn, conn_rec *r_conn,
+ apr_status_t status)
{
- h2_mplx_req_engine_done(ngn, r_conn);
+ h2_mplx_req_engine_done(ngn, r_conn, status);
}
/* Runs once per created child process. Perform any process
@@ -230,8 +251,11 @@ static const char *val_H2_PUSH(apr_pool_
if (ctx) {
if (r) {
h2_task *task = h2_ctx_get_task(ctx);
- if (task && task->request->push_policy != H2_PUSH_NONE) {
- return "on";
+ if (task) {
+ h2_stream *stream = h2_mplx_stream_get(task->mplx, task->stream_id);
+ if (stream && stream->push_policy != H2_PUSH_NONE) {
+ return "on";
+ }
}
}
else if (c && h2_session_push_enabled(ctx->session)) {
@@ -265,7 +289,10 @@ static const char *val_H2_PUSHED_ON(apr_
if (ctx) {
h2_task *task = h2_ctx_get_task(ctx);
if (task && !H2_STREAM_CLIENT_INITIATED(task->stream_id)) {
- return apr_itoa(p, task->request->initiated_on);
+ h2_stream *stream = h2_mplx_stream_get(task->mplx, task->stream_id);
+ if (stream) {
+ return apr_itoa(p, stream->initiated_on);
+ }
}
}
return "";
@@ -334,7 +361,7 @@ static char *http2_var_lookup(apr_pool_t
return (char *)vdef->lookup(p, s, c, r, ctx);
}
}
- return "";
+ return (char*)"";
}
static int h2_h2_fixups(request_rec *r)
diff -up --new-file httpd-2.4.23/modules/http2/mod_http2.dep /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/mod_http2.dep
--- httpd-2.4.23/modules/http2/mod_http2.dep 2016-04-28 23:18:43.000000000 +0200
+++ /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/mod_http2.dep 2017-04-01 03:22:41.000000000 +0200
@@ -57,57 +57,6 @@
".\h2_util.h"\
-./h2_bucket_eoc.c : \
- "..\..\include\ap_config.h"\
- "..\..\include\ap_config_layout.h"\
- "..\..\include\ap_expr.h"\
- "..\..\include\ap_hooks.h"\
- "..\..\include\ap_mmn.h"\
- "..\..\include\ap_regex.h"\
- "..\..\include\ap_release.h"\
- "..\..\include\apache_noprobes.h"\
- "..\..\include\http_config.h"\
- "..\..\include\http_connection.h"\
- "..\..\include\http_core.h"\
- "..\..\include\http_log.h"\
- "..\..\include\httpd.h"\
- "..\..\include\os.h"\
- "..\..\include\util_cfgtree.h"\
- "..\..\include\util_filter.h"\
- "..\..\srclib\apr-util\include\apr_buckets.h"\
- "..\..\srclib\apr-util\include\apr_hooks.h"\
- "..\..\srclib\apr-util\include\apr_optional.h"\
- "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
- "..\..\srclib\apr-util\include\apr_queue.h"\
- "..\..\srclib\apr-util\include\apr_uri.h"\
- "..\..\srclib\apr-util\include\apu.h"\
- "..\..\srclib\apr\include\apr.h"\
- "..\..\srclib\apr\include\apr_allocator.h"\
- "..\..\srclib\apr\include\apr_errno.h"\
- "..\..\srclib\apr\include\apr_file_info.h"\
- "..\..\srclib\apr\include\apr_file_io.h"\
- "..\..\srclib\apr\include\apr_general.h"\
- "..\..\srclib\apr\include\apr_hash.h"\
- "..\..\srclib\apr\include\apr_inherit.h"\
- "..\..\srclib\apr\include\apr_mmap.h"\
- "..\..\srclib\apr\include\apr_network_io.h"\
- "..\..\srclib\apr\include\apr_poll.h"\
- "..\..\srclib\apr\include\apr_pools.h"\
- "..\..\srclib\apr\include\apr_ring.h"\
- "..\..\srclib\apr\include\apr_tables.h"\
- "..\..\srclib\apr\include\apr_thread_mutex.h"\
- "..\..\srclib\apr\include\apr_thread_proc.h"\
- "..\..\srclib\apr\include\apr_time.h"\
- "..\..\srclib\apr\include\apr_user.h"\
- "..\..\srclib\apr\include\apr_want.h"\
- ".\h2.h"\
- ".\h2_bucket_eoc.h"\
- ".\h2_conn_io.h"\
- ".\h2_mplx.h"\
- ".\h2_private.h"\
- ".\h2_session.h"\
-
-
./h2_bucket_eos.c : \
"..\..\include\ap_config.h"\
"..\..\include\ap_config_layout.h"\
@@ -283,7 +232,6 @@
".\h2_stream.h"\
".\h2_task.h"\
".\h2_version.h"\
- ".\h2_worker.h"\
".\h2_workers.h"\
@@ -339,7 +287,6 @@
"..\..\srclib\apr\include\apr_user.h"\
"..\..\srclib\apr\include\apr_want.h"\
".\h2.h"\
- ".\h2_bucket_eoc.h"\
".\h2_bucket_eos.h"\
".\h2_config.h"\
".\h2_conn_io.h"\
@@ -454,7 +401,6 @@
".\h2_private.h"\
".\h2_push.h"\
".\h2_request.h"\
- ".\h2_response.h"\
".\h2_session.h"\
".\h2_stream.h"\
".\h2_task.h"\
@@ -517,7 +463,6 @@
".\h2.h"\
".\h2_from_h1.h"\
".\h2_private.h"\
- ".\h2_response.h"\
".\h2_task.h"\
".\h2_util.h"\
@@ -648,7 +593,6 @@
".\h2_mplx.h"\
".\h2_private.h"\
".\h2_request.h"\
- ".\h2_response.h"\
".\h2_task.h"\
".\h2_util.h"\
@@ -753,11 +697,9 @@
".\h2_ngn_shed.h"\
".\h2_private.h"\
".\h2_request.h"\
- ".\h2_response.h"\
".\h2_stream.h"\
".\h2_task.h"\
".\h2_util.h"\
- ".\h2_worker.h"\
".\h2_workers.h"\
".\mod_http2.h"\
@@ -815,7 +757,6 @@
".\h2_ngn_shed.h"\
".\h2_private.h"\
".\h2_request.h"\
- ".\h2_response.h"\
".\h2_task.h"\
".\h2_util.h"\
".\mod_http2.h"\
@@ -870,7 +811,6 @@
".\h2_private.h"\
".\h2_push.h"\
".\h2_request.h"\
- ".\h2_response.h"\
".\h2_session.h"\
".\h2_stream.h"\
".\h2_util.h"\
@@ -937,58 +877,6 @@
".\h2_util.h"\
-./h2_response.c : \
- "..\..\include\ap_config.h"\
- "..\..\include\ap_config_layout.h"\
- "..\..\include\ap_expr.h"\
- "..\..\include\ap_hooks.h"\
- "..\..\include\ap_mmn.h"\
- "..\..\include\ap_regex.h"\
- "..\..\include\ap_release.h"\
- "..\..\include\apache_noprobes.h"\
- "..\..\include\http_config.h"\
- "..\..\include\http_core.h"\
- "..\..\include\http_log.h"\
- "..\..\include\httpd.h"\
- "..\..\include\os.h"\
- "..\..\include\util_cfgtree.h"\
- "..\..\include\util_filter.h"\
- "..\..\include\util_time.h"\
- "..\..\srclib\apr-util\include\apr_buckets.h"\
- "..\..\srclib\apr-util\include\apr_hooks.h"\
- "..\..\srclib\apr-util\include\apr_optional.h"\
- "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
- "..\..\srclib\apr-util\include\apr_uri.h"\
- "..\..\srclib\apr-util\include\apu.h"\
- "..\..\srclib\apr\include\apr.h"\
- "..\..\srclib\apr\include\apr_allocator.h"\
- "..\..\srclib\apr\include\apr_errno.h"\
- "..\..\srclib\apr\include\apr_file_info.h"\
- "..\..\srclib\apr\include\apr_file_io.h"\
- "..\..\srclib\apr\include\apr_general.h"\
- "..\..\srclib\apr\include\apr_hash.h"\
- "..\..\srclib\apr\include\apr_inherit.h"\
- "..\..\srclib\apr\include\apr_mmap.h"\
- "..\..\srclib\apr\include\apr_network_io.h"\
- "..\..\srclib\apr\include\apr_poll.h"\
- "..\..\srclib\apr\include\apr_pools.h"\
- "..\..\srclib\apr\include\apr_ring.h"\
- "..\..\srclib\apr\include\apr_strings.h"\
- "..\..\srclib\apr\include\apr_tables.h"\
- "..\..\srclib\apr\include\apr_thread_mutex.h"\
- "..\..\srclib\apr\include\apr_thread_proc.h"\
- "..\..\srclib\apr\include\apr_time.h"\
- "..\..\srclib\apr\include\apr_user.h"\
- "..\..\srclib\apr\include\apr_want.h"\
- ".\h2.h"\
- ".\h2_filter.h"\
- ".\h2_h2.h"\
- ".\h2_private.h"\
- ".\h2_request.h"\
- ".\h2_response.h"\
- ".\h2_util.h"\
-
-
./h2_session.c : \
"..\..\include\ap_config.h"\
"..\..\include\ap_config_layout.h"\
@@ -1042,7 +930,6 @@
"..\..\srclib\apr\include\apr_user.h"\
"..\..\srclib\apr\include\apr_want.h"\
".\h2.h"\
- ".\h2_bucket_eoc.h"\
".\h2_bucket_eos.h"\
".\h2_config.h"\
".\h2_conn_io.h"\
@@ -1054,7 +941,6 @@
".\h2_private.h"\
".\h2_push.h"\
".\h2_request.h"\
- ".\h2_response.h"\
".\h2_session.h"\
".\h2_stream.h"\
".\h2_task.h"\
@@ -1117,7 +1003,6 @@
".\h2_private.h"\
".\h2_push.h"\
".\h2_request.h"\
- ".\h2_response.h"\
".\h2_session.h"\
".\h2_stream.h"\
".\h2_task.h"\
@@ -1251,7 +1136,6 @@
".\h2_session.h"\
".\h2_stream.h"\
".\h2_task.h"\
- ".\h2_worker.h"\
./h2_task_input.c : \
@@ -1361,7 +1245,6 @@
".\h2_mplx.h"\
".\h2_private.h"\
".\h2_request.h"\
- ".\h2_response.h"\
".\h2_session.h"\
".\h2_stream.h"\
".\h2_task.h"\
@@ -1417,67 +1300,6 @@
".\h2_util.h"\
-./h2_worker.c : \
- "..\..\include\ap_config.h"\
- "..\..\include\ap_config_layout.h"\
- "..\..\include\ap_expr.h"\
- "..\..\include\ap_hooks.h"\
- "..\..\include\ap_mmn.h"\
- "..\..\include\ap_mpm.h"\
- "..\..\include\ap_regex.h"\
- "..\..\include\ap_release.h"\
- "..\..\include\apache_noprobes.h"\
- "..\..\include\http_config.h"\
- "..\..\include\http_core.h"\
- "..\..\include\http_log.h"\
- "..\..\include\httpd.h"\
- "..\..\include\mpm_common.h"\
- "..\..\include\os.h"\
- "..\..\include\scoreboard.h"\
- "..\..\include\util_cfgtree.h"\
- "..\..\include\util_filter.h"\
- "..\..\srclib\apr-util\include\apr_buckets.h"\
- "..\..\srclib\apr-util\include\apr_hooks.h"\
- "..\..\srclib\apr-util\include\apr_optional.h"\
- "..\..\srclib\apr-util\include\apr_optional_hooks.h"\
- "..\..\srclib\apr-util\include\apr_queue.h"\
- "..\..\srclib\apr-util\include\apr_uri.h"\
- "..\..\srclib\apr-util\include\apu.h"\
- "..\..\srclib\apr\include\apr.h"\
- "..\..\srclib\apr\include\apr_allocator.h"\
- "..\..\srclib\apr\include\apr_dso.h"\
- "..\..\srclib\apr\include\apr_errno.h"\
- "..\..\srclib\apr\include\apr_file_info.h"\
- "..\..\srclib\apr\include\apr_file_io.h"\
- "..\..\srclib\apr\include\apr_general.h"\
- "..\..\srclib\apr\include\apr_global_mutex.h"\
- "..\..\srclib\apr\include\apr_hash.h"\
- "..\..\srclib\apr\include\apr_inherit.h"\
- "..\..\srclib\apr\include\apr_mmap.h"\
- "..\..\srclib\apr\include\apr_network_io.h"\
- "..\..\srclib\apr\include\apr_poll.h"\
- "..\..\srclib\apr\include\apr_pools.h"\
- "..\..\srclib\apr\include\apr_portable.h"\
- "..\..\srclib\apr\include\apr_proc_mutex.h"\
- "..\..\srclib\apr\include\apr_ring.h"\
- "..\..\srclib\apr\include\apr_shm.h"\
- "..\..\srclib\apr\include\apr_tables.h"\
- "..\..\srclib\apr\include\apr_thread_cond.h"\
- "..\..\srclib\apr\include\apr_thread_mutex.h"\
- "..\..\srclib\apr\include\apr_thread_proc.h"\
- "..\..\srclib\apr\include\apr_time.h"\
- "..\..\srclib\apr\include\apr_user.h"\
- "..\..\srclib\apr\include\apr_want.h"\
- ".\h2.h"\
- ".\h2_conn.h"\
- ".\h2_ctx.h"\
- ".\h2_h2.h"\
- ".\h2_mplx.h"\
- ".\h2_private.h"\
- ".\h2_task.h"\
- ".\h2_worker.h"\
-
-
./h2_workers.c : \
"..\..\include\ap_config.h"\
"..\..\include\ap_config_layout.h"\
@@ -1534,7 +1356,6 @@
".\h2_mplx.h"\
".\h2_private.h"\
".\h2_task.h"\
- ".\h2_worker.h"\
".\h2_workers.h"\
diff -up --new-file httpd-2.4.23/modules/http2/mod_http2.dsp /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/mod_http2.dsp
--- httpd-2.4.23/modules/http2/mod_http2.dsp 2016-04-28 14:43:02.000000000 +0200
+++ /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/mod_http2.dsp 2017-04-01 03:22:41.000000000 +0200
@@ -109,10 +109,6 @@ SOURCE=./h2_bucket_beam.c
# End Source File
# Begin Source File
-SOURCE=./h2_bucket_eoc.c
-# End Source File
-# Begin Source File
-
SOURCE=./h2_bucket_eos.c
# End Source File
# Begin Source File
@@ -161,7 +157,7 @@ SOURCE=./h2_request.c
# End Source File
# Begin Source File
-SOURCE=./h2_response.c
+SOURCE=./h2_headers.c
# End Source File
# Begin Source File
@@ -185,10 +181,6 @@ SOURCE=./h2_util.c
# End Source File
# Begin Source File
-SOURCE=./h2_worker.c
-# End Source File
-# Begin Source File
-
SOURCE=./h2_workers.c
# End Source File
# Begin Source File
diff -up --new-file httpd-2.4.23/modules/http2/mod_http2.h /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/mod_http2.h
--- httpd-2.4.23/modules/http2/mod_http2.h 2016-05-23 12:55:29.000000000 +0200
+++ /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/mod_http2.h 2016-11-01 21:24:52.000000000 +0100
@@ -49,7 +49,7 @@ typedef apr_status_t http2_req_engine_in
const char *id,
const char *type,
apr_pool_t *pool,
- apr_uint32_t req_buffer_size,
+ apr_size_t req_buffer_size,
request_rec *r,
http2_output_consumed **pconsumed,
void **pbaton);
@@ -75,8 +75,9 @@ APR_DECLARE_OPTIONAL_FN(apr_status_t,
/**
* Get a new request for processing in this engine.
* @param engine the engine which is done processing the slave
- * @param timeout wait a maximum amount of time for a new slave, 0 will not wait
- * @param pslave the slave connection that needs processing or NULL
+ * @param block if call should block waiting for request to come
+ * @param capacity how many parallel requests are acceptable
+ * @param pr the request that needs processing or NULL
* @return APR_SUCCESS if new request was assigned
* APR_EAGAIN if no new request is available
* APR_EOF if engine may shut down, as no more request will be scheduled
@@ -85,9 +86,10 @@ APR_DECLARE_OPTIONAL_FN(apr_status_t,
APR_DECLARE_OPTIONAL_FN(apr_status_t,
http2_req_engine_pull, (h2_req_engine *engine,
apr_read_type_e block,
- apr_uint32_t capacity,
+ int capacity,
request_rec **pr));
APR_DECLARE_OPTIONAL_FN(void,
http2_req_engine_done, (h2_req_engine *engine,
- conn_rec *rconn));
+ conn_rec *rconn,
+ apr_status_t status));
#endif
diff -up --new-file httpd-2.4.23/modules/http2/mod_http2.mak /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/mod_http2.mak
--- httpd-2.4.23/modules/http2/mod_http2.mak 2016-04-28 23:18:43.000000000 +0200
+++ /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/mod_http2.mak 2017-04-01 03:22:41.000000000 +0200
@@ -51,7 +51,6 @@ CLEAN :
!ENDIF
-@erase "$(INTDIR)\h2_alt_svc.obj"
-@erase "$(INTDIR)\h2_bucket_beam.obj"
- -@erase "$(INTDIR)\h2_bucket_eoc.obj"
-@erase "$(INTDIR)\h2_bucket_eos.obj"
-@erase "$(INTDIR)\h2_config.obj"
-@erase "$(INTDIR)\h2_conn.obj"
@@ -60,17 +59,16 @@ CLEAN :
-@erase "$(INTDIR)\h2_filter.obj"
-@erase "$(INTDIR)\h2_from_h1.obj"
-@erase "$(INTDIR)\h2_h2.obj"
+ -@erase "$(INTDIR)\h2_headers.obj"
-@erase "$(INTDIR)\h2_mplx.obj"
-@erase "$(INTDIR)\h2_ngn_shed.obj"
-@erase "$(INTDIR)\h2_push.obj"
-@erase "$(INTDIR)\h2_request.obj"
- -@erase "$(INTDIR)\h2_response.obj"
-@erase "$(INTDIR)\h2_session.obj"
-@erase "$(INTDIR)\h2_stream.obj"
-@erase "$(INTDIR)\h2_switch.obj"
-@erase "$(INTDIR)\h2_task.obj"
-@erase "$(INTDIR)\h2_util.obj"
- -@erase "$(INTDIR)\h2_worker.obj"
-@erase "$(INTDIR)\h2_workers.obj"
-@erase "$(INTDIR)\mod_http2.obj"
-@erase "$(INTDIR)\mod_http2.res"
@@ -130,7 +128,6 @@ LINK32_FLAGS=kernel32.lib nghttp2.lib /n
LINK32_OBJS= \
"$(INTDIR)\h2_alt_svc.obj" \
"$(INTDIR)\h2_bucket_beam.obj" \
- "$(INTDIR)\h2_bucket_eoc.obj" \
"$(INTDIR)\h2_bucket_eos.obj" \
"$(INTDIR)\h2_config.obj" \
"$(INTDIR)\h2_conn.obj" \
@@ -139,17 +136,16 @@ LINK32_OBJS= \
"$(INTDIR)\h2_filter.obj" \
"$(INTDIR)\h2_from_h1.obj" \
"$(INTDIR)\h2_h2.obj" \
+ "$(INTDIR)\h2_headers.obj" \
"$(INTDIR)\h2_mplx.obj" \
"$(INTDIR)\h2_ngn_shed.obj" \
"$(INTDIR)\h2_push.obj" \
"$(INTDIR)\h2_request.obj" \
- "$(INTDIR)\h2_response.obj" \
"$(INTDIR)\h2_session.obj" \
"$(INTDIR)\h2_stream.obj" \
"$(INTDIR)\h2_switch.obj" \
"$(INTDIR)\h2_task.obj" \
"$(INTDIR)\h2_util.obj" \
- "$(INTDIR)\h2_worker.obj" \
"$(INTDIR)\h2_workers.obj" \
"$(INTDIR)\mod_http2.obj" \
"$(INTDIR)\mod_http2.res" \
@@ -201,7 +197,6 @@ CLEAN :
!ENDIF
-@erase "$(INTDIR)\h2_alt_svc.obj"
-@erase "$(INTDIR)\h2_bucket_beam.obj"
- -@erase "$(INTDIR)\h2_bucket_eoc.obj"
-@erase "$(INTDIR)\h2_bucket_eos.obj"
-@erase "$(INTDIR)\h2_config.obj"
-@erase "$(INTDIR)\h2_conn.obj"
@@ -210,17 +205,16 @@ CLEAN :
-@erase "$(INTDIR)\h2_filter.obj"
-@erase "$(INTDIR)\h2_from_h1.obj"
-@erase "$(INTDIR)\h2_h2.obj"
+ -@erase "$(INTDIR)\h2_headers.obj"
-@erase "$(INTDIR)\h2_mplx.obj"
-@erase "$(INTDIR)\h2_ngn_shed.obj"
-@erase "$(INTDIR)\h2_push.obj"
-@erase "$(INTDIR)\h2_request.obj"
- -@erase "$(INTDIR)\h2_response.obj"
-@erase "$(INTDIR)\h2_session.obj"
-@erase "$(INTDIR)\h2_stream.obj"
-@erase "$(INTDIR)\h2_switch.obj"
-@erase "$(INTDIR)\h2_task.obj"
-@erase "$(INTDIR)\h2_util.obj"
- -@erase "$(INTDIR)\h2_worker.obj"
-@erase "$(INTDIR)\h2_workers.obj"
-@erase "$(INTDIR)\mod_http2.obj"
-@erase "$(INTDIR)\mod_http2.res"
@@ -280,7 +274,6 @@ LINK32_FLAGS=kernel32.lib nghttp2d.lib /
LINK32_OBJS= \
"$(INTDIR)\h2_alt_svc.obj" \
"$(INTDIR)\h2_bucket_beam.obj" \
- "$(INTDIR)\h2_bucket_eoc.obj" \
"$(INTDIR)\h2_bucket_eos.obj" \
"$(INTDIR)\h2_config.obj" \
"$(INTDIR)\h2_conn.obj" \
@@ -289,17 +282,16 @@ LINK32_OBJS= \
"$(INTDIR)\h2_filter.obj" \
"$(INTDIR)\h2_from_h1.obj" \
"$(INTDIR)\h2_h2.obj" \
+ "$(INTDIR)\h2_headers.obj" \
"$(INTDIR)\h2_mplx.obj" \
"$(INTDIR)\h2_ngn_shed.obj" \
"$(INTDIR)\h2_push.obj" \
"$(INTDIR)\h2_request.obj" \
- "$(INTDIR)\h2_response.obj" \
"$(INTDIR)\h2_session.obj" \
"$(INTDIR)\h2_stream.obj" \
"$(INTDIR)\h2_switch.obj" \
"$(INTDIR)\h2_task.obj" \
"$(INTDIR)\h2_util.obj" \
- "$(INTDIR)\h2_worker.obj" \
"$(INTDIR)\h2_workers.obj" \
"$(INTDIR)\mod_http2.obj" \
"$(INTDIR)\mod_http2.res" \
@@ -427,11 +419,6 @@ SOURCE=./h2_bucket_beam.c
"$(INTDIR)/h2_bucket_beam.obj" : $(SOURCE) "$(INTDIR)"
-SOURCE=./h2_bucket_eoc.c
-
-"$(INTDIR)\h2_bucket_eoc.obj" : $(SOURCE) "$(INTDIR)"
-
-
SOURCE=./h2_bucket_eos.c
"$(INTDIR)\h2_bucket_eos.obj" : $(SOURCE) "$(INTDIR)"
@@ -472,6 +459,11 @@ SOURCE=./h2_h2.c
"$(INTDIR)\h2_h2.obj" : $(SOURCE) "$(INTDIR)"
+SOURCE=./h2_headers.c
+
+"$(INTDIR)\h2_headers.obj" : $(SOURCE) "$(INTDIR)"
+
+
SOURCE=./h2_mplx.c
"$(INTDIR)\h2_mplx.obj" : $(SOURCE) "$(INTDIR)"
@@ -492,11 +484,6 @@ SOURCE=./h2_request.c
"$(INTDIR)\h2_request.obj" : $(SOURCE) "$(INTDIR)"
-SOURCE=./h2_response.c
-
-"$(INTDIR)\h2_response.obj" : $(SOURCE) "$(INTDIR)"
-
-
SOURCE=./h2_session.c
"$(INTDIR)\h2_session.obj" : $(SOURCE) "$(INTDIR)"
@@ -522,11 +509,6 @@ SOURCE=./h2_util.c
"$(INTDIR)\h2_util.obj" : $(SOURCE) "$(INTDIR)"
-SOURCE=./h2_worker.c
-
-"$(INTDIR)\h2_worker.obj" : $(SOURCE) "$(INTDIR)"
-
-
SOURCE=./h2_workers.c
"$(INTDIR)\h2_workers.obj" : $(SOURCE) "$(INTDIR)"
diff -up --new-file httpd-2.4.23/modules/http2/mod_proxy_http2.c /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/mod_proxy_http2.c
--- httpd-2.4.23/modules/http2/mod_proxy_http2.c 2016-06-28 15:36:22.000000000 +0200
+++ /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/mod_proxy_http2.c 2017-04-10 17:04:55.000000000 +0200
@@ -26,6 +26,8 @@
#include "h2_version.h"
#include "h2_proxy_session.h"
+#define H2MIN(x,y) ((x) < (y) ? (x) : (y))
+
static void register_hook(apr_pool_t *p);
AP_DECLARE_MODULE(proxy_http2) = {
@@ -44,9 +46,10 @@ static apr_status_t (*req_engine_push)(c
http2_req_engine_init *einit);
static apr_status_t (*req_engine_pull)(h2_req_engine *engine,
apr_read_type_e block,
- apr_uint32_t capacity,
+ int capacity,
request_rec **pr);
-static void (*req_engine_done)(h2_req_engine *engine, conn_rec *r_conn);
+static void (*req_engine_done)(h2_req_engine *engine, conn_rec *r_conn,
+ apr_status_t status);
typedef struct h2_proxy_ctx {
conn_rec *owner;
@@ -63,9 +66,9 @@ typedef struct h2_proxy_ctx {
const char *engine_id;
const char *engine_type;
apr_pool_t *engine_pool;
- apr_uint32_t req_buffer_size;
- request_rec *next;
- apr_size_t capacity;
+ apr_size_t req_buffer_size;
+ h2_proxy_fifo *requests;
+ int capacity;
unsigned standalone : 1;
unsigned is_ssl : 1;
@@ -168,7 +171,7 @@ static int proxy_http2_canon(request_rec
path = url; /* this is the raw path */
}
else {
- path = ap_proxy_canonenc(r->pool, url, strlen(url),
+ path = ap_proxy_canonenc(r->pool, url, (int)strlen(url),
enc_path, 0, r->proxyreq);
search = r->args;
}
@@ -210,43 +213,30 @@ static apr_status_t proxy_engine_init(h2
const char *id,
const char *type,
apr_pool_t *pool,
- apr_uint32_t req_buffer_size,
+ apr_size_t req_buffer_size,
request_rec *r,
http2_output_consumed **pconsumed,
void **pctx)
{
h2_proxy_ctx *ctx = ap_get_module_config(r->connection->conn_config,
&proxy_http2_module);
- if (ctx) {
- conn_rec *c = ctx->owner;
- h2_proxy_ctx *nctx;
-
- /* we need another lifetime for this. If we do not host
- * an engine, the context lives in r->pool. Since we expect
- * to server more than r, we need to live longer */
- nctx = apr_pcalloc(pool, sizeof(*nctx));
- if (nctx == NULL) {
- return APR_ENOMEM;
- }
- memcpy(nctx, ctx, sizeof(*nctx));
- ctx = nctx;
- ctx->pool = pool;
- ctx->engine = engine;
- ctx->engine_id = id;
- ctx->engine_type = type;
- ctx->engine_pool = pool;
- ctx->req_buffer_size = req_buffer_size;
- ctx->capacity = 100;
-
- ap_set_module_config(c->conn_config, &proxy_http2_module, ctx);
-
- *pconsumed = out_consumed;
- *pctx = ctx;
- return APR_SUCCESS;
- }
- ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(03368)
- "h2_proxy_session, engine init, no ctx found");
- return APR_ENOTIMPL;
+ if (!ctx) {
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(03368)
+ "h2_proxy_session, engine init, no ctx found");
+ return APR_ENOTIMPL;
+ }
+
+ ctx->pool = pool;
+ ctx->engine = engine;
+ ctx->engine_id = id;
+ ctx->engine_type = type;
+ ctx->engine_pool = pool;
+ ctx->req_buffer_size = req_buffer_size;
+ ctx->capacity = H2MIN(100, h2_proxy_fifo_capacity(ctx->requests));
+
+ *pconsumed = out_consumed;
+ *pctx = ctx;
+ return APR_SUCCESS;
}
static apr_status_t add_request(h2_proxy_session *session, request_rec *r)
@@ -269,65 +259,74 @@ static apr_status_t add_request(h2_proxy
return status;
}
-static void request_done(h2_proxy_session *session, request_rec *r,
- int complete, int touched)
+static void request_done(h2_proxy_ctx *ctx, request_rec *r,
+ apr_status_t status, int touched)
{
- h2_proxy_ctx *ctx = session->user_data;
const char *task_id = apr_table_get(r->connection->notes, H2_TASK_ID_NOTE);
-
- if (!complete && !touched) {
- /* untouched request, need rescheduling */
- if (req_engine_push && is_h2 && is_h2(ctx->owner)) {
- if (req_engine_push(ctx->engine_type, r, NULL) == APR_SUCCESS) {
- /* push to engine */
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, r->connection,
- APLOGNO(03369)
- "h2_proxy_session(%s): rescheduled request %s",
- ctx->engine_id, task_id);
- return;
- }
- }
- }
-
- if (r == ctx->rbase && complete) {
- ctx->r_status = APR_SUCCESS;
- }
-
- if (complete) {
- if (req_engine_done && ctx->engine) {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, r->connection,
- APLOGNO(03370)
- "h2_proxy_session(%s): finished request %s",
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, r->connection,
+ "h2_proxy_session(%s): request done %s, touched=%d",
+ ctx->engine_id, task_id, touched);
+ if (status != APR_SUCCESS) {
+ if (!touched) {
+ /* untouched request, need rescheduling */
+ status = h2_proxy_fifo_push(ctx->requests, r);
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, r->connection,
+ APLOGNO(03369)
+ "h2_proxy_session(%s): rescheduled request %s",
ctx->engine_id, task_id);
- req_engine_done(ctx->engine, r->connection);
+ return;
}
- }
- else {
- if (req_engine_done && ctx->engine) {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, r->connection,
- APLOGNO(03371)
- "h2_proxy_session(%s): failed request %s",
- ctx->engine_id, task_id);
- req_engine_done(ctx->engine, r->connection);
+ else {
+ const char *uri;
+ uri = apr_uri_unparse(r->pool, &r->parsed_uri, 0);
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, r->connection,
+ APLOGNO(03471) "h2_proxy_session(%s): request %s -> %s "
+ "not complete, cannot repeat",
+ ctx->engine_id, task_id, uri);
}
}
+
+ if (r == ctx->rbase) {
+ ctx->r_status = ((status == APR_SUCCESS)? APR_SUCCESS
+ : HTTP_SERVICE_UNAVAILABLE);
+ }
+
+ if (req_engine_done && ctx->engine) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, r->connection,
+ APLOGNO(03370)
+ "h2_proxy_session(%s): finished request %s",
+ ctx->engine_id, task_id);
+ req_engine_done(ctx->engine, r->connection, status);
+ }
}
+static void session_req_done(h2_proxy_session *session, request_rec *r,
+ apr_status_t status, int touched)
+{
+ request_done(session->user_data, r, status, touched);
+}
+
static apr_status_t next_request(h2_proxy_ctx *ctx, int before_leave)
{
- if (ctx->next) {
+ if (h2_proxy_fifo_count(ctx->requests) > 0) {
return APR_SUCCESS;
}
else if (req_engine_pull && ctx->engine) {
apr_status_t status;
+ request_rec *r = NULL;
+
status = req_engine_pull(ctx->engine, before_leave?
APR_BLOCK_READ: APR_NONBLOCK_READ,
- ctx->capacity, &ctx->next);
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, ctx->owner,
- "h2_proxy_engine(%s): pulled request (%s) %s",
- ctx->engine_id,
- before_leave? "before leave" : "regular",
- (ctx->next? ctx->next->the_request : "NULL"));
+ ctx->capacity, &r);
+ if (status == APR_SUCCESS && r) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, ctx->owner,
+ "h2_proxy_engine(%s): pulled request (%s) %s",
+ ctx->engine_id,
+ before_leave? "before leave" : "regular",
+ r->the_request);
+ h2_proxy_fifo_push(ctx->requests, r);
+ }
return APR_STATUS_IS_EAGAIN(status)? APR_SUCCESS : status;
}
return APR_EOF;
@@ -335,15 +334,19 @@ static apr_status_t next_request(h2_prox
static apr_status_t proxy_engine_run(h2_proxy_ctx *ctx) {
apr_status_t status = OK;
+ int h2_front;
+ request_rec *r;
/* Step Four: Send the Request in a new HTTP/2 stream and
* loop until we got the response or encounter errors.
*/
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, ctx->owner,
"eng(%s): setup session", ctx->engine_id);
- ctx->session = h2_proxy_session_setup(ctx->engine_id, ctx->p_conn, ctx->conf,
- 30, h2_log2(ctx->req_buffer_size),
- request_done);
+ h2_front = is_h2? is_h2(ctx->owner) : 0;
+ ctx->session = h2_proxy_session_setup(ctx->engine_id, ctx->p_conn, ctx->conf,
+ h2_front, 30,
+ h2_proxy_log2((int)ctx->req_buffer_size),
+ session_req_done);
if (!ctx->session) {
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, ctx->owner,
APLOGNO(03372) "session unavailable");
@@ -354,10 +357,9 @@ static apr_status_t proxy_engine_run(h2_
"eng(%s): run session %s", ctx->engine_id, ctx->session->id);
ctx->session->user_data = ctx;
- while (1) {
- if (ctx->next) {
- add_request(ctx->session, ctx->next);
- ctx->next = NULL;
+ while (!ctx->owner->aborted) {
+ if (APR_SUCCESS == h2_proxy_fifo_try_pull(ctx->requests, (void**)&r)) {
+ add_request(ctx->session, r);
}
status = h2_proxy_session_process(ctx->session);
@@ -367,7 +369,8 @@ static apr_status_t proxy_engine_run(h2_
/* ongoing processing, call again */
if (ctx->session->remote_max_concurrent > 0
&& ctx->session->remote_max_concurrent != ctx->capacity) {
- ctx->capacity = ctx->session->remote_max_concurrent;
+ ctx->capacity = H2MIN((int)ctx->session->remote_max_concurrent,
+ h2_proxy_fifo_capacity(ctx->requests));
}
s2 = next_request(ctx, 0);
if (s2 == APR_ECONNABORTED) {
@@ -375,10 +378,16 @@ static apr_status_t proxy_engine_run(h2_
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, s2, ctx->owner,
APLOGNO(03374) "eng(%s): pull request",
ctx->engine_id);
- status = s2;
+ /* give notice that we're leaving and cancel all ongoing
+ * streams. */
+ next_request(ctx, 1);
+ h2_proxy_session_cancel_all(ctx->session);
+ h2_proxy_session_process(ctx->session);
+ status = ctx->r_status = APR_SUCCESS;
break;
}
- if (!ctx->next && h2_ihash_empty(ctx->session->streams)) {
+ if ((h2_proxy_fifo_count(ctx->requests) == 0)
+ && h2_proxy_ihash_empty(ctx->session->streams)) {
break;
}
}
@@ -392,7 +401,7 @@ static apr_status_t proxy_engine_run(h2_
* a) be reopened on the new session iff safe to do so
* b) reported as done (failed) otherwise
*/
- h2_proxy_session_cleanup(ctx->session, request_done);
+ h2_proxy_session_cleanup(ctx->session, session_req_done);
break;
}
}
@@ -403,7 +412,7 @@ static apr_status_t proxy_engine_run(h2_
return status;
}
-static h2_proxy_ctx *push_request_somewhere(h2_proxy_ctx *ctx)
+static apr_status_t push_request_somewhere(h2_proxy_ctx *ctx, request_rec *r)
{
conn_rec *c = ctx->owner;
const char *engine_type, *hostname;
@@ -413,21 +422,15 @@ static h2_proxy_ctx *push_request_somewh
engine_type = apr_psprintf(ctx->pool, "proxy_http2 %s%s", hostname,
ctx->server_portstr);
- if (c->master && req_engine_push && ctx->next && is_h2 && is_h2(c)) {
+ if (c->master && req_engine_push && r && is_h2 && is_h2(c)) {
/* If we are have req_engine capabilities, push the handling of this
* request (e.g. slave connection) to a proxy_http2 engine which
* uses the same backend. We may be called to create an engine
* ourself. */
- if (req_engine_push(engine_type, ctx->next, proxy_engine_init)
- == APR_SUCCESS) {
- /* to renew the lifetime, we might have set a new ctx */
- ctx = ap_get_module_config(c->conn_config, &proxy_http2_module);
+ if (req_engine_push(engine_type, r, proxy_engine_init) == APR_SUCCESS) {
if (ctx->engine == NULL) {
- /* Another engine instance has taken over processing of this
- * request. */
- ctx->r_status = SUSPENDED;
- ctx->next = NULL;
- return ctx;
+ /* request has been assigned to an engine in another thread */
+ return SUSPENDED;
}
}
}
@@ -448,7 +451,8 @@ static h2_proxy_ctx *push_request_somewh
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
"H2: hosting engine %s", ctx->engine_id);
}
- return ctx;
+
+ return h2_proxy_fifo_push(ctx->requests, r);
}
static int proxy_http2_handler(request_rec *r,
@@ -465,7 +469,7 @@ static int proxy_http2_handler(request_r
apr_status_t status;
h2_proxy_ctx *ctx;
apr_uri_t uri;
- int reconnected = 0;
+ int reconnects = 0;
/* find the scheme */
if ((url[0] != 'h' && url[0] != 'H') || url[1] != '2') {
@@ -490,6 +494,7 @@ static int proxy_http2_handler(request_r
default:
return DECLINED;
}
+
ctx = apr_pcalloc(r->pool, sizeof(*ctx));
ctx->owner = r->connection;
ctx->pool = r->pool;
@@ -501,8 +506,9 @@ static int proxy_http2_handler(request_r
ctx->conf = conf;
ctx->flushall = apr_table_get(r->subprocess_env, "proxy-flushall")? 1 : 0;
ctx->r_status = HTTP_SERVICE_UNAVAILABLE;
- ctx->next = r;
- r = NULL;
+
+ h2_proxy_fifo_set_create(&ctx->requests, ctx->pool, 100);
+
ap_set_module_config(ctx->owner->conn_config, &proxy_http2_module, ctx);
/* scheme says, this is for us. */
@@ -548,10 +554,11 @@ run_connect:
/* If we are not already hosting an engine, try to push the request
* to an already existing engine or host a new engine here. */
- if (!ctx->engine) {
- ctx = push_request_somewhere(ctx);
+ if (r && !ctx->engine) {
+ ctx->r_status = push_request_somewhere(ctx, r);
+ r = NULL;
if (ctx->r_status == SUSPENDED) {
- /* request was pushed to another engine */
+ /* request was pushed to another thread, leave processing here */
goto cleanup;
}
}
@@ -561,37 +568,39 @@ run_connect:
* backend->hostname. */
if (ap_proxy_connect_backend(ctx->proxy_func, ctx->p_conn, ctx->worker,
ctx->server)) {
- ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, ctx->owner, APLOGNO(03352)
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, ctx->owner, APLOGNO(03352)
"H2: failed to make connection to backend: %s",
ctx->p_conn->hostname);
- goto cleanup;
+ goto reconnect;
}
/* Step Three: Create conn_rec for the socket we have open now. */
if (!ctx->p_conn->connection) {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, ctx->owner, APLOGNO(03353)
- "setup new connection: is_ssl=%d %s %s %s",
- ctx->p_conn->is_ssl, ctx->p_conn->ssl_hostname,
- locurl, ctx->p_conn->hostname);
if ((status = ap_proxy_connection_create(ctx->proxy_func, ctx->p_conn,
ctx->owner,
ctx->server)) != OK) {
- goto cleanup;
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, ctx->owner, APLOGNO(03353)
+ "setup new connection: is_ssl=%d %s %s %s",
+ ctx->p_conn->is_ssl, ctx->p_conn->ssl_hostname,
+ locurl, ctx->p_conn->hostname);
+ goto reconnect;
}
- /*
- * On SSL connections set a note on the connection what CN is
- * requested, such that mod_ssl can check if it is requested to do
- * so.
- */
- if (ctx->p_conn->ssl_hostname) {
- apr_table_setn(ctx->p_conn->connection->notes,
- "proxy-request-hostname", ctx->p_conn->ssl_hostname);
- }
-
- if (ctx->is_ssl) {
- apr_table_setn(ctx->p_conn->connection->notes,
- "proxy-request-alpn-protos", "h2");
+ if (!ctx->p_conn->data) {
+ /* New conection: set a note on the connection what CN is
+ * requested and what protocol we want */
+ if (ctx->p_conn->ssl_hostname) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, ctx->owner,
+ "set SNI to %s for (%s)",
+ ctx->p_conn->ssl_hostname,
+ ctx->p_conn->hostname);
+ apr_table_setn(ctx->p_conn->connection->notes,
+ "proxy-request-hostname", ctx->p_conn->ssl_hostname);
+ }
+ if (ctx->is_ssl) {
+ apr_table_setn(ctx->p_conn->connection->notes,
+ "proxy-request-alpn-protos", "h2");
+ }
}
}
@@ -609,8 +618,8 @@ run_session:
ctx->engine = NULL;
}
-cleanup:
- if (!reconnected && ctx->engine && next_request(ctx, 1) == APR_SUCCESS) {
+reconnect:
+ if (next_request(ctx, 1) == APR_SUCCESS) {
/* Still more to do, tear down old conn and start over */
if (ctx->p_conn) {
ctx->p_conn->close = 1;
@@ -619,10 +628,16 @@ cleanup:
ap_proxy_release_connection(ctx->proxy_func, ctx->p_conn, ctx->server);
ctx->p_conn = NULL;
}
- reconnected = 1; /* we do this only once, then fail */
- goto run_connect;
+ ++reconnects;
+ if (reconnects < 5 && !ctx->owner->aborted) {
+ goto run_connect;
+ }
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, ctx->owner, APLOGNO(10023)
+ "giving up after %d reconnects, %d requests todo",
+ reconnects, h2_proxy_fifo_count(ctx->requests));
}
+cleanup:
if (ctx->p_conn) {
if (status != APR_SUCCESS) {
/* close socket when errors happened or session shut down (EOF) */
@@ -634,6 +649,11 @@ cleanup:
ctx->p_conn = NULL;
}
+ /* Any requests will still have need to fail */
+ while (APR_SUCCESS == h2_proxy_fifo_try_pull(ctx->requests, (void**)&r)) {
+ request_done(ctx, r, HTTP_SERVICE_UNAVAILABLE, 1);
+ }
+
ap_set_module_config(ctx->owner->conn_config, &proxy_http2_module, NULL);
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, ctx->owner,
APLOGNO(03377) "leaving handler");
diff -up --new-file httpd-2.4.23/modules/http2/NWGNUmod_http2 /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/NWGNUmod_http2
--- httpd-2.4.23/modules/http2/NWGNUmod_http2 2016-06-28 21:57:30.000000000 +0200
+++ /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/NWGNUmod_http2 2017-03-31 21:41:01.000000000 +0200
@@ -186,7 +186,6 @@ TARGET_lib = \
FILES_nlm_objs = \
$(OBJDIR)/h2_alt_svc.o \
$(OBJDIR)/h2_bucket_beam.o \
- $(OBJDIR)/h2_bucket_eoc.o \
$(OBJDIR)/h2_bucket_eos.o \
$(OBJDIR)/h2_config.o \
$(OBJDIR)/h2_conn.o \
@@ -199,13 +198,12 @@ FILES_nlm_objs = \
$(OBJDIR)/h2_ngn_shed.o \
$(OBJDIR)/h2_push.o \
$(OBJDIR)/h2_request.o \
- $(OBJDIR)/h2_response.o \
+ $(OBJDIR)/h2_headers.o \
$(OBJDIR)/h2_session.o \
$(OBJDIR)/h2_stream.o \
$(OBJDIR)/h2_switch.o \
$(OBJDIR)/h2_task.o \
$(OBJDIR)/h2_util.o \
- $(OBJDIR)/h2_worker.o \
$(OBJDIR)/h2_workers.o \
$(OBJDIR)/mod_http2.o \
$(EOLIST)
@@ -355,25 +353,6 @@ $(OBJDIR)/mod_http2.imp : NWGNUmod_http2
@echo $(DL)GEN $@$(DL)
@echo $(DL) (HTTP2)$(DL) > $@
@echo $(DL) http2_module,$(DL) >> $@
- @echo $(DL) h2_ihash_add,$(DL) >> $@
- @echo $(DL) h2_ihash_clear,$(DL) >> $@
- @echo $(DL) h2_ihash_count,$(DL) >> $@
- @echo $(DL) h2_ihash_create,$(DL) >> $@
- @echo $(DL) h2_ihash_empty,$(DL) >> $@
- @echo $(DL) h2_ihash_iter,$(DL) >> $@
- @echo $(DL) h2_ihash_remove,$(DL) >> $@
- @echo $(DL) h2_iq_add,$(DL) >> $@
- @echo $(DL) h2_iq_create,$(DL) >> $@
- @echo $(DL) h2_iq_remove,$(DL) >> $@
- @echo $(DL) h2_log2,$(DL) >> $@
- @echo $(DL) h2_proxy_res_ignore_header,$(DL) >> $@
- @echo $(DL) h2_headers_add_h1,$(DL) >> $@
- @echo $(DL) h2_req_create,$(DL) >> $@
- @echo $(DL) h2_req_createn,$(DL) >> $@
- @echo $(DL) h2_req_make,$(DL) >> $@
- @echo $(DL) h2_util_camel_case_header,$(DL) >> $@
- @echo $(DL) h2_util_frame_print,$(DL) >> $@
- @echo $(DL) h2_util_ngheader_make_req,$(DL) >> $@
@echo $(DL) nghttp2_is_fatal,$(DL) >> $@
@echo $(DL) nghttp2_option_del,$(DL) >> $@
@echo $(DL) nghttp2_option_new,$(DL) >> $@
@@ -400,6 +379,7 @@ $(OBJDIR)/mod_http2.imp : NWGNUmod_http2
@echo $(DL) nghttp2_session_want_write,$(DL) >> $@
@echo $(DL) nghttp2_strerror,$(DL) >> $@
@echo $(DL) nghttp2_submit_goaway,$(DL) >> $@
+ @echo $(DL) nghttp2_submit_ping,$(DL) >> $@
@echo $(DL) nghttp2_submit_request,$(DL) >> $@
@echo $(DL) nghttp2_submit_rst_stream,$(DL) >> $@
@echo $(DL) nghttp2_submit_settings,$(DL) >> $@
diff -up --new-file httpd-2.4.23/modules/http2/NWGNUproxyht2 /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/NWGNUproxyht2
--- httpd-2.4.23/modules/http2/NWGNUproxyht2 2016-06-28 21:57:30.000000000 +0200
+++ /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/NWGNUproxyht2 2016-08-31 11:14:39.000000000 +0200
@@ -182,6 +182,7 @@ TARGET_lib = \
FILES_nlm_objs = \
$(OBJDIR)/mod_proxy_http2.o \
$(OBJDIR)/h2_proxy_session.o \
+ $(OBJDIR)/h2_proxy_util.o \
$(EOLIST)
#
diff -up --new-file httpd-2.4.23/modules/http2/README.h2 /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/README.h2
--- httpd-2.4.23/modules/http2/README.h2 2015-09-28 21:30:00.000000000 +0200
+++ /home/pgajdos/osc/Apache/apache2/httpd-2.4.29/modules/http2/README.h2 2016-08-25 14:48:18.000000000 +0200
@@ -60,7 +60,7 @@ TLS CONFIGURATION
If you want to use HTTP/2 with a browser, most modern browsers will support
it without further configuration. However, browsers so far only support
-HTTP/2 over TLS and are expecially picky about the certificate and
+HTTP/2 over TLS and are especially picky about the certificate and
encryption ciphers used.
Server admins may look for up-to-date information about "modern" TLS