Mailing List Archive

svn commit: r1863280 - /httpd/httpd/patches/2.4.x/h2-keepalive+throttling-v3.patch
Author: icing
Date: Thu Jul 18 12:46:10 2019
New Revision: 1863280

URL: http://svn.apache.org/viewvc?rev=1863280&view=rev
Log:
now with more

Added:
httpd/httpd/patches/2.4.x/h2-keepalive+throttling-v3.patch

Added: httpd/httpd/patches/2.4.x/h2-keepalive+throttling-v3.patch
URL: http://svn.apache.org/viewvc/httpd/httpd/patches/2.4.x/h2-keepalive%2Bthrottling-v3.patch?rev=1863280&view=auto
==============================================================================
--- httpd/httpd/patches/2.4.x/h2-keepalive+throttling-v3.patch (added)
+++ httpd/httpd/patches/2.4.x/h2-keepalive+throttling-v3.patch Thu Jul 18 12:46:10 2019
@@ -0,0 +1,894 @@
+Index: CHANGES
+===================================================================
+--- CHANGES (revision 1863276)
++++ CHANGES (working copy)
+@@ -1,6 +1,18 @@
+ -*- coding: utf-8 -*-
+ Changes with Apache 2.4.40
+
++ *) mod_http2: core setting "LimitRequestFieldSize" is now additionally checked on
++ merged header fields, just as HTTP/1.1 does. [Stefan Eissing, Michael Kaufmann]
++
++ *) mod_http2: fixed a bug that prevented proper stream cleanup when connection
++ throttling was in place. Stream resets by clients on streams initiated by them
++ are counted as possible trigger for throttling. [Stefan Eissing]
++
++ *) mod_http2/mpm_event: Fixes the behaviour when a HTTP/2 connection has nothing
++ more to write with streams ongoing (flow control block). The timeout waiting
++ for the client to send WINDOW_UPDATE was incorrectly KeepAliveTimeout and not
++ Timeout as it should be. Fixes PR 63534. [Yann Ylavic, Stefan Eissing]
++
+ *) mod_md: new features
+ - supports the ACMEv2 protocol
+ - new challenge method 'tls-alpn-01' implemented, needs mod_ssl patch to become available
+Index: modules/http2/h2_conn.c
+===================================================================
+--- modules/http2/h2_conn.c (revision 1863276)
++++ modules/http2/h2_conn.c (working copy)
+@@ -231,6 +231,13 @@
+ case H2_SESSION_ST_BUSY:
+ case H2_SESSION_ST_WAIT:
+ c->cs->state = CONN_STATE_WRITE_COMPLETION;
++ if (c->cs && (session->open_streams || !session->remote.emitted_count)) {
++ /* let the MPM know that we are not done and want
++ * the Timeout behaviour instead of a KeepAliveTimeout
++ * See PR 63534.
++ */
++ c->cs->sense = CONN_SENSE_WANT_READ;
++ }
+ break;
+ case H2_SESSION_ST_CLEANUP:
+ case H2_SESSION_ST_DONE:
+Index: modules/http2/h2_filter.c
+===================================================================
+--- modules/http2/h2_filter.c (revision 1863276)
++++ modules/http2/h2_filter.c (working copy)
+@@ -493,6 +493,52 @@
+ return APR_SUCCESS;
+ }
+
++static apr_status_t discard_body(request_rec *r, apr_off_t maxlen)
++{
++ apr_bucket_brigade *bb;
++ int seen_eos;
++ apr_status_t rv;
++
++ bb = apr_brigade_create(r->pool, r->connection->bucket_alloc);
++ seen_eos = 0;
++ do {
++ apr_bucket *bucket;
++
++ rv = ap_get_brigade(r->input_filters, bb, AP_MODE_READBYTES,
++ APR_BLOCK_READ, HUGE_STRING_LEN);
++
++ if (rv != APR_SUCCESS) {
++ apr_brigade_destroy(bb);
++ return rv;
++ }
++
++ for (bucket = APR_BRIGADE_FIRST(bb);
++ bucket != APR_BRIGADE_SENTINEL(bb);
++ bucket = APR_BUCKET_NEXT(bucket))
++ {
++ const char *data;
++ apr_size_t len;
++
++ if (APR_BUCKET_IS_EOS(bucket)) {
++ seen_eos = 1;
++ break;
++ }
++ if (bucket->length == 0) {
++ continue;
++ }
++ rv = apr_bucket_read(bucket, &data, &len, APR_BLOCK_READ);
++ if (rv != APR_SUCCESS) {
++ apr_brigade_destroy(bb);
++ return rv;
++ }
++ maxlen -= bucket->length;
++ }
++ apr_brigade_cleanup(bb);
++ } while (!seen_eos && maxlen >= 0);
++
++ return APR_SUCCESS;
++}
++
+ int h2_filter_h2_status_handler(request_rec *r)
+ {
+ conn_rec *c = r->connection;
+@@ -510,8 +556,10 @@
+
+ task = h2_ctx_get_task(r->connection);
+ if (task) {
+-
+- if ((status = ap_discard_request_body(r)) != OK) {
++ /* In this handler, we do some special sauce to send footers back,
++ * IFF we received footers in the request. This is used in our test
++ * cases, since CGI has no way of handling those. */
++ if ((status = discard_body(r, 1024)) != OK) {
+ return status;
+ }
+
+Index: modules/http2/h2_mplx.c
+===================================================================
+--- modules/http2/h2_mplx.c (revision 1863276)
++++ modules/http2/h2_mplx.c (working copy)
+@@ -53,8 +53,12 @@
+ h2_mplx *m;
+ h2_stream *stream;
+ apr_time_t now;
++ apr_size_t count;
+ } stream_iter_ctx;
+
++static apr_status_t mplx_be_happy(h2_mplx *m);
++static apr_status_t mplx_be_annoyed(h2_mplx *m);
++
+ apr_status_t h2_mplx_child_init(apr_pool_t *pool, server_rec *s)
+ {
+ return APR_SUCCESS;
+@@ -98,7 +102,7 @@
+
+ static void stream_joined(h2_mplx *m, h2_stream *stream)
+ {
+- ap_assert(!stream->task || stream->task->worker_done);
++ ap_assert(!h2_task_has_started(stream->task) || stream->task->worker_done);
+
+ h2_ihash_remove(m->shold, stream->id);
+ h2_ihash_add(m->spurge, stream);
+@@ -124,7 +128,7 @@
+ h2_ififo_remove(m->readyq, stream->id);
+ h2_ihash_add(m->shold, stream);
+
+- if (!stream->task || stream->task->worker_done) {
++ if (!h2_task_has_started(stream->task) || stream->task->done_done) {
+ stream_joined(m, stream);
+ }
+ else if (stream->task) {
+@@ -194,7 +198,6 @@
+ m->stream_max_mem = h2_config_sgeti(s, H2_CONF_STREAM_MAX_MEM);
+
+ m->streams = h2_ihash_create(m->pool, offsetof(h2_stream,id));
+- m->sredo = h2_ihash_create(m->pool, offsetof(h2_stream,id));
+ m->shold = h2_ihash_create(m->pool, offsetof(h2_stream,id));
+ m->spurge = h2_ihash_create(m->pool, offsetof(h2_stream,id));
+ m->q = h2_iq_create(m->pool, m->max_streams);
+@@ -208,8 +211,8 @@
+ m->workers = workers;
+ m->max_active = workers->max_workers;
+ m->limit_active = 6; /* the original h1 max parallel connections */
+- m->last_limit_change = m->last_idle_block = apr_time_now();
+- m->limit_change_interval = apr_time_from_msec(100);
++ m->last_mood_change = apr_time_now();
++ m->mood_update_interval = apr_time_from_msec(100);
+
+ m->spare_slaves = apr_array_make(m->pool, 10, sizeof(conn_rec*));
+ }
+@@ -431,6 +434,10 @@
+
+ /* How to shut down a h2 connection:
+ * 1. cancel all streams still active */
++ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
++ "h2_mplx(%ld): release, %d/%d/%d streams (total/hold/purge), %d active tasks",
++ m->id, (int)h2_ihash_count(m->streams),
++ (int)h2_ihash_count(m->shold), (int)h2_ihash_count(m->spurge), m->tasks_active);
+ while (!h2_ihash_iter(m->streams, stream_cancel_iter, m)) {
+ /* until empty */
+ }
+@@ -456,10 +463,10 @@
+ h2_ihash_iter(m->shold, report_stream_iter, m);
+ }
+ }
+- ap_assert(m->tasks_active == 0);
+ m->join_wait = NULL;
+-
++
+ /* 4. With all workers done, all streams should be in spurge */
++ ap_assert(m->tasks_active == 0);
+ if (!h2_ihash_empty(m->shold)) {
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c, APLOGNO(03516)
+ "h2_mplx(%ld): unexpected %d streams in hold",
+@@ -470,8 +477,7 @@
+ m->c->aborted = old_aborted;
+ H2_MPLX_LEAVE(m);
+
+- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
+- "h2_mplx(%ld): released", m->id);
++ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c, "h2_mplx(%ld): released", m->id);
+ }
+
+ apr_status_t h2_mplx_stream_cleanup(h2_mplx *m, h2_stream *stream)
+@@ -709,7 +715,6 @@
+ }
+
+ if (!stream->task) {
+-
+ if (sid > m->max_stream_started) {
+ m->max_stream_started = sid;
+ }
+@@ -728,9 +733,9 @@
+ "create task"));
+ return NULL;
+ }
+-
+ }
+
++ stream->task->started_at = apr_time_now();
+ ++m->tasks_active;
+ return stream->task;
+ }
+@@ -778,32 +783,18 @@
+ "h2_mplx(%s): request done, %f ms elapsed", task->id,
+ (task->done_at - task->started_at) / 1000.0);
+
+- if (task->started_at > m->last_idle_block) {
+- /* this task finished without causing an 'idle block', e.g.
+- * a block by flow control.
+- */
+- if (task->done_at- m->last_limit_change >= m->limit_change_interval
+- && m->limit_active < m->max_active) {
+- /* Well behaving stream, allow it more workers */
+- m->limit_active = H2MIN(m->limit_active * 2,
+- m->max_active);
+- m->last_limit_change = task->done_at;
+- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
+- "h2_mplx(%ld): increase worker limit to %d",
+- m->id, m->limit_active);
+- }
++ if (task->c && !task->c->aborted && task->started_at > m->last_mood_change) {
++ mplx_be_happy(m);
+ }
+-
++
+ ap_assert(task->done_done == 0);
+
+ stream = h2_ihash_get(m->streams, task->stream_id);
+ if (stream) {
+ /* stream not done yet. */
+- if (!m->aborted && h2_ihash_get(m->sredo, stream->id)) {
++ if (!m->aborted && task->redo) {
+ /* reset and schedule again */
+- task->worker_done = 0;
+ h2_task_redo(task);
+- h2_ihash_remove(m->sredo, stream->id);
+ h2_iq_add(m->q, stream->id, NULL, NULL);
+ ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, m->c,
+ H2_STRM_MSG(stream, "redo, added to q"));
+@@ -848,8 +839,8 @@
+ {
+ H2_MPLX_ENTER_ALWAYS(m);
+
++ --m->tasks_active;
+ task_done(m, task);
+- --m->tasks_active;
+
+ if (m->join_wait) {
+ apr_thread_cond_signal(m->join_wait);
+@@ -867,94 +858,161 @@
+ * h2_mplx DoS protection
+ ******************************************************************************/
+
+-static int latest_repeatable_unsubmitted_iter(void *data, void *val)
++static int timed_out_busy_iter(void *data, void *val)
+ {
+ stream_iter_ctx *ctx = data;
+ h2_stream *stream = val;
+-
+- if (stream->task && !stream->task->worker_done
+- && h2_task_can_redo(stream->task)
+- && !h2_ihash_get(ctx->m->sredo, stream->id)) {
+- if (!h2_stream_is_ready(stream)) {
+- /* this task occupies a worker, the response has not been submitted
+- * yet, not been cancelled and it is a repeatable request
+- * -> it can be re-scheduled later */
+- if (!ctx->stream
+- || (ctx->stream->task->started_at < stream->task->started_at)) {
+- /* we did not have one or this one was started later */
+- ctx->stream = stream;
+- }
+- }
++ if (h2_task_has_started(stream->task) && !stream->task->worker_done
++ && (ctx->now - stream->task->started_at) > stream->task->timeout) {
++ /* timed out stream occupying a worker, found */
++ ctx->stream = stream;
++ return 0;
+ }
+ return 1;
+ }
+
+-static h2_stream *get_latest_repeatable_unsubmitted_stream(h2_mplx *m)
++static h2_stream *get_timed_out_busy_stream(h2_mplx *m)
+ {
+ stream_iter_ctx ctx;
+ ctx.m = m;
+ ctx.stream = NULL;
+- h2_ihash_iter(m->streams, latest_repeatable_unsubmitted_iter, &ctx);
++ ctx.now = apr_time_now();
++ h2_ihash_iter(m->streams, timed_out_busy_iter, &ctx);
+ return ctx.stream;
+ }
+
+-static int timed_out_busy_iter(void *data, void *val)
++static int latest_repeatable_unsubmitted_iter(void *data, void *val)
+ {
+ stream_iter_ctx *ctx = data;
+ h2_stream *stream = val;
+- if (stream->task && !stream->task->worker_done
+- && (ctx->now - stream->task->started_at) > stream->task->timeout) {
+- /* timed out stream occupying a worker, found */
+- ctx->stream = stream;
+- return 0;
++
++ if (!stream->task) goto leave;
++ if (!h2_task_has_started(stream->task) || stream->task->worker_done) goto leave;
++ if (h2_stream_is_ready(stream)) goto leave;
++ if (stream->task->redo) {
++ ++ctx->count;
++ goto leave;
+ }
++ if (h2_task_can_redo(stream->task)) {
++ /* this task occupies a worker, the response has not been submitted
++ * yet, not been cancelled and it is a repeatable request
++ * -> we could redo it later */
++ if (!ctx->stream
++ || (ctx->stream->task->started_at < stream->task->started_at)) {
++ /* we did not have one or this one was started later */
++ ctx->stream = stream;
++ }
++ }
++leave:
+ return 1;
+ }
+
+-static h2_stream *get_timed_out_busy_stream(h2_mplx *m)
++static apr_status_t assess_task_to_throttle(h2_task **ptask, h2_mplx *m)
+ {
+ stream_iter_ctx ctx;
++
++ /* count the running tasks already marked for redo and get one that could
++ * be throttled */
++ *ptask = NULL;
+ ctx.m = m;
+ ctx.stream = NULL;
+- ctx.now = apr_time_now();
+- h2_ihash_iter(m->streams, timed_out_busy_iter, &ctx);
+- return ctx.stream;
++ ctx.count = 0;
++ h2_ihash_iter(m->streams, latest_repeatable_unsubmitted_iter, &ctx);
++ if (m->tasks_active - ctx.count > m->limit_active) {
++ /* we are above the limit of running tasks, accounting for the ones
++ * already throttled. */
++ if (ctx.stream && ctx.stream->task) {
++ *ptask = ctx.stream->task;
++ return APR_EAGAIN;
++ }
++ /* above limit, be seeing no candidate for easy throttling */
++ if (get_timed_out_busy_stream(m)) {
++ /* Too many busy workers, unable to cancel enough streams
++ * and with a busy, timed out stream, we tell the client
++ * to go away... */
++ return APR_TIMEUP;
++ }
++ }
++ return APR_SUCCESS;
+ }
+
+ static apr_status_t unschedule_slow_tasks(h2_mplx *m)
+ {
+- h2_stream *stream;
+- int n;
++ h2_task *task;
++ apr_status_t rv;
+
+ /* Try to get rid of streams that occupy workers. Look for safe requests
+ * that are repeatable. If none found, fail the connection.
+ */
+- n = (m->tasks_active - m->limit_active - (int)h2_ihash_count(m->sredo));
+- while (n > 0 && (stream = get_latest_repeatable_unsubmitted_stream(m))) {
++ while (APR_EAGAIN == (rv = assess_task_to_throttle(&task, m))) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
+ "h2_mplx(%s): unschedule, resetting task for redo later",
+- stream->task->id);
+- h2_task_rst(stream->task, H2_ERR_CANCEL);
+- h2_ihash_add(m->sredo, stream);
+- --n;
++ task->id);
++ task->redo = 1;
++ h2_task_rst(task, H2_ERR_CANCEL);
+ }
+
+- if ((m->tasks_active - h2_ihash_count(m->sredo)) > m->limit_active) {
+- h2_stream *stream = get_timed_out_busy_stream(m);
+- if (stream) {
+- /* Too many busy workers, unable to cancel enough streams
+- * and with a busy, timed out stream, we tell the client
+- * to go away... */
+- return APR_TIMEUP;
+- }
++ return rv;
++}
++
++static apr_status_t mplx_be_happy(h2_mplx *m)
++{
++ apr_time_t now;
++
++ --m->irritations_since;
++ now = apr_time_now();
++ if (m->limit_active < m->max_active
++ && (now - m->last_mood_change >= m->mood_update_interval
++ || m->irritations_since < -m->limit_active)) {
++ m->limit_active = H2MIN(m->limit_active * 2, m->max_active);
++ m->last_mood_change = now;
++ m->irritations_since = 0;
++ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
++ "h2_mplx(%ld): mood update, increasing worker limit to %d",
++ m->id, m->limit_active);
+ }
+ return APR_SUCCESS;
+ }
+
++static apr_status_t mplx_be_annoyed(h2_mplx *m)
++{
++ apr_status_t status = APR_SUCCESS;
++ apr_time_t now;
++
++ ++m->irritations_since;
++ now = apr_time_now();
++ if (m->limit_active > 2 &&
++ ((now - m->last_mood_change >= m->mood_update_interval)
++ || (m->irritations_since >= m->limit_active))) {
++
++ if (m->limit_active > 16) {
++ m->limit_active = 16;
++ }
++ else if (m->limit_active > 8) {
++ m->limit_active = 8;
++ }
++ else if (m->limit_active > 4) {
++ m->limit_active = 4;
++ }
++ else if (m->limit_active > 2) {
++ m->limit_active = 2;
++ }
++ m->last_mood_change = now;
++ m->irritations_since = 0;
++ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
++ "h2_mplx(%ld): mood update, decreasing worker limit to %d",
++ m->id, m->limit_active);
++ }
++
++ if (m->tasks_active > m->limit_active) {
++ status = unschedule_slow_tasks(m);
++ }
++ return status;
++}
++
+ apr_status_t h2_mplx_idle(h2_mplx *m)
+ {
+ apr_status_t status = APR_SUCCESS;
+- apr_time_t now;
+ apr_size_t scount;
+
+ H2_MPLX_ENTER(m);
+@@ -974,31 +1032,7 @@
+ * of busy workers we allow for this connection until it
+ * well behaves.
+ */
+- now = apr_time_now();
+- m->last_idle_block = now;
+- if (m->limit_active > 2
+- && now - m->last_limit_change >= m->limit_change_interval) {
+- if (m->limit_active > 16) {
+- m->limit_active = 16;
+- }
+- else if (m->limit_active > 8) {
+- m->limit_active = 8;
+- }
+- else if (m->limit_active > 4) {
+- m->limit_active = 4;
+- }
+- else if (m->limit_active > 2) {
+- m->limit_active = 2;
+- }
+- m->last_limit_change = now;
+- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
+- "h2_mplx(%ld): decrease worker limit to %d",
+- m->id, m->limit_active);
+- }
+-
+- if (m->tasks_active > m->limit_active) {
+- status = unschedule_slow_tasks(m);
+- }
++ status = mplx_be_annoyed(m);
+ }
+ else if (!h2_iq_empty(m->q)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
+@@ -1093,8 +1127,7 @@
+ if (h2_ihash_empty(m->streams)) {
+ waiting = 0;
+ }
+- else if (!m->tasks_active && !h2_ififo_count(m->readyq)
+- && h2_iq_empty(m->q)) {
++ else if (!m->tasks_active && !h2_ififo_count(m->readyq) && h2_iq_empty(m->q)) {
+ waiting = 0;
+ }
+
+@@ -1101,3 +1134,17 @@
+ H2_MPLX_LEAVE(m);
+ return waiting;
+ }
++
++apr_status_t h2_mplx_client_rst(h2_mplx *m, int stream_id)
++{
++ h2_stream *stream;
++ apr_status_t status = APR_SUCCESS;
++
++ H2_MPLX_ENTER_ALWAYS(m);
++ stream = h2_ihash_get(m->streams, stream_id);
++ if (stream && stream->task) {
++ status = mplx_be_annoyed(m);
++ }
++ H2_MPLX_LEAVE(m);
++ return status;
++}
+Index: modules/http2/h2_mplx.h
+===================================================================
+--- modules/http2/h2_mplx.h (revision 1863276)
++++ modules/http2/h2_mplx.h (working copy)
+@@ -63,7 +63,6 @@
+ unsigned int is_registered; /* is registered at h2_workers */
+
+ struct h2_ihash_t *streams; /* all streams currently processing */
+- struct h2_ihash_t *sredo; /* all streams that need to be re-started */
+ struct h2_ihash_t *shold; /* all streams done with task ongoing */
+ struct h2_ihash_t *spurge; /* all streams done, ready for destroy */
+
+@@ -77,10 +76,10 @@
+ int tasks_active; /* # of tasks being processed from this mplx */
+ int limit_active; /* current limit on active tasks, dynamic */
+ int max_active; /* max, hard limit # of active tasks in a process */
+- apr_time_t last_idle_block; /* last time, this mplx entered IDLE while
+- * streams were ready */
+- apr_time_t last_limit_change; /* last time, worker limit changed */
+- apr_interval_time_t limit_change_interval;
++
++ apr_time_t last_mood_change; /* last time, we worker limit changed */
++ apr_interval_time_t mood_update_interval; /* how frequent we update at most */
++ int irritations_since; /* irritations (>0) or happy events (<0) since last mood change */
+
+ apr_thread_mutex_t *lock;
+ struct apr_thread_cond_t *added_output;
+@@ -205,6 +204,8 @@
+
+ apr_status_t h2_mplx_stream_do(h2_mplx *m, h2_mplx_stream_cb *cb, void *ctx);
+
++apr_status_t h2_mplx_client_rst(h2_mplx *m, int stream_id);
++
+ /*******************************************************************************
+ * Output handling of streams.
+ ******************************************************************************/
+Index: modules/http2/h2_proxy_session.c
+===================================================================
+--- modules/http2/h2_proxy_session.c (revision 1863276)
++++ modules/http2/h2_proxy_session.c (working copy)
+@@ -557,7 +557,7 @@
+ " total, flags=%d", stream->id, (long)readlen, (long)stream->data_sent,
+ (int)*data_flags);
+ if ((*data_flags & NGHTTP2_DATA_FLAG_EOF) && !apr_is_empty_table(stream->r->trailers_in)) {
+- ap_log_rerror(APLOG_MARK, APLOG_DEBUG, status, stream->r, APLOGNO(03468)
++ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, status, stream->r, APLOGNO(10179)
+ "h2_proxy_stream(%d): submit trailers", stream->id);
+ *data_flags |= NGHTTP2_DATA_FLAG_NO_END_STREAM;
+ submit_trailers(stream);
+Index: modules/http2/h2_session.c
+===================================================================
+--- modules/http2/h2_session.c (revision 1863276)
++++ modules/http2/h2_session.c (working copy)
+@@ -390,9 +390,14 @@
+ (int)frame->rst_stream.error_code);
+ stream = h2_session_stream_get(session, frame->hd.stream_id);
+ if (stream && stream->initiated_on) {
++ /* A stream reset on a request we sent it. Normal, when the
++ * client does not want it. */
+ ++session->pushes_reset;
+ }
+ else {
++ /* A stream reset on a request it sent us. Could happen in a browser
++ * when the user navigates away or cancels loading - maybe. */
++ h2_mplx_client_rst(session->mplx, frame->hd.stream_id);
+ ++session->streams_reset;
+ }
+ break;
+@@ -1703,7 +1708,7 @@
+ * that already served requests - not fair. */
+ session->idle_sync_until = apr_time_now() + apr_time_from_sec(1);
+ s = "timeout";
+- timeout = H2MAX(session->s->timeout, session->s->keep_alive_timeout);
++ timeout = session->s->timeout;
+ update_child_status(session, SERVER_BUSY_READ, "idle");
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
+ H2_SSSN_LOG("", session, "enter idle, timeout = %d sec"),
+@@ -1711,8 +1716,8 @@
+ }
+ else if (session->open_streams) {
+ s = "timeout";
+- timeout = session->s->keep_alive_timeout;
+- update_child_status(session, SERVER_BUSY_KEEPALIVE, "idle");
++ timeout = session->s->timeout;
++ update_child_status(session, SERVER_BUSY_READ, "idle");
+ }
+ else {
+ /* normal keepalive setup */
+@@ -2170,6 +2175,14 @@
+ session->have_read = 1;
+ }
+ else if (APR_STATUS_IS_EAGAIN(status) || APR_STATUS_IS_TIMEUP(status)) {
++ status = h2_mplx_idle(session->mplx);
++ if (status == APR_EAGAIN) {
++ break;
++ }
++ else if (status != APR_SUCCESS) {
++ dispatch_event(session, H2_SESSION_EV_CONN_ERROR,
++ H2_ERR_ENHANCE_YOUR_CALM, "less is more");
++ }
+ status = APR_EAGAIN;
+ goto out;
+ }
+Index: modules/http2/h2_stream.c
+===================================================================
+--- modules/http2/h2_stream.c (revision 1863276)
++++ modules/http2/h2_stream.c (working copy)
+@@ -397,13 +397,8 @@
+ /* start pushed stream */
+ ap_assert(stream->request == NULL);
+ ap_assert(stream->rtmp != NULL);
+- status = h2_request_end_headers(stream->rtmp, stream->pool, 1, 0);
+- if (status != APR_SUCCESS) {
+- return status;
+- }
+- set_policy_for(stream, stream->rtmp);
+- stream->request = stream->rtmp;
+- stream->rtmp = NULL;
++ status = h2_stream_end_headers(stream, 1, 0);
++ if (status != APR_SUCCESS) goto leave;
+ break;
+
+ default:
+@@ -415,6 +410,7 @@
+ if (status == APR_SUCCESS && eos) {
+ status = transit(stream, on_event(stream, H2_SEV_CLOSED_L));
+ }
++leave:
+ return status;
+ }
+
+@@ -455,13 +451,8 @@
+ * to abort the connection here, since this is clearly a protocol error */
+ return APR_EINVAL;
+ }
+- status = h2_request_end_headers(stream->rtmp, stream->pool, eos, frame_len);
+- if (status != APR_SUCCESS) {
+- return status;
+- }
+- set_policy_for(stream, stream->rtmp);
+- stream->request = stream->rtmp;
+- stream->rtmp = NULL;
++ status = h2_stream_end_headers(stream, eos, frame_len);
++ if (status != APR_SUCCESS) goto leave;
+ }
+ break;
+
+@@ -472,6 +463,7 @@
+ if (status == APR_SUCCESS && eos) {
+ status = transit(stream, on_event(stream, H2_SEV_CLOSED_R));
+ }
++leave:
+ return status;
+ }
+
+@@ -683,6 +675,8 @@
+ hvalue = apr_pstrndup(stream->pool, value, vlen);
+ h2_util_camel_case_header(hname, nlen);
+ apr_table_mergen(stream->trailers, hname, hvalue);
++ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c,
++ H2_STRM_MSG(stream, "added trailer '%s: %s'"), hname, hvalue);
+
+ return APR_SUCCESS;
+ }
+@@ -702,15 +696,19 @@
+ if (name[0] == ':') {
+ if ((vlen) > session->s->limit_req_line) {
+ /* pseudo header: approximation of request line size check */
+- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
+- H2_STRM_MSG(stream, "pseudo %s too long"), name);
++ ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, session->c,
++ H2_STRM_LOG(APLOGNO(10178), stream,
++ "Request pseudo header exceeds "
++ "LimitRequestFieldSize: %s"), name);
+ error = HTTP_REQUEST_URI_TOO_LARGE;
+ }
+ }
+ else if ((nlen + 2 + vlen) > session->s->limit_req_fieldsize) {
+ /* header too long */
+- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
+- H2_STRM_MSG(stream, "header %s too long"), name);
++ ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, session->c,
++ H2_STRM_LOG(APLOGNO(10180), stream,"Request header exceeds "
++ "LimitRequestFieldSize: %.*s"),
++ (int)H2MIN(nlen, 80), name);
+ error = HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE;
+ }
+
+@@ -722,8 +720,9 @@
+ h2_stream_rst(stream, H2_ERR_ENHANCE_YOUR_CALM);
+ return APR_ECONNRESET;
+ }
+- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
+- H2_STRM_MSG(stream, "too many header lines"));
++ ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, session->c,
++ H2_STRM_LOG(APLOGNO(10181), stream, "Number of request headers "
++ "exceeds LimitRequestFields"));
+ error = HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE;
+ }
+
+@@ -754,6 +753,47 @@
+ return status;
+ }
+
++typedef struct {
++ apr_size_t maxlen;
++ const char *failed_key;
++} val_len_check_ctx;
++
++static int table_check_val_len(void *baton, const char *key, const char *value)
++{
++ val_len_check_ctx *ctx = baton;
++
++ if (strlen(value) <= ctx->maxlen) return 1;
++ ctx->failed_key = key;
++ return 0;
++}
++
++apr_status_t h2_stream_end_headers(h2_stream *stream, int eos, size_t raw_bytes)
++{
++ apr_status_t status;
++ val_len_check_ctx ctx;
++
++ status = h2_request_end_headers(stream->rtmp, stream->pool, eos, raw_bytes);
++ if (APR_SUCCESS == status) {
++ set_policy_for(stream, stream->rtmp);
++ stream->request = stream->rtmp;
++ stream->rtmp = NULL;
++
++ ctx.maxlen = stream->session->s->limit_req_fieldsize;
++ ctx.failed_key = NULL;
++ apr_table_do(table_check_val_len, &ctx, stream->request->headers, NULL);
++ if (ctx.failed_key) {
++ ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, stream->session->c,
++ H2_STRM_LOG(APLOGNO(), stream,"Request header exceeds "
++ "LimitRequestFieldSize: %.*s"),
++ (int)H2MIN(strlen(ctx.failed_key), 80), ctx.failed_key);
++ set_error_response(stream, HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE);
++ /* keep on returning APR_SUCCESS, so that we send a HTTP response and
++ * do not RST the stream. */
++ }
++ }
++ return status;
++}
++
+ static apr_bucket *get_first_headers_bucket(apr_bucket_brigade *bb)
+ {
+ if (bb) {
+Index: modules/http2/h2_stream.h
+===================================================================
+--- modules/http2/h2_stream.h (revision 1863276)
++++ modules/http2/h2_stream.h (working copy)
+@@ -198,7 +198,11 @@
+ apr_status_t h2_stream_add_header(h2_stream *stream,
+ const char *name, size_t nlen,
+ const char *value, size_t vlen);
++
++/* End the construction of request headers */
++apr_status_t h2_stream_end_headers(h2_stream *stream, int eos, size_t raw_bytes);
+
++
+ apr_status_t h2_stream_send_frame(h2_stream *stream, int frame_type, int flags, size_t frame_len);
+ apr_status_t h2_stream_recv_frame(h2_stream *stream, int frame_type, int flags, size_t frame_len);
+
+Index: modules/http2/h2_task.c
+===================================================================
+--- modules/http2/h2_task.c (revision 1863276)
++++ modules/http2/h2_task.c (working copy)
+@@ -406,8 +406,15 @@
+ || !strcmp("OPTIONS", task->request->method));
+ }
+
++int h2_task_has_started(h2_task *task)
++{
++ return task && task->started_at != 0;
++}
++
+ void h2_task_redo(h2_task *task)
+ {
++ task->started_at = 0;
++ task->worker_done = 0;
+ task->rst_error = 0;
+ }
+
+@@ -546,7 +553,6 @@
+ ap_assert(task);
+ c = task->c;
+ task->worker_started = 1;
+- task->started_at = apr_time_now();
+
+ if (c->master) {
+ /* Each conn_rec->id is supposed to be unique at a point in time. Since
+Index: modules/http2/h2_task.h
+===================================================================
+--- modules/http2/h2_task.h (revision 1863276)
++++ modules/http2/h2_task.h (working copy)
+@@ -80,6 +80,7 @@
+
+ unsigned int filters_set : 1;
+ unsigned int worker_started : 1; /* h2_worker started processing */
++ unsigned int redo : 1; /* was throttled, should be restarted later */
+
+ int worker_done; /* h2_worker finished */
+ int done_done; /* task_done has been handled */
+@@ -101,6 +102,7 @@
+
+ void h2_task_redo(h2_task *task);
+ int h2_task_can_redo(h2_task *task);
++int h2_task_has_started(h2_task *task);
+
+ /**
+ * Reset the task with the given error code, resets all input/output.
+Index: modules/http2/h2_version.h
+===================================================================
+--- modules/http2/h2_version.h (revision 1863276)
++++ modules/http2/h2_version.h (working copy)
+@@ -27,7 +27,7 @@
+ * @macro
+ * Version number of the http2 module as c string
+ */
+-#define MOD_HTTP2_VERSION "1.15.1"
++#define MOD_HTTP2_VERSION "1.15.4"
+
+ /**
+ * @macro
+@@ -35,6 +35,6 @@
+ * release. This is a 24 bit number with 8 bits for major number, 8 bits
+ * for minor and 8 bits for patch. Version 1.2.3 becomes 0x010203.
+ */
+-#define MOD_HTTP2_VERSION_NUM 0x010f01
++#define MOD_HTTP2_VERSION_NUM 0x010f04
+
+ #endif /* mod_h2_h2_version_h */
+Index: server/mpm/event/event.c
+===================================================================
+--- server/mpm/event/event.c (revision 1863276)
++++ server/mpm/event/event.c (working copy)
+@@ -1113,10 +1113,11 @@
+ "network write failure in core output filter");
+ cs->pub.state = CONN_STATE_LINGER;
+ }
+- else if (c->data_in_output_filters) {
++ else if (c->data_in_output_filters ||
++ cs->pub.sense == CONN_SENSE_WANT_READ) {
+ /* Still in WRITE_COMPLETION_STATE:
+- * Set a write timeout for this connection, and let the
+- * event thread poll for writeability.
++ * Set a read/write timeout for this connection, and let the
++ * event thread poll for read/writeability.
+ */
+ cs->queue_timestamp = apr_time_now();
+ notify_suspend(cs);
+Index: .
+===================================================================
+--- . (revision 1863276)
++++ . (working copy)
+
+Property changes on: .
+___________________________________________________________________
+Modified: svn:mergeinfo
+## -0,0 +0,1 ##
+ Merged /httpd/httpd/trunk:r1863221,1863276