Completing request header buffers to avoid memory leak.

Before this fix, request buffers were completed only for persistent (keep-alive) connections.

This issue was introduced in dc403927ab0b.
Author: Max Romanov
Date:   2020-03-19 20:43:35 +03:00
Parent: 93207d4a8c
Commit: c26fbbe53a

3 changed files with 29 additions and 22 deletions
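
For orientation, a condensed sketch of the change, assembled from the hunks below rather than quoted from the source: nxt_h1p_complete_buffers() gains an `all` flag, and only the shutdown path passes 1, so a read buffer that still holds request header bytes is handed to its completion handler instead of leaking when the connection is torn down.

void nxt_h1p_complete_buffers(nxt_task_t *task, nxt_h1proto_t *h1p,
    nxt_bool_t all);

/* Connection stays in use: release only buffers that are fully drained. */
nxt_h1p_complete_buffers(task, h1p, 0);   /* keep-alive, websocket frame start */

/* Connection is being shut down: also complete the read buffer that may
 * still hold partially parsed request header bytes, so it is not leaked. */
nxt_h1p_complete_buffers(task, h1p, 1);   /* nxt_h1p_shutdown() */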


@@ -1364,17 +1364,19 @@ nxt_h1p_request_header_send(nxt_task_t *task, nxt_http_request_t *r,
 
 
 void
-nxt_h1p_complete_buffers(nxt_task_t *task, nxt_h1proto_t *h1p)
+nxt_h1p_complete_buffers(nxt_task_t *task, nxt_h1proto_t *h1p, nxt_bool_t all)
 {
-    size_t      size;
-    nxt_buf_t   *b, *in, *next;
-    nxt_conn_t  *c;
+    size_t            size;
+    nxt_buf_t         *b, *in, *next;
+    nxt_conn_t        *c;
+    nxt_work_queue_t  *wq;
 
     nxt_debug(task, "h1p complete buffers");
 
     b = h1p->buffers;
     c = h1p->conn;
     in = c->read;
+    wq = &task->thread->engine->fast_work_queue;
 
     if (b != NULL) {
         if (in == NULL) {
@@ -1390,8 +1392,7 @@ nxt_h1p_complete_buffers(nxt_task_t *task, nxt_h1proto_t *h1p)
             next = b->next;
             b->next = NULL;
 
-            nxt_work_queue_add(&task->thread->engine->fast_work_queue,
-                               b->completion_handler, task, b, b->parent);
+            nxt_work_queue_add(wq, b->completion_handler, task, b, b->parent);
 
             b = next;
         }
@@ -1403,9 +1404,9 @@ nxt_h1p_complete_buffers(nxt_task_t *task, nxt_h1proto_t *h1p)
     if (in != NULL) {
         size = nxt_buf_mem_used_size(&in->mem);
 
-        if (size == 0) {
-            nxt_work_queue_add(&task->thread->engine->fast_work_queue,
-                               in->completion_handler, task, in, in->parent);
+        if (size == 0 || all) {
+            nxt_work_queue_add(wq, in->completion_handler, task, in,
+                               in->parent);
 
             c->read = NULL;
         }
@@ -1754,7 +1755,7 @@ nxt_h1p_keepalive(nxt_task_t *task, nxt_h1proto_t *h1p, nxt_conn_t *c)
         nxt_conn_tcp_nodelay_on(task, c);
     }
 
-    nxt_h1p_complete_buffers(task, h1p);
+    nxt_h1p_complete_buffers(task, h1p, 0);
 
     in = c->read;
 
@@ -1952,20 +1953,25 @@ nxt_h1p_shutdown(nxt_task_t *task, nxt_conn_t *c)
     h1p = c->socket.data;
 
-    if (nxt_slow_path(h1p != NULL && h1p->websocket_timer != NULL)) {
-        timer = &h1p->websocket_timer->timer;
+    if (h1p != NULL) {
+        nxt_h1p_complete_buffers(task, h1p, 1);
 
-        if (timer->handler != nxt_h1p_conn_ws_shutdown) {
-            timer->handler = nxt_h1p_conn_ws_shutdown;
-            nxt_timer_add(task->thread->engine, timer, 0);
+        if (nxt_slow_path(h1p->websocket_timer != NULL)) {
+            timer = &h1p->websocket_timer->timer;
 
-        } else {
-            nxt_debug(task, "h1p already scheduled ws shutdown");
-        }
+            if (timer->handler != nxt_h1p_conn_ws_shutdown) {
+                timer->handler = nxt_h1p_conn_ws_shutdown;
+                nxt_timer_add(task->thread->engine, timer, 0);
 
-    } else {
-        nxt_h1p_closing(task, c);
+            } else {
+                nxt_debug(task, "h1p already scheduled ws shutdown");
+            }
+
+            return;
+        }
     }
+
+    nxt_h1p_closing(task, c);
 }
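
Because the interleaved '-' and '+' lines above are hard to follow, here is the tail of nxt_h1p_shutdown() as it reads once the hunk is applied, reassembled from the added lines with explanatory comments that are not in the source:

    h1p = c->socket.data;

    if (h1p != NULL) {
        /* Complete buffers on every shutdown, including a read buffer that
         * still holds data, to avoid the request header buffer leak. */
        nxt_h1p_complete_buffers(task, h1p, 1);

        if (nxt_slow_path(h1p->websocket_timer != NULL)) {
            timer = &h1p->websocket_timer->timer;

            if (timer->handler != nxt_h1p_conn_ws_shutdown) {
                timer->handler = nxt_h1p_conn_ws_shutdown;
                nxt_timer_add(task->thread->engine, timer, 0);

            } else {
                nxt_debug(task, "h1p already scheduled ws shutdown");
            }

            /* Websocket teardown is deferred to the timer handler. */
            return;
        }
    }

    nxt_h1p_closing(task, c);
}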


@@ -135,7 +135,7 @@ nxt_h1p_websocket_frame_start(nxt_task_t *task, nxt_http_request_t *r,
     c = h1p->conn;
     c->read = ws_frame;
 
-    nxt_h1p_complete_buffers(task, h1p);
+    nxt_h1p_complete_buffers(task, h1p, 0);
 
     in = c->read;
     c->read_state = &nxt_h1p_read_ws_frame_header_state;


@@ -320,7 +320,8 @@ void nxt_h1p_websocket_first_frame_start(nxt_task_t *task,
     nxt_http_request_t *r, nxt_buf_t *ws_frame);
 void nxt_h1p_websocket_frame_start(nxt_task_t *task, nxt_http_request_t *r,
     nxt_buf_t *ws_frame);
-void nxt_h1p_complete_buffers(nxt_task_t *task, nxt_h1proto_t *h1p);
+void nxt_h1p_complete_buffers(nxt_task_t *task, nxt_h1proto_t *h1p,
+    nxt_bool_t all);
 nxt_msec_t nxt_h1p_conn_request_timer_value(nxt_conn_t *c, uintptr_t data);
 
 extern const nxt_conn_state_t  nxt_h1p_idle_close_state;