Skeleton of router configuration and request processing.
@@ -71,6 +71,7 @@ NXT_LIB_DEPS=" \
     src/nxt_conf.h \
     src/nxt_application.h \
     src/nxt_master_process.h \
+    src/nxt_router.h \
 "

 NXT_LIB_SRCS=" \
@@ -128,6 +129,7 @@ NXT_LIB_SRCS=" \
     src/nxt_event_conn_accept.c \
     src/nxt_event_conn_read.c \
     src/nxt_event_conn_write.c \
+    src/nxt_conn_close.c \
     src/nxt_event_conn_job_sendfile.c \
     src/nxt_event_conn_proxy.c \
     src/nxt_job.c \
@@ -99,7 +99,7 @@ nxt_app_start(nxt_task_t *task, nxt_runtime_t *rt)

     if (nxt_fast_path(link != NULL)) {
         link->start = nxt_app_thread;
-        link->data = rt;
+        link->work.data = rt;

         return nxt_thread_create(&handle, link);
     }
@@ -151,8 +151,8 @@ nxt_app_thread(void *ctx)
     nxt_socket_t         s;
     nxt_thread_t         *thr;
     nxt_runtime_t        *rt;
+    nxt_queue_link_t     *link;
     nxt_app_request_t    *r;
-    nxt_event_engine_t   **engines;
     nxt_listen_socket_t  *ls;
     u_char               buf[SIZE];
     const size_t         size = SIZE;
@@ -163,9 +163,9 @@ nxt_app_thread(void *ctx)
     nxt_log_debug(thr->log, "app thread");

     rt = ctx;
-    engines = rt->engines->elts;

-    nxt_app_engine = engines[0];
+    link = nxt_queue_first(&rt->engines);
+    nxt_app_engine = nxt_queue_link_data(link, nxt_event_engine_t, link);

     nxt_app_mem_pool = nxt_mem_pool_create(512);
     if (nxt_slow_path(nxt_app_mem_pool == NULL)) {
@@ -173,6 +173,10 @@ nxt_container_of(p, type, field) \
     (type *) ((u_char *) (p) - offsetof(type, field))


+#define nxt_value_at(type, p, offset) \
+    *(type *) ((u_char *) p + offset)
+
+
 #define \
 nxt_nitems(x) \
     (sizeof(x) / sizeof((x)[0]))
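
As an aside, nxt_container_of() is the classic container-of idiom: given a pointer to a member, subtract the member's offset within the enclosing structure to recover the structure itself. nxt_queue_link_data(), which this commit starts using for engines, is built on the same arithmetic. A minimal standalone sketch with generic names (not code from the tree):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(p, type, field)                                  \
        ((type *) ((char *) (p) - offsetof(type, field)))

    typedef struct {
        int  id;
        struct { void *prev, *next; }  link;  /* embedded list link */
    } engine_t;

    int
    main(void)
    {
        engine_t   e = { .id = 7 };
        void      *p = &e.link;         /* only the embedded link is known */

        /* Recover the enclosing engine_t from the link pointer. */
        engine_t  *found = container_of(p, engine_t, link);

        printf("id: %d\n", found->id);  /* prints "id: 7" */
        return 0;
    }
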
src/nxt_conn_close.c (new file, 159 lines)
@@ -0,0 +1,159 @@

/*
 * Copyright (C) Igor Sysoev
 * Copyright (C) NGINX, Inc.
 */

#include <nxt_main.h>


static void nxt_conn_shutdown_handler(nxt_task_t *task, void *obj, void *data);
static void nxt_conn_close_handler(nxt_task_t *task, void *obj, void *data);
static void nxt_conn_close_timer_handler(nxt_task_t *task, void *obj,
    void *data);
static void nxt_conn_close_error_ignore(nxt_task_t *task, void *obj,
    void *data);


void
nxt_event_conn_close(nxt_event_engine_t *engine, nxt_event_conn_t *c)
{
    int                 ret;
    socklen_t           len;
    struct linger       linger;
    nxt_work_queue_t    *wq;
    nxt_work_handler_t  handler;

    nxt_debug(c->socket.task, "conn close fd:%d, to:%d",
              c->socket.fd, c->socket.timedout);

    if (c->socket.timedout) {
        /*
         * Resetting a timed-out connection on close releases
         * the kernel memory associated with the socket.
         * It also causes a TCP RST to be sent to the peer.
         */
        linger.l_onoff = 1;
        linger.l_linger = 0;
        len = sizeof(struct linger);

        ret = setsockopt(c->socket.fd, SOL_SOCKET, SO_LINGER, &linger, len);

        if (nxt_slow_path(ret != 0)) {
            nxt_log(c->socket.task, NXT_LOG_CRIT,
                    "setsockopt(%d, SO_LINGER) failed %E",
                    c->socket.fd, nxt_socket_errno);
        }
    }

    /*
     * Event errors should be ignored here to avoid repeated nxt_conn_close()
     * calls.  nxt_conn_close_handler() or nxt_conn_close_timer_handler()
     * will eventually close the socket.
     */
    c->socket.error_handler = nxt_conn_close_error_ignore;

    if (c->socket.error == 0 && !c->socket.closed && !c->socket.shutdown) {
        wq = &engine->shutdown_work_queue;
        handler = nxt_conn_shutdown_handler;

    } else {
        wq = &engine->close_work_queue;
        handler = nxt_conn_close_handler;
    }

    nxt_work_queue_add(wq, handler, c->socket.task, c, engine);
}


static void
nxt_conn_shutdown_handler(nxt_task_t *task, void *obj, void *data)
{
    nxt_event_conn_t    *c;
    nxt_event_engine_t  *engine;

    c = obj;
    engine = data;

    nxt_debug(task, "conn shutdown handler fd:%d", c->socket.fd);

    c->socket.shutdown = 1;

    nxt_socket_shutdown(task, c->socket.fd, SHUT_RDWR);

    nxt_work_queue_add(&engine->close_work_queue, nxt_conn_close_handler,
                       task, c, engine);
}


static void
nxt_conn_close_handler(nxt_task_t *task, void *obj, void *data)
{
    nxt_uint_t          events_pending, timers_pending;
    nxt_event_conn_t    *c;
    nxt_event_engine_t  *engine;

    c = obj;
    engine = data;

    nxt_debug(task, "conn close handler fd:%d", c->socket.fd);

    /*
     * The socket should be closed only after all pending socket event
     * operations have been processed by the kernel.  This can be achieved
     * with a zero-value timer handler.  Pending timer operations associated
     * with the socket are processed before going to the kernel.
     */

    timers_pending = nxt_timer_delete(engine, &c->read_timer);
    timers_pending += nxt_timer_delete(engine, &c->write_timer);

    events_pending = nxt_fd_event_close(engine, &c->socket);

    if (events_pending == 0) {
        nxt_socket_close(task, c->socket.fd);
        c->socket.fd = -1;

        if (timers_pending == 0) {
            nxt_work_queue_add(&engine->fast_work_queue,
                               c->write_state->ready_handler,
                               task, c, c->socket.data);
            return;
        }
    }

    c->write_timer.handler = nxt_conn_close_timer_handler;
    c->write_timer.work_queue = &engine->fast_work_queue;

    nxt_timer_add(engine, &c->write_timer, 0);
}


static void
nxt_conn_close_timer_handler(nxt_task_t *task, void *obj, void *data)
{
    nxt_timer_t       *timer;
    nxt_event_conn_t  *c;

    timer = obj;

    c = nxt_event_write_timer_conn(timer);

    nxt_debug(task, "conn close timer handler fd:%d", c->socket.fd);

    if (c->socket.fd != -1) {
        nxt_socket_close(task, c->socket.fd);
        c->socket.fd = -1;
    }

    nxt_work_queue_add(&task->thread->engine->fast_work_queue,
                       c->write_state->ready_handler,
                       task, c, c->socket.data);
}


static void
nxt_conn_close_error_ignore(nxt_task_t *task, void *obj, void *data)
{
    nxt_debug(task, "conn close error ignore");
}

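The SO_LINGER trick in nxt_event_conn_close() above is worth seeing in isolation: l_onoff = 1 with l_linger = 0 makes close() send a TCP RST instead of a graceful FIN, which immediately releases the kernel memory tied to the socket. A minimal sketch with a hypothetical reset_on_close() helper (not part of the tree):

    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <unistd.h>

    /* Hypothetical helper: drop the connection on close instead of
     * performing the normal FIN handshake. */
    static int
    reset_on_close(int fd)
    {
        struct linger  linger;

        memset(&linger, 0, sizeof(struct linger));
        linger.l_onoff = 1;       /* enable lingering ...         */
        linger.l_linger = 0;      /* ... with a zero timeout: RST */

        if (setsockopt(fd, SOL_SOCKET, SO_LINGER, &linger,
                       sizeof(struct linger)) != 0)
        {
            perror("setsockopt(SO_LINGER)");
        }

        return close(fd);         /* resets instead of shutting down */
    }

    int
    main(void)
    {
        int  fd = socket(AF_INET, SOCK_STREAM, 0);

        return reset_on_close(fd);
    }
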
@@ -994,7 +994,7 @@ nxt_epoll_event_conn_io_accept4(nxt_task_t *task, void *obj, void *data)
     nxt_event_conn_listen_t  *cls;

     cls = obj;
-    c = data;
+    c = cls->next;

     cls->ready--;
     cls->socket.read_ready = (cls->ready != 0);

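The `c = data` to `c = cls->next` changes here and in the other accept paths below switch from fetching the prepared connection out of the fd event's data pointer to a dedicated next field on the per-thread listen state (marked /* STUB */ in nxt_event_conn.h, so still in flux). The underlying pattern is pre-allocating the next connection object ahead of time so the accept hot path never allocates. A generic sketch of that pattern (illustrative names only):

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct {
        int  fd;
    } conn_t;

    typedef struct {
        conn_t  *next;   /* connection prepared for the upcoming accept() */
    } listen_event_t;

    /* Done outside the hot path: keep one connection ready in advance. */
    static void
    prepare_next(listen_event_t *lev)
    {
        lev->next = calloc(1, sizeof(conn_t));
    }

    /* The accept handler only picks up what was prepared earlier. */
    static conn_t *
    on_accept(listen_event_t *lev, int fd)
    {
        conn_t  *c = lev->next;

        if (c != NULL) {
            c->fd = fd;
            prepare_next(lev);    /* replenish for the next event */
        }

        return c;
    }

    int
    main(void)
    {
        listen_event_t  lev = { NULL };
        conn_t         *c;

        prepare_next(&lev);
        c = on_accept(&lev, 42);

        printf("accepted fd: %d\n", c ? c->fd : -1);

        free(c);
        free(lev.next);
        return 0;
    }
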
@@ -7,12 +7,6 @@
 #include <nxt_main.h>


-static void nxt_conn_shutdown_handler(nxt_task_t *task, void *obj, void *data);
-static void nxt_conn_close_handler(nxt_task_t *task, void *obj, void *data);
-static void nxt_conn_close_timer_handler(nxt_task_t *task, void *obj,
-    void *data);
-
-
 nxt_event_conn_io_t  nxt_unix_event_conn_io = {
     nxt_event_conn_io_connect,
     nxt_event_conn_io_accept,
@@ -131,128 +125,6 @@ nxt_event_conn_io_shutdown(nxt_task_t *task, void *obj, void *data)
 }


-void
-nxt_event_conn_close(nxt_event_engine_t *engine, nxt_event_conn_t *c)
-{
-    int                 ret;
-    socklen_t           len;
-    struct linger       linger;
-    nxt_work_queue_t    *wq;
-    nxt_work_handler_t  handler;
-
-    if (c->socket.timedout) {
-        /*
-         * Resetting of timed out connection on close
-         * releases kernel memory associated with socket.
-         * This also causes sending TCP/IP RST to a peer.
-         */
-        linger.l_onoff = 1;
-        linger.l_linger = 0;
-        len = sizeof(struct linger);
-
-        ret = setsockopt(c->socket.fd, SOL_SOCKET, SO_LINGER, &linger, len);
-
-        if (nxt_slow_path(ret != 0)) {
-            nxt_log(c->socket.task, NXT_LOG_CRIT,
-                    "setsockopt(%d, SO_LINGER) failed %E",
-                    c->socket.fd, nxt_socket_errno);
-        }
-    }
-
-    if (c->socket.error == 0 && !c->socket.closed && !c->socket.shutdown) {
-        wq = &engine->shutdown_work_queue;
-        handler = nxt_conn_shutdown_handler;
-
-    } else {
-        wq = &engine->close_work_queue;
-        handler = nxt_conn_close_handler;
-    }
-
-    nxt_work_queue_add(wq, handler, c->socket.task, c, engine);
-}
-
-
-static void
-nxt_conn_shutdown_handler(nxt_task_t *task, void *obj, void *data)
-{
-    nxt_event_conn_t    *c;
-    nxt_event_engine_t  *engine;
-
-    c = obj;
-    engine = data;
-
-    nxt_debug(task, "event conn shutdown fd:%d", c->socket.fd);
-
-    c->socket.shutdown = 1;
-
-    nxt_socket_shutdown(task, c->socket.fd, SHUT_RDWR);
-
-    nxt_work_queue_add(&engine->close_work_queue, nxt_conn_close_handler,
-                       task, c, engine);
-}
-
-
-static void
-nxt_conn_close_handler(nxt_task_t *task, void *obj, void *data)
-{
-    nxt_uint_t          events_pending, timers_pending;
-    nxt_event_conn_t    *c;
-    nxt_event_engine_t  *engine;
-
-    c = obj;
-    engine = data;
-
-    nxt_debug(task, "event conn close fd:%d", c->socket.fd);
-
-    timers_pending = nxt_timer_delete(engine, &c->read_timer);
-    timers_pending += nxt_timer_delete(engine, &c->write_timer);
-
-    events_pending = nxt_fd_event_close(engine, &c->socket);
-
-    if (events_pending == 0) {
-        nxt_socket_close(task, c->socket.fd);
-        c->socket.fd = -1;
-
-        if (timers_pending == 0) {
-            nxt_work_queue_add(&engine->fast_work_queue,
-                               c->write_state->ready_handler,
-                               task, c, c->socket.data);
-            return;
-        }
-    }
-
-    c->write_timer.handler = nxt_conn_close_timer_handler;
-    c->write_timer.work_queue = &engine->fast_work_queue;
-
-    nxt_timer_add(engine, &c->write_timer, 0);
-}
-
-
-static void
-nxt_conn_close_timer_handler(nxt_task_t *task, void *obj, void *data)
-{
-    nxt_timer_t         *ev;
-    nxt_event_conn_t    *c;
-    nxt_event_engine_t  *engine;
-
-    ev = obj;
-
-    c = nxt_event_write_timer_conn(ev);
-
-    nxt_debug(task, "event conn close handler fd:%d", c->socket.fd);
-
-    if (c->socket.fd != -1) {
-        nxt_socket_close(task, c->socket.fd);
-        c->socket.fd = -1;
-    }
-
-    engine = task->thread->engine;
-
-    nxt_work_queue_add(&engine->fast_work_queue, c->write_state->ready_handler,
-                       task, c, c->socket.data);
-}


 void
 nxt_event_conn_timer(nxt_event_engine_t *engine, nxt_event_conn_t *c,
     const nxt_event_conn_state_t *state, nxt_timer_t *tev)
@@ -100,6 +100,35 @@ typedef struct {
 } nxt_event_conn_io_t;


+/*
+ * The nxt_event_conn_listen_t is separated from nxt_listen_socket_t
+ * because nxt_listen_socket_t is one per process while each worker
+ * thread uses its own nxt_event_conn_listen_t.
+ */
+typedef struct {
+    /* Must be the first field. */
+    nxt_fd_event_t       socket;
+
+    nxt_task_t           task;
+
+    uint32_t             ready;
+    uint32_t             batch;
+
+    /* An accept() interface is cached to minimize memory accesses. */
+    nxt_work_handler_t   accept;
+
+    nxt_listen_socket_t  *listen;
+    nxt_event_conn_t     *next;   /* STUB */
+    nxt_work_queue_t     *work_queue;
+
+    nxt_timer_t          timer;
+
+    nxt_queue_link_t     link;
+} nxt_event_conn_listen_t;
+
+typedef nxt_event_conn_listen_t  nxt_listen_event_t;
+
+
 struct nxt_event_conn_s {
     /*
      * Must be the first field, since nxt_fd_event_t
@@ -143,7 +172,7 @@ struct nxt_event_conn_s {
     nxt_task_t               task;
     nxt_log_t                log;

-    nxt_listen_socket_t      *listen;
+    nxt_event_conn_listen_t  *listen;
     nxt_sockaddr_t           *remote;
     nxt_sockaddr_t           *local;
     const char               *action;
@@ -163,31 +192,6 @@ struct nxt_event_conn_s {
 };


-/*
- * The nxt_event_conn_listen_t is separated from nxt_listen_socket_t
- * because nxt_listen_socket_t is one per process whilst each worker
- * thread uses own nxt_event_conn_listen_t.
- */
-typedef struct {
-    /* Must be the first field. */
-    nxt_fd_event_t       socket;
-
-    nxt_task_t           task;
-
-    uint32_t             ready;
-    uint32_t             batch;
-
-    /* An accept() interface is cached to minimize memory accesses. */
-    nxt_work_handler_t   accept;
-
-    nxt_listen_socket_t  *listen;
-
-    nxt_timer_t          timer;
-
-    nxt_queue_link_t     link;
-} nxt_event_conn_listen_t;
-
-
 #define \
 nxt_event_conn_timer_init(ev, c, wq) \
     do { \
@@ -256,6 +260,8 @@ nxt_int_t nxt_event_conn_socket(nxt_task_t *task, nxt_event_conn_t *c);
 void nxt_event_conn_connect_test(nxt_task_t *task, void *obj, void *data);
 void nxt_event_conn_connect_error(nxt_task_t *task, void *obj, void *data);

+NXT_EXPORT nxt_event_conn_listen_t *nxt_listen_event(nxt_task_t *task,
+    nxt_listen_socket_t *ls);
 NXT_EXPORT nxt_int_t nxt_event_conn_listen(nxt_task_t *task,
     nxt_listen_socket_t *ls);
 void nxt_event_conn_io_accept(nxt_task_t *task, void *obj, void *data);
@@ -32,6 +32,53 @@ static void nxt_event_conn_listen_timer_handler(nxt_task_t *task, void *obj,
     void *data);


+nxt_event_conn_listen_t *
+nxt_listen_event(nxt_task_t *task, nxt_listen_socket_t *ls)
+{
+    nxt_event_engine_t       *engine;
+    nxt_event_conn_listen_t  *cls;
+
+    cls = nxt_zalloc(sizeof(nxt_event_conn_listen_t));
+
+    if (nxt_fast_path(cls != NULL)) {
+        cls->socket.fd = ls->socket;
+
+        engine = task->thread->engine;
+        cls->batch = engine->batch;
+
+        cls->socket.read_work_queue = &engine->accept_work_queue;
+        cls->socket.read_handler = nxt_event_conn_listen_handler;
+        cls->socket.error_handler = nxt_event_conn_listen_event_error;
+        cls->socket.log = &nxt_main_log;
+
+        cls->accept = engine->event.io->accept;
+
+        cls->listen = ls;
+        cls->work_queue = &engine->read_work_queue;
+
+        cls->timer.work_queue = &engine->fast_work_queue;
+        cls->timer.handler = nxt_event_conn_listen_timer_handler;
+        cls->timer.log = &nxt_main_log;
+
+        cls->task.thread = task->thread;
+        cls->task.log = &nxt_main_log;
+        cls->task.ident = nxt_task_next_ident();
+        cls->socket.task = &cls->task;
+        cls->timer.task = &cls->task;
+
+        if (nxt_event_conn_accept_alloc(task, cls) != NULL) {
+            nxt_fd_event_enable_accept(engine, &cls->socket);
+
+            nxt_queue_insert_head(&engine->listen_connections, &cls->link);
+        }
+
+        return cls;
+    }
+
+    return NULL;
+}
+
+
 nxt_int_t
 nxt_event_conn_listen(nxt_task_t *task, nxt_listen_socket_t *ls)
 {

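nxt_listen_event() above ties a per-thread accept context to a process-wide listening socket: the nxt_listen_socket_t is shared, while the ready/batch counters, timer, and work queues live in the engine-local nxt_event_conn_listen_t. A stripped-down sketch of that split, with generic names instead of Unit's types:

    #include <stdio.h>
    #include <stdlib.h>

    /* One per process: the listening socket itself. */
    typedef struct {
        int  fd;
    } listen_socket_t;

    /* One per worker thread: accept bookkeeping for that thread. */
    typedef struct {
        listen_socket_t  *listen;  /* shared across threads */
        unsigned          ready;   /* connections ready to accept */
        unsigned          batch;   /* engine's accept batch size */
    } listen_event_t;

    /* Analogue of nxt_listen_event(): allocate per-thread state that
     * references the shared socket. */
    static listen_event_t *
    listen_event_create(listen_socket_t *ls, unsigned batch)
    {
        listen_event_t  *lev = calloc(1, sizeof(listen_event_t));

        if (lev != NULL) {
            lev->listen = ls;
            lev->batch = batch;
        }

        return lev;
    }

    int
    main(void)
    {
        listen_socket_t  ls = { .fd = 3 };   /* stand-in fd */
        listen_event_t  *lev = listen_event_create(&ls, 32);

        printf("fd %d, batch %u\n", lev->listen->fd, lev->batch);
        free(lev);
        return 0;
    }
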
@@ -97,13 +144,12 @@ nxt_event_conn_accept_alloc(nxt_task_t *task, nxt_event_conn_listen_t *cls)
     /* This allocation cannot fail. */
     c = nxt_event_conn_create(mp, cls->socket.task);

-    cls->socket.data = c;
+    cls->next = c;
     c->socket.read_work_queue = cls->socket.read_work_queue;
     c->socket.write_ready = 1;
+    c->listen = cls;

     ls = cls->listen;
-    c->listen = ls;

     /* This allocation cannot fail. */
     remote = nxt_sockaddr_alloc(mp, ls->socklen, ls->address_length);
     c->remote = remote;
@@ -146,7 +192,7 @@ nxt_event_conn_io_accept(nxt_task_t *task, void *obj, void *data)
     nxt_event_conn_listen_t  *cls;

     cls = obj;
-    c = data;
+    c = cls->next;

     cls->ready--;
     cls->socket.read_ready = (cls->ready != 0);
@@ -200,17 +246,19 @@ nxt_event_conn_accept(nxt_task_t *task, nxt_event_conn_listen_t *cls,

     nxt_queue_insert_head(&task->thread->engine->idle_connections, &c->link);

-    c->read_work_queue = c->listen->work_queue;
-    c->write_work_queue = c->listen->work_queue;
+    c->read_work_queue = cls->work_queue;
+    c->write_work_queue = cls->work_queue;

-    if (c->listen->read_after_accept) {
+    if (cls->listen->read_after_accept) {

-        //c->socket.read_ready = 1;
-        c->listen->handler(task, c, NULL);
+        // cls->listen->handler(task, c, cls->socket.data);
+        nxt_work_queue_add(c->read_work_queue, cls->listen->handler,
+                           task, c, cls->socket.data);

     } else {
-        nxt_work_queue_add(c->write_work_queue, c->listen->handler,
-                           task, c, NULL);
+        nxt_work_queue_add(c->write_work_queue, cls->listen->handler,
+                           task, c, cls->socket.data);
     }

     next = nxt_event_conn_accept_next(task, cls);
@@ -227,7 +275,7 @@ nxt_event_conn_accept_next(nxt_task_t *task, nxt_event_conn_listen_t *cls)
 {
     nxt_event_conn_t  *c;

-    cls->socket.data = NULL;
+    cls->next = NULL;

     do {
         c = nxt_event_conn_accept_alloc(task, cls);
@@ -338,7 +386,7 @@ nxt_event_conn_listen_timer_handler(nxt_task_t *task, void *obj, void *data)
     ev = obj;

     cls = nxt_timer_data(ev, nxt_event_conn_listen_t, timer);
-    c = cls->socket.data;
+    c = cls->next;

     if (c == NULL) {
         c = nxt_event_conn_accept_next(task, cls);

@@ -44,9 +44,6 @@ nxt_event_engine_create(nxt_task_t *task,
     engine->task.log = thread->log;
     engine->task.ident = nxt_task_next_ident();

-    thread->engine = engine;
-    thread->fiber = &engine->fibers->fiber;
-
     engine->batch = batch;

     if (flags & NXT_ENGINE_FIBERS) {
@@ -121,6 +118,7 @@ nxt_event_engine_create(nxt_task_t *task,

     engine->max_connections = 0xffffffff;

+    nxt_queue_init(&engine->joints);
     nxt_queue_init(&engine->listen_connections);
     nxt_queue_init(&engine->idle_connections);

@@ -354,6 +354,11 @@ nxt_fd_event_disable(engine, ev) \
     (engine)->event.disable(engine, ev)


+#define \
+nxt_fd_event_delete(engine, ev) \
+    (engine)->event.delete(engine, ev)
+
+
 #define \
 nxt_fd_event_close(engine, ev) \
     (engine)->event.close(engine, ev)
@@ -481,8 +486,13 @@ struct nxt_event_engine_s {
     uint32_t              connections;
     uint32_t              max_connections;

+    nxt_port_t            *port;
+    nxt_mem_cache_pool_t  *mem_pool;
+    nxt_queue_t           joints;
     nxt_queue_t           listen_connections;
     nxt_queue_t           idle_connections;
+
+    nxt_queue_link_t      link;
 };

@@ -522,6 +522,8 @@ nxt_kqueue_fd_error_handler(nxt_task_t *task, void *obj, void *data)

     ev = obj;

+    nxt_debug(task, "kqueue fd error handler fd:%d", ev->fd);
+
     if (ev->kq_eof && ev->kq_errno != 0) {
         ev->error = ev->kq_errno;
         nxt_log(task, nxt_socket_error_level(ev->kq_errno),
@@ -544,6 +546,8 @@ nxt_kqueue_file_error_handler(nxt_task_t *task, void *obj, void *data)

     ev = obj;

+    nxt_debug(task, "kqueue file error handler fd:%d", ev->file->fd);
+
     ev->handler(task, ev, data);
 }

@@ -924,7 +928,7 @@ nxt_kqueue_event_conn_io_accept(nxt_task_t *task, void *obj, void *data)
     nxt_event_conn_listen_t  *cls;

     cls = obj;
-    c = data;
+    c = cls->next;

     cls->ready--;
     cls->socket.read_ready = (cls->ready != 0);

@@ -31,9 +31,8 @@ typedef struct {
     uint8_t     socklen;
     uint8_t     address_length;

     uint32_t    count;
     uint32_t    mem_pool_size;

     void        *servers;
 } nxt_listen_socket_t;

src/nxt_router.c: 1082 lines (diff suppressed because it is too large)

src/nxt_router.h (new file, 78 lines)
@@ -0,0 +1,78 @@

/*
 * Copyright (C) Igor Sysoev
 * Copyright (C) NGINX, Inc.
 */

#ifndef _NXT_ROUTER_H_INCLUDED_
#define _NXT_ROUTER_H_INCLUDED_


#include <nxt_main.h>
#include <nxt_runtime.h>
#include <nxt_master_process.h>


typedef struct {
    nxt_thread_spinlock_t  lock;
    nxt_queue_t            engines;

    nxt_queue_t            sockets;
} nxt_router_t;


typedef struct {
    uint32_t        count;
    uint32_t        threads;
    nxt_router_t    *router;
    nxt_mem_pool_t  *mem_pool;
} nxt_router_conf_t;


typedef struct {
    nxt_event_engine_t  *engine;
    nxt_task_t          task;
    nxt_array_t         *creating;  /* of nxt_work_t */
    nxt_array_t         *updating;  /* of nxt_work_t */
    nxt_array_t         *deleting;  /* of nxt_work_t */
} nxt_router_engine_conf_t;


typedef struct {
    nxt_queue_t        creating;
    nxt_queue_t        pending;
    nxt_queue_t        updating;
    nxt_queue_t        keeping;
    nxt_queue_t        deleting;

    uint32_t           new_threads;

    nxt_array_t        *engines;
    nxt_router_conf_t  *conf;
    nxt_mem_pool_t     *mem_pool;
} nxt_router_temp_conf_t;


typedef struct {
    uint32_t             count;
    nxt_listen_socket_t  listen;
    nxt_queue_link_t     link;
    nxt_router_conf_t    *router_conf;

    size_t               header_buffer_size;
    size_t               large_header_buffer_size;
    nxt_msec_t           header_read_timeout;
} nxt_socket_conf_t;


typedef struct {
    uint32_t            count;
    nxt_queue_link_t    link;
    nxt_event_engine_t  *engine;
    nxt_socket_conf_t   *socket_conf;

    /* Modules configurations. */
} nxt_socket_conf_joint_t;


#endif /* _NXT_ROUTER_H_INCLUDED_ */

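The count fields on nxt_socket_conf_t and nxt_socket_conf_joint_t suggest plain reference counting: each engine's joint pins a socket configuration until the last user lets go. A generic single-threaded sketch of that lifetime rule (the names and the locking strategy are assumptions, since the nxt_router.c diff is suppressed above):

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct {
        unsigned  count;   /* references held by per-engine joints */
        int       port;    /* stand-in for the real socket settings */
    } socket_conf_t;

    static socket_conf_t *
    conf_use(socket_conf_t *conf)
    {
        conf->count++;     /* a real router would hold a lock here or
                              confine the counter to one thread */
        return conf;
    }

    static void
    conf_release(socket_conf_t *conf)
    {
        if (--conf->count == 0) {
            free(conf);    /* last joint gone: configuration dies */
        }
    }

    int
    main(void)
    {
        socket_conf_t  *conf = calloc(1, sizeof(socket_conf_t));

        conf_use(conf);    /* joint on engine A */
        conf_use(conf);    /* joint on engine B */

        conf_release(conf);
        conf_release(conf);   /* frees the configuration here */
        return 0;
    }
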
@@ -253,20 +253,10 @@ nxt_runtime_systemd_listen_sockets(nxt_task_t *task, nxt_runtime_t *rt)
 static nxt_int_t
 nxt_runtime_event_engines(nxt_task_t *task, nxt_runtime_t *rt)
 {
-    nxt_event_engine_t           *engine, **e;
+    nxt_thread_t                 *thread;
+    nxt_event_engine_t           *engine;
     const nxt_event_interface_t  *interface;

-    rt->engines = nxt_array_create(rt->mem_pool, 1,
-                                   sizeof(nxt_event_engine_t *));
-    if (nxt_slow_path(rt->engines == NULL)) {
-        return NXT_ERROR;
-    }
-
-    e = nxt_array_add(rt->engines);
-    if (nxt_slow_path(e == NULL)) {
-        return NXT_ERROR;
-    }
-
     interface = nxt_service_get(rt->services, "engine", NULL);

     if (nxt_slow_path(interface == NULL)) {
@@ -281,8 +271,14 @@ nxt_runtime_event_engines(nxt_task_t *task, nxt_runtime_t *rt)
         return NXT_ERROR;
     }

+    thread = task->thread;
+    thread->engine = engine;
+    thread->fiber = &engine->fibers->fiber;
+
     engine->id = rt->last_engine_id++;
-    *e = engine;
+
+    nxt_queue_init(&rt->engines);
+    nxt_queue_insert_tail(&rt->engines, &engine->link);

     return NXT_OK;
 }

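With this change rt->engines stops being an nxt_array_t of pointers and becomes an intrusive queue: every engine carries its own nxt_queue_link_t (the link field added to nxt_event_engine_s earlier in this commit), and nxt_queue_link_data() recovers the engine from its link with the container-of arithmetic shown above. A self-contained sketch of such an intrusive queue, with simplified names:

    #include <stddef.h>
    #include <stdio.h>

    /* Minimal intrusive doubly linked queue in the spirit of nxt_queue_t. */
    typedef struct queue_link_s  queue_link_t;

    struct queue_link_s {
        queue_link_t  *prev;
        queue_link_t  *next;
    };

    typedef struct {
        unsigned      id;
        queue_link_t  link;   /* engine lives on the runtime queue */
    } engine_t;

    #define queue_link_data(lnk, type, field)                             \
        ((type *) ((char *) (lnk) - offsetof(type, field)))

    static void
    queue_init(queue_link_t *head)
    {
        head->prev = head;
        head->next = head;
    }

    static void
    queue_insert_tail(queue_link_t *head, queue_link_t *lnk)
    {
        lnk->next = head;
        lnk->prev = head->prev;
        head->prev->next = lnk;
        head->prev = lnk;
    }

    int
    main(void)
    {
        queue_link_t  engines;
        engine_t      e0 = { .id = 0 }, e1 = { .id = 1 };
        engine_t     *first;

        queue_init(&engines);
        queue_insert_tail(&engines, &e0.link);
        queue_insert_tail(&engines, &e1.link);

        /* The "first engine" lookup used by nxt_app_thread() and
         * nxt_runtime_event_engine_free() in this commit. */
        first = queue_link_data(engines.next, engine_t, link);
        printf("first engine id: %u\n", first->id);   /* 0 */
        return 0;
    }
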
@@ -587,12 +583,13 @@ nxt_runtime_event_engine_change(nxt_task_t *task, nxt_runtime_t *rt)
 void
 nxt_runtime_event_engine_free(nxt_runtime_t *rt)
 {
-    nxt_event_engine_t  *engine, **engines;
+    nxt_queue_link_t    *link;
+    nxt_event_engine_t  *engine;

-    engines = rt->engines->elts;
-    engine = engines[0];
-    nxt_array_remove(rt->engines, &engines[0]);
+    link = nxt_queue_first(&rt->engines);
+    nxt_queue_remove(link);
+
+    engine = nxt_queue_link_data(link, nxt_event_engine_t, link);
     nxt_event_engine_free(engine);
 }

@@ -19,7 +19,7 @@ struct nxt_runtime_s {
     nxt_array_t         *listen_sockets;  /* of nxt_listen_socket_t */

     nxt_array_t         *services;        /* of nxt_service_t */
-    nxt_array_t         *engines;         /* of nxt_event_engine_t */
+    void                *data;

     nxt_runtime_cont_t  start;

@@ -61,6 +61,8 @@ struct nxt_runtime_s {
     const char           *pid;
     const char           *error_log;

+    nxt_queue_t          engines;  /* of nxt_event_engine_t */
+
     nxt_sockaddr_t       *controller_listen;
     nxt_listen_socket_t  *controller_socket;
     nxt_str_t            upstream;

@@ -141,7 +141,7 @@ nxt_signal_thread_start(nxt_event_engine_t *engine)

     if (nxt_fast_path(link != NULL)) {
         link->start = nxt_signal_thread;
-        link->data = engine;
+        link->work.data = engine;

         if (nxt_thread_create(&engine->signals->thread, link) == NXT_OK) {
             engine->signals->process = nxt_pid;

@@ -114,9 +114,9 @@ nxt_thread_trampoline(void *data)
     pthread_cleanup_push(nxt_thread_time_cleanup, thr);

     start = link->start;
-    data = link->data;
+    data = link->work.data;

-    if (link->engine != NULL) {
+    if (link->work.handler != NULL) {
         thr->link = link;

     } else {
@@ -181,6 +181,7 @@ void
 nxt_thread_exit(nxt_thread_t *thr)
 {
     nxt_thread_link_t   *link;
+    nxt_event_engine_t  *engine;

     nxt_log_debug(thr->log, "thread exit");

@@ -189,13 +190,14 @@ nxt_thread_exit(nxt_thread_t *thr)

     if (link != NULL) {
         /*
-         * link->handler is already set to an exit handler,
-         * and link->task is already set to engine->task.
+         * link->work.handler is already set to an exit handler,
+         * and link->work.task is already set to the correct engine->task.
          * The link should be freed by the exit handler.
          */
         link->work.obj = (void *) (uintptr_t) thr->handle;
+        engine = nxt_container_of(link->work.task, nxt_event_engine_t, task);

-        nxt_event_engine_post(link->engine, &link->work);
+        nxt_event_engine_post(engine, &link->work);
     }

     nxt_thread_time_free(thr);

@@ -90,7 +90,6 @@ typedef void (*nxt_thread_start_t)(void *data);

 typedef struct {
     nxt_thread_start_t  start;
-    void                *data;
     nxt_event_engine_t  *engine;
     nxt_work_t          work;
 } nxt_thread_link_t;
@@ -180,6 +179,7 @@ struct nxt_thread_s {

     nxt_runtime_t       *runtime;
     nxt_event_engine_t  *engine;
+    void                *data;

     /*
      * Although pointer to a current fiber should be a property of

@@ -10,6 +10,7 @@
 static nxt_int_t nxt_thread_pool_init(nxt_thread_pool_t *tp);
 static void nxt_thread_pool_exit(nxt_task_t *task, void *obj, void *data);
 static void nxt_thread_pool_start(void *ctx);
+static void nxt_thread_pool_loop(void *ctx);
 static void nxt_thread_pool_wait(nxt_thread_pool_t *tp);

@@ -86,13 +87,8 @@ nxt_thread_pool_init(nxt_thread_pool_t *tp)

     if (nxt_fast_path(link != NULL)) {
         link->start = nxt_thread_pool_start;
-        link->data = tp;
-        link->engine = tp->engine;
-        /*
-         * link->exit is not used.  link->engine is used just to
-         * set thr->link by nxt_thread_trampoline() and the link
-         * is a mark of the first thread of pool.
-         */
+        link->work.data = tp;

         if (nxt_thread_create(&handle, link) == NXT_OK) {
             tp->ready = 1;
             goto done;
@@ -117,6 +113,22 @@ done:

 static void
 nxt_thread_pool_start(void *ctx)
 {
+    nxt_thread_t       *thr;
+    nxt_thread_pool_t  *tp;
+
+    tp = ctx;
+    thr = nxt_thread();
+
+    tp->main = thr->handle;
+    tp->task.thread = thr;
+
+    nxt_thread_pool_loop(ctx);
+}
+
+
+static void
+nxt_thread_pool_loop(void *ctx)
+{
     void        *obj, *data;
     nxt_task_t  *task;
@@ -127,17 +139,6 @@ nxt_thread_pool_start(void *ctx)
     tp = ctx;
     thr = nxt_thread();

-    if (thr->link != NULL) {
-        /* Only the first thread has a link. */
-        tp->main = thr->handle;
-        nxt_free(thr->link);
-        thr->link = NULL;
-
-        tp->task.thread = thr;
-    }
-
     thr->thread_pool = tp;

     if (tp->init != NULL) {
         tp->init();
     }
@@ -215,8 +216,8 @@ nxt_thread_pool_wait(nxt_thread_pool_t *tp)
     link = nxt_zalloc(sizeof(nxt_thread_link_t));

     if (nxt_fast_path(link != NULL)) {
-        link->start = nxt_thread_pool_start;
-        link->data = tp;
+        link->start = nxt_thread_pool_loop;
+        link->work.data = tp;

         if (nxt_thread_create(&handle, link) != NXT_OK) {
             (void) nxt_atomic_fetch_add(&tp->threads, -1);
@@ -232,6 +233,8 @@ nxt_thread_pool_destroy(nxt_thread_pool_t *tp)

     thr = nxt_thread();

+    nxt_log_debug(thr->log, "thread pool destroy: %d", tp->ready);
+
     if (!tp->ready) {
         nxt_work_queue_add(&thr->engine->fast_work_queue, tp->exit,
                            &tp->task, tp, NULL);

@@ -123,7 +123,7 @@ nxt_time_thread_start(nxt_msec_t interval)

     if (nxt_fast_path(link != NULL)) {
         link->start = nxt_time_thread;
-        link->data = (void *) (uintptr_t) interval;
+        link->work.data = (void *) (uintptr_t) interval;

         (void) nxt_thread_create(&handle, link);
     }

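A pattern repeated across nxt_signal.c, nxt_thread.c, nxt_thread_pool.c, and nxt_thread_time.c above is that thread startup arguments move from nxt_thread_link_t's own data field into the embedded work record (link->work.data), so a single structure carries both the start argument and the later exit work item. A pthread-based sketch of the same shape (field names are illustrative):

    #include <pthread.h>
    #include <stdio.h>

    typedef struct {
        void  (*handler)(void *obj, void *data);
        void   *obj;
        void   *data;
    } work_t;

    typedef struct {
        void  (*start)(void *data);
        work_t  work;            /* embedded; owns the start argument */
    } thread_link_t;

    static void *
    trampoline(void *arg)
    {
        thread_link_t  *link = arg;

        /* Read the argument out of the embedded work record, as
         * nxt_thread_trampoline() does after this commit. */
        link->start(link->work.data);
        return NULL;
    }

    static void
    say(void *data)
    {
        printf("started with: %s\n", (char *) data);
    }

    int
    main(void)
    {
        pthread_t      handle;
        thread_link_t  link = { .start = say, .work.data = "hello" };

        pthread_create(&handle, NULL, trampoline, &link);
        pthread_join(handle, NULL);
        return 0;
    }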