Initial version.

This commit is contained in:
Igor Sysoev
2017-01-17 20:00:00 +03:00
commit 16cbf3c076
235 changed files with 56359 additions and 0 deletions

114
auto/atomic Normal file
View File

@@ -0,0 +1,114 @@
# Copyright (C) Igor Sysoev
# Copyright (C) NGINX, Inc.

# Detect builtin atomic operations.  Probes, in order: GCC 4.1+ __sync
# builtins, Solaris <atomic.h> functions, and AIX xlC builtins.  Each probe
# compiles AND runs a small program via auto/feature (nxt_feature_run=yes),
# which sets $nxt_found and appends the feature macro to the config header.
# Configuration aborts if no atomic operations are available.

# GCC 4.1+ builtin atomic operations.

nxt_feature="GCC builtin atomic operations"
nxt_feature_name=NXT_HAVE_GCC_ATOMIC
nxt_feature_run=yes
nxt_feature_incs=
nxt_feature_libs=
# The test checks the old values returned at every step:
# CAS moves n 0 -> 3; fetch-and-add returns 3 (n -> 4); test-and-set
# returns 4 (n -> 5); __sync_lock_release() stores 0.
nxt_feature_test="int main() {
long n = 0;
if (!__sync_bool_compare_and_swap(&n, 0, 3))
return 1;
if (__sync_fetch_and_add(&n, 1) != 3)
return 1;
if (__sync_lock_test_and_set(&n, 5) != 4)
return 1;
if (n != 5)
return 1;
__sync_lock_release(&n);
if (n != 0)
return 1;
return 0;
}"
. auto/feature

# Solaris 10 builtin atomic operations.

if [ $nxt_found = no ]; then
nxt_feature="Solaris builtin atomic operations"
nxt_feature_name=NXT_HAVE_SOLARIS_ATOMIC
nxt_feature_run=yes
nxt_feature_incs=
nxt_feature_libs=
# Same sequence as above with <atomic.h>: note atomic_add_long_nv()
# returns the NEW value (4), unlike __sync_fetch_and_add().
nxt_feature_test="#include <atomic.h>
int main() {
ulong_t n = 0;
if (atomic_cas_ulong(&n, 0, 3) != 0)
return 1;
if (atomic_add_long_nv(&n, 1) != 4)
return 1;
if (atomic_swap_ulong(&n, 5) != 4)
return 1;
if (n != 5)
return 1;
return 0;
}"
. auto/feature
fi

# AIX xlC builtin atomic operations.

if [ $nxt_found = no ]; then
# xlC exposes different builtins for 64-bit ("lp" suffix, long operands)
# and 32-bit (int operands) compilation modes.
if [ $NXT_64BIT = 1 ]; then
nxt_feature_test="int main() {
long n = 0;
long o = 0;
if (!__compare_and_swaplp(&n, &o, 3))
return 1;
if (__fetch_and_addlp(&n, 1) != 3)
return 1;
if (__fetch_and_swaplp(&n, 5) != 4)
return 1;
if (n != 5)
return 1;
__isync();
__lwsync();
return 0;
}"
else
nxt_feature_test="int main() {
int n = 0;
int o = 0;
if (!__compare_and_swap(&n, &o, 3))
return 1;
if (__fetch_and_add(&n, 1) != 3)
return 1;
if (__fetch_and_swap(&n, 5) != 4)
return 1;
if (n != 5)
return 1;
__isync();
__lwsync();
return 0;
}"
fi
nxt_feature="xlC builtin atomic operations"
nxt_feature_name=NXT_HAVE_XLC_ATOMIC
nxt_feature_run=yes
nxt_feature_incs=
nxt_feature_libs=
. auto/feature
fi

# Atomic operations are mandatory; stop configuration without them.

if [ $nxt_found = no ]; then
$echo
$echo $0: error: no atomic operations found.
$echo
exit 1;
fi

209
auto/cc/test Normal file
View File

@@ -0,0 +1,209 @@
# Copyright (C) Igor Sysoev
# Copyright (C) NGINX, Inc.

# Detect the C compiler vendor by grepping the version output of $CC and
# select vendor-specific flags into $NXT_CFLAGS.  Results: $NXT_CC_NAME
# (gcc/clang/SunC/xlC/ICC/MSVC/cc) and $NXT_CC_VERSION.

$echo checking for C compiler: $CC
cat << END >> $NXT_AUTOCONF_ERR
----------------------------------------
checking for C compiler: $CC
END

# Allow error exit status.
set +e

# The quotes keep this a two-argument "-z STRING" test even when "which"
# prints nothing or a path that contains spaces.
if [ -z "`which $CC`" ]; then
$echo
$echo $0: error: $CC not found.
$echo
exit 1;
fi

# Each probe pipes "$CC -v" (or -V / -qversion) into grep, appending the
# matched line to $NXT_AUTOCONF_ERR.  Since grep's stdout is redirected,
# the backtick substitution expands to an empty command and the "if"
# effectively tests grep's exit status.

if `/bin/sh -c "($CC -v)" 2>&1 | grep "gcc version" >> $NXT_AUTOCONF_ERR 2>&1`
then
NXT_CC_NAME=gcc
$echo " + using GNU C compiler"
NXT_CC_VERSION=`/bin/sh -c "($CC -v)" 2>&1 | grep "gcc version" 2>&1`
$echo " + $NXT_CC_VERSION"
else
if `/bin/sh -c "($CC -v)" 2>&1 | grep "clang version" >> $NXT_AUTOCONF_ERR 2>&1`
then
NXT_CC_NAME=clang
$echo " + using Clang C compiler"
NXT_CC_VERSION=`/bin/sh -c "($CC -v)" 2>&1 | grep "clang version" 2>&1`
$echo " + $NXT_CC_VERSION"
else
if `/bin/sh -c "($CC -v)" 2>&1 \
| grep "Apple LLVM version" >> $NXT_AUTOCONF_ERR 2>&1`
then
NXT_CC_NAME=clang
$echo " + using Clang C compiler"
NXT_CC_VERSION=`/bin/sh -c "($CC -v)" 2>&1 | grep "Apple LLVM version" 2>&1`
$echo " + $NXT_CC_VERSION"
else
if `/bin/sh -c "($CC -V)" 2>&1 | grep "Sun C" >> $NXT_AUTOCONF_ERR 2>&1`
then
NXT_CC_NAME=SunC
$echo " + using Sun C compiler"
NXT_CC_VERSION=`/bin/sh -c "($CC -V)" 2>&1 | grep "Sun C" 2>&1`
$echo " + $NXT_CC_VERSION"
else
if `/bin/sh -c "($CC -qversion)" 2>&1 \
| grep "^IBM XL" >> $NXT_AUTOCONF_ERR 2>&1`
then
NXT_CC_NAME=xlC
$echo " + using AIX xlC compiler"
NXT_CC_VERSION=`/bin/sh -c "($CC -qversion)" 2>&1 | grep "IBM XL" 2>&1`
$echo " + $NXT_CC_VERSION"
else
if `/bin/sh -c "($CC -V)" 2>&1 | grep "Intel(R) C" >> $NXT_AUTOCONF_ERR 2>&1`
then
NXT_CC_NAME=ICC
$echo " + using Intel C++ compiler"
NXT_CC_VERSION=ICC
else
if `/bin/sh -c "($CC -v)" 2>&1 \
| grep "Microsoft (R) 32-bit C/C" >> $NXT_AUTOCONF_ERR 2>&1`
then
NXT_CC_NAME=MSVC
$echo " + using MS Visual C++ compiler"
NXT_CC_VERSION=MSVC
else
NXT_CC_NAME=cc
NXT_CC_VERSION=cc
fi # MSVC
fi # ICC
fi # xlC
fi # SunC
fi # Apple LLVM clang
fi # clang
fi # gcc

# Vendor-specific compiler flags.

case $NXT_CC_NAME in

gcc)
nxt_have=NXT_GCC . auto/have

NXT_CFLAGS="$NXT_CFLAGS -pipe"
NXT_CFLAGS="$NXT_CFLAGS -fPIC"

# Do not export symbols except explicitly marked with NXT_EXPORT.
NXT_CFLAGS="$NXT_CFLAGS -fvisibility=hidden"

# c99/gnu99 conflict with Solaris XOPEN.
#NXT_CFLAGS="$NXT_CFLAGS -std=gnu99"

NXT_CFLAGS="$NXT_CFLAGS -O"
#NXT_CFLAGS="$NXT_CFLAGS -O0"
NXT_CFLAGS="$NXT_CFLAGS -W -Wall -Wextra"
#NXT_CFLAGS="$NXT_CFLAGS -Wunused-result"
NXT_CFLAGS="$NXT_CFLAGS -Wno-unused-parameter"
#NXT_CFLAGS="$NXT_CFLAGS -Wshorten-64-to-32"
NXT_CFLAGS="$NXT_CFLAGS -Wwrite-strings"

# -O2 enables -fstrict-aliasing and -fstrict-overflow.
#NXT_CFLAGS="$NXT_CFLAGS -O2"
#NXT_CFLAGS="$NXT_CFLAGS -Wno-strict-aliasing"
#NXT_CFLAGS="$NXT_CFLAGS -fomit-frame-pointer"
#NXT_CFLAGS="$NXT_CFLAGS -momit-leaf-frame-pointer"

# -Wstrict-overflow is supported by GCC 4.2+.
#NXT_CFLAGS="$NXT_CFLAGS -Wstrict-overflow=5"

NXT_CFLAGS="$NXT_CFLAGS -Wmissing-prototypes"

# Stop on warning.
NXT_CFLAGS="$NXT_CFLAGS -Werror"

# Debug.
NXT_CFLAGS="$NXT_CFLAGS -g"
;;

clang)
nxt_have=NXT_CLANG . auto/have

NXT_CFLAGS="$NXT_CFLAGS -pipe"
NXT_CFLAGS="$NXT_CFLAGS -fPIC"

# Do not export symbols except explicitly marked with NXT_EXPORT.
NXT_CFLAGS="$NXT_CFLAGS -fvisibility=hidden"

NXT_CFLAGS="$NXT_CFLAGS -O"
#NXT_CFLAGS="$NXT_CFLAGS -O0"
NXT_CFLAGS="$NXT_CFLAGS -W -Wall -Wextra"
#NXT_CFLAGS="$NXT_CFLAGS -Wunused-result"
NXT_CFLAGS="$NXT_CFLAGS -Wno-unused-parameter"
#NXT_CFLAGS="$NXT_CFLAGS -Wshorten-64-to-32"
NXT_CFLAGS="$NXT_CFLAGS -Wwrite-strings"
#NXT_CFLAGS="$NXT_CFLAGS -O2"
#NXT_CFLAGS="$NXT_CFLAGS -fomit-frame-pointer"
NXT_CFLAGS="$NXT_CFLAGS -fstrict-aliasing"
NXT_CFLAGS="$NXT_CFLAGS -Wstrict-overflow=5"
NXT_CFLAGS="$NXT_CFLAGS -Wmissing-prototypes"

# Stop on warning.
NXT_CFLAGS="$NXT_CFLAGS -Werror"

# Debug.
if [ "$NXT_SYSTEM_PLATFORM" != "powerpc" ]; then
# "-g" flag causes the "unknown pseudo-op: `.cfi_sections'"
# error on PowerPC Clang.
NXT_CFLAGS="$NXT_CFLAGS -g"
fi
;;

SunC)
nxt_have=NXT_SUNC . auto/have

NXT_CFLAGS="$NXT_CFLAGS -fPIC"

# Optimization.
NXT_CFLAGS="$NXT_CFLAGS -O -fast"

# Stop on warning.
NXT_CFLAGS="$NXT_CFLAGS -errwarn=%all"

# Debug.
NXT_CFLAGS="$NXT_CFLAGS -g"
;;

xlC)
nxt_have=NXT_XLC . auto/have

#NXT_CFLAGS="$NXT_CFLAGS -qalloca"

# alloca support.
NXT_CFLAGS="$NXT_CFLAGS -qlanglvl=extc99"

# __thread support.
NXT_CFLAGS="$NXT_CFLAGS -qtls"

# Suppress warning
# 1506-159 (E) Bit field type specified for XXX is not valid.
# Type unsigned assumed.
NXT_CFLAGS="$NXT_CFLAGS -qsuppress=1506-159"
;;

ICC)
;;

MSVC)
;;

*)
;;

esac

# Stop on error exit status again.
set -e

135
auto/clang Normal file
View File

@@ -0,0 +1,135 @@
# Copyright (C) Igor Sysoev
# Copyright (C) NGINX, Inc.

# C language features.

# NOTE: auto/feature consumes nxt_feature_incs (not nxt_feature_path),
# so every probe below clears nxt_feature_incs to avoid inheriting a
# stale value from an earlier probe.

nxt_feature="C99 variadic macro"
nxt_feature_name=NXT_HAVE_C99_VARIADIC_MACRO
nxt_feature_run=yes
nxt_feature_incs=
nxt_feature_libs=
nxt_feature_test="#include <stdio.h>
#define set(dummy, ...) sprintf(__VA_ARGS__)
int main() {
char buf[4];
buf[0] = '0';
set(0, buf, \"%d\", 1);
if (buf[0] == '1')
return 0;
return 1;
}"
. auto/feature

if [ $nxt_found = no ]; then
# Pre-C99 GNU extension: named variadic macro arguments.
nxt_feature="GCC variadic macro"
nxt_feature_name=NXT_HAVE_GCC_VARIADIC_MACRO
nxt_feature_run=yes
nxt_feature_incs=
nxt_feature_libs=
nxt_feature_test="#include <stdio.h>
#define set(dummy, args...) sprintf(args)
int main() {
char buf[4];
buf[0] = '0';
set(0, buf, \"%d\", 1);
if (buf[0] == '1')
return 0;
return 1;
}"
. auto/feature
fi

# Compile-only checks (nxt_feature_run=no / empty) for GCC builtins
# and attributes.

nxt_feature="GCC __builtin_expect()"
nxt_feature_name=NXT_HAVE_BUILTIN_EXPECT
nxt_feature_run=no
nxt_feature_incs=
nxt_feature_libs=
nxt_feature_test="int main(int argc, char *const *argv) {
if ((__typeof__(argc == 0))
__builtin_expect((argc == 0), 0))
return 0;
return 1;
}"
. auto/feature

nxt_feature="GCC __builtin_unreachable()"
nxt_feature_name=NXT_HAVE_BUILTIN_UNREACHABLE
nxt_feature_run=no
nxt_feature_incs=
nxt_feature_libs=
nxt_feature_test="int main() {
__builtin_unreachable();
}"
. auto/feature

nxt_feature="GCC __builtin_prefetch()"
nxt_feature_name=NXT_HAVE_BUILTIN_PREFETCH
nxt_feature_run=no
nxt_feature_incs=
nxt_feature_libs=
nxt_feature_test="int main() {
__builtin_prefetch(0);
}"
. auto/feature

nxt_feature="GCC __attribute__ visibility"
nxt_feature_name=NXT_HAVE_GCC_ATTRIBUTE_VISIBILITY
nxt_feature_run=
nxt_feature_incs=
nxt_feature_libs=
nxt_feature_test="int n __attribute__ ((visibility(\"default\")));
int main() {
return 1;
}"
. auto/feature

nxt_feature="GCC __attribute__ aligned"
nxt_feature_name=NXT_HAVE_GCC_ATTRIBUTE_ALIGNED
nxt_feature_run=
nxt_feature_incs=
nxt_feature_libs=
nxt_feature_test="int n __attribute__ ((aligned(64)));
int main() {
return 1;
}"
. auto/feature

nxt_feature="GCC __attribute__ malloc"
nxt_feature_name=NXT_HAVE_GCC_ATTRIBUTE_MALLOC
nxt_feature_run=
nxt_feature_incs=
nxt_feature_libs=
nxt_feature_test="#include <stdlib.h>
void *f(void) __attribute__ ((__malloc__));
void *f(void) {
return malloc(1);
}
int main() {
if (f() != NULL) {
return 1;
}
return 0;
}"
. auto/feature

3
auto/echo/Makefile Normal file
View File

@@ -0,0 +1,3 @@
# Build the portable "echo" helper for Windows with the MinGW cross
# compiler; on Unix the program is built by auto/echo/build instead.
echo.exe: echo.c
mingw32-gcc -o echo.exe -O2 echo.c

26
auto/echo/build Normal file
View File

@@ -0,0 +1,26 @@
# Copyright (C) Igor Sysoev
# Copyright (C) NGINX, Inc.

# Build the portable "echo" helper used by the rest of the configure
# scripts and point the $echo variable at it.

$echo 'building an "echo" program'

rm -f "$NXT_BUILD_DIR/echo"

# The command is kept in a variable (expanded unquoted below so the
# shell word-splits it) to be able to print it on failure.
nxt_echo_test="$CC -o $NXT_BUILD_DIR/echo -O $NXT_CC_OPT
auto/echo/echo.c $NXT_LD_OPT"
nxt_echo_err=`$nxt_echo_test 2>&1`

# Quoted so the -x test stays well-formed even if the path has spaces.
if [ ! -x "$NXT_BUILD_DIR/echo" ]; then
$echo
$echo $0: error: cannot build an \"echo\" program:
$echo
$echo $nxt_echo_test
$echo
$echo $nxt_echo_err
$echo
exit 1
fi

echo=$NXT_BUILD_DIR/echo

43
auto/echo/echo.c Normal file
View File

@@ -0,0 +1,43 @@
/*
* Copyright (C) Igor Sysoev
* Copyright (C) NGINX, Inc.
*
* A portable "echo" program that supports "-n" option:
* echo Hello world!
* echo "Hello world!"
* echo -n Hello world!
* echo
*
* It also passes "\c" characters as is.
*/
#include <stdio.h>
#include <string.h>
int
main(int argc, char *const *argv)
{
int i = 1;
int nl = 1;
if (argc > 1) {
if (strcmp(argv[1], "-n") == 0) {
nl = 0;
i++;
}
while (i < argc) {
printf("%s%s", argv[i], (i == argc - 1) ? "" : " ");
i++;
}
}
if (nl) {
printf("\n");
}
return 0;
}

196
auto/events Normal file
View File

@@ -0,0 +1,196 @@
# Copyright (C) Igor Sysoev
# Copyright (C) NGINX, Inc.

# Detect kernel event notification mechanisms.  Each probe goes through
# auto/feature; besides the C macro, a shell-level NXT_HAVE_* YES/NO
# variable records the result for later configure steps.

# Linux epoll.

nxt_feature="Linux epoll"
nxt_feature_name=NXT_HAVE_EPOLL
nxt_feature_run=
nxt_feature_incs=
nxt_feature_libs=
nxt_feature_test="#include <sys/epoll.h>
#include <unistd.h>
int main() {
int n;
n = epoll_create(1);
close(n);
return 0;
}"
. auto/feature

if [ $nxt_found = yes ]; then
NXT_HAVE_EPOLL=YES

# epoll companions, only probed when epoll itself is present.

nxt_feature="Linux signalfd()"
nxt_feature_name=NXT_HAVE_SIGNALFD
nxt_feature_run=
nxt_feature_incs=
nxt_feature_libs=
nxt_feature_test="#include <signal.h>
#include <sys/signalfd.h>
#include <unistd.h>
int main() {
int n;
sigset_t mask;
sigemptyset(&mask);
n = signalfd(-1, &mask, 0);
close(n);
return 0;
}"
. auto/feature

nxt_feature="Linux eventfd()"
nxt_feature_name=NXT_HAVE_EVENTFD
nxt_feature_run=
nxt_feature_incs=
nxt_feature_libs=
nxt_feature_test="#include <sys/eventfd.h>
#include <unistd.h>
int main() {
int n;
n = eventfd(0, 0);
close(n);
return 0;
}"
. auto/feature

else
NXT_HAVE_EPOLL=NO
fi

# FreeBSD, MacOSX, NetBSD, OpenBSD kqueue.

nxt_feature="kqueue"
nxt_feature_name=NXT_HAVE_KQUEUE
nxt_feature_run=
nxt_feature_incs=
nxt_feature_libs=
nxt_feature_test="#include <sys/types.h>
#include <sys/event.h>
#include <unistd.h>
int main() {
int n;
n = kqueue();
close(n);
return 0;
}"
. auto/feature

if [ $nxt_found = yes ]; then
NXT_HAVE_KQUEUE=YES

nxt_feature="kqueue EVFILT_USER"
nxt_feature_name=NXT_HAVE_EVFILT_USER
nxt_feature_run=
nxt_feature_incs=
nxt_feature_libs=
nxt_feature_test="#include <stdlib.h>
#include <sys/types.h>
#include <sys/event.h>
int main() {
struct kevent kev;
kev.filter = EVFILT_USER;
kevent(0, &kev, 1, NULL, 0, NULL);
return 0;
}"
. auto/feature

else
NXT_HAVE_KQUEUE=NO
fi

# Solaris event port.

nxt_feature="Solaris event port"
nxt_feature_name=NXT_HAVE_EVENTPORT
nxt_feature_run=
nxt_feature_incs=
nxt_feature_libs=
nxt_feature_test="#include <port.h>
#include <unistd.h>
int main() {
int n;
n = port_create();
close(n);
return 0;
}"
. auto/feature

if [ $nxt_found = yes ]; then
NXT_HAVE_EVENTPORT=YES
else
NXT_HAVE_EVENTPORT=NO
fi

# Solaris, HP-UX, IRIX, Tru64 UNIX /dev/poll.
# nxt_feature_run=yes: the test actually opens /dev/poll on this host.

nxt_feature="/dev/poll"
nxt_feature_name=NXT_HAVE_DEVPOLL
nxt_feature_run=yes
nxt_feature_incs=
nxt_feature_libs=
nxt_feature_test="#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/devpoll.h>
#include <unistd.h>
int main() {
int n;
n = open(\"/dev/poll\", O_RDWR);
close(n);
return 0;
}"
. auto/feature

if [ $nxt_found = yes ]; then
NXT_HAVE_DEVPOLL=YES
else
NXT_HAVE_DEVPOLL=NO
fi

# AIX pollset.

nxt_feature="AIX pollset"
nxt_feature_name=NXT_HAVE_POLLSET
nxt_feature_run=yes
nxt_feature_incs=
nxt_feature_libs=
nxt_feature_test="#include <fcntl.h>
#include <sys/poll.h>
#include <sys/pollset.h>
#include <unistd.h>
int main() {
pollset_t n;
n = pollset_create(-1);
pollset_destroy(n);
return 0;
}"
. auto/feature

if [ $nxt_found = yes ]; then
NXT_HAVE_POLLSET=YES
else
NXT_HAVE_POLLSET=NO
fi

112
auto/feature Normal file
View File

@@ -0,0 +1,112 @@
# Copyright (C) Igor Sysoev
# Copyright (C) NGINX, Inc.

# Test a single feature.  Inputs, set by the caller before sourcing:
#   nxt_feature       human readable feature name;
#   nxt_feature_name  C macro to define on success;
#   nxt_feature_run   "yes": run the test program, success defines the
#                     macro to 1; "value": run it and use its stdout as
#                     the macro value; anything else: compile check only;
#   nxt_feature_incs  a single "-..." flags string, or a list of include
#                     directories to prefix with -I;
#   nxt_feature_libs  linker flags;
#   nxt_feature_test  C source of the test program.
# Outputs: $nxt_found is yes/no; on success the macro is appended to
# $NXT_AUTO_CONFIG_H.  All compiler output goes to $NXT_AUTOCONF_ERR.

$echo -n "checking for $nxt_feature ..."

cat << END >> $NXT_AUTOCONF_ERR
----------------------------------------
checking for $nxt_feature
END

nxt_found=no
nxt_feature_value=
nxt_feature_inc_path=

if test -n "$nxt_feature_incs"; then
case "$nxt_feature_incs" in
-*)
# Already compiler flags, pass through verbatim.
nxt_feature_inc_path="$nxt_feature_incs"
;;

*)
# Bare directory names: turn each into a -I option.
for nxt_temp in $nxt_feature_incs; do
nxt_feature_inc_path="$nxt_feature_inc_path -I $nxt_temp"
done
;;
esac
fi

cat << END > $NXT_AUTOTEST.c
$nxt_feature_test
END

nxt_test="$CC $CFLAGS $NXT_CFLAGS $NXT_CC_OPT $NXT_TEST_CFLAGS \
$nxt_feature_inc_path -o $NXT_AUTOTEST $NXT_AUTOTEST.c \
$NXT_LD_OPT $NXT_TEST_LIBS $nxt_feature_libs"

# /bin/sh -c "(...)" is to intercept "Killed", "Abort trap",
# "Segmentation fault", or other shell messages.
# "|| true" is to bypass "set -e" setting.

/bin/sh -c "($nxt_test || true)" >> $NXT_AUTOCONF_ERR 2>&1

if [ -x $NXT_AUTOTEST ]; then

case "$nxt_feature_run" in

value)
# Run the test program and capture its stdout as the macro value.
if /bin/sh -c "($NXT_AUTOTEST)" >> $NXT_AUTOCONF_ERR 2>&1; then
$echo >> $NXT_AUTOCONF_ERR
nxt_found=yes
nxt_feature_value=`$NXT_AUTOTEST`
$echo " $nxt_feature_value"

if [ -n "$nxt_feature_name" ]; then
cat << END >> $NXT_AUTO_CONFIG_H

#ifndef $nxt_feature_name
#define $nxt_feature_name $nxt_feature_value
#endif

END
fi

else
$echo " not found"
fi
;;

yes)
# Run the test program; exit status 0 means the feature works.
if /bin/sh -c "($NXT_AUTOTEST)" >> $NXT_AUTOCONF_ERR 2>&1; then
$echo " found"
nxt_found=yes

cat << END >> $NXT_AUTO_CONFIG_H

#ifndef $nxt_feature_name
#define $nxt_feature_name 1
#endif

END

else
$echo " found but is not working"
fi
;;

*)
# Compile-only check: a successful link is enough.
$echo " found"
nxt_found=yes

cat << END >> $NXT_AUTO_CONFIG_H

#ifndef $nxt_feature_name
#define $nxt_feature_name 1
#endif

END
;;

esac

else
# Compilation failed: log the test source and the compile command.
$echo " not found"

$echo "----------" >> $NXT_AUTOCONF_ERR
cat $NXT_AUTOTEST.c >> $NXT_AUTOCONF_ERR
$echo "----------" >> $NXT_AUTOCONF_ERR
$echo $nxt_test >> $NXT_AUTOCONF_ERR
$echo "----------" >> $NXT_AUTOCONF_ERR
fi

rm -rf $NXT_AUTOTEST*

51
auto/files Normal file
View File

@@ -0,0 +1,51 @@
# Copyright (C) Igor Sysoev
# Copyright (C) NGINX, Inc.

# Detect file readahead advisory interfaces (compile-only checks).

# Linux 2.6, FreeBSD 8.2, 9.1, Solaris 11.

nxt_feature="posix_fadvise()"
nxt_feature_name=NXT_HAVE_POSIX_FADVISE
nxt_feature_run=
nxt_feature_incs=
nxt_feature_libs=
nxt_feature_test="#include <fcntl.h>
int main() {
(void) posix_fadvise(0, 0, 0, POSIX_FADV_WILLNEED);
return 0;
}"
. auto/feature

# FreeBSD 8.0.

nxt_feature="fcntl(F_READAHEAD)"
nxt_feature_name=NXT_HAVE_READAHEAD
nxt_feature_run=
nxt_feature_incs=
nxt_feature_libs=
nxt_feature_test="#include <fcntl.h>
int main() {
(void) fcntl(0, F_READAHEAD, 1024);
return 0;
}"
. auto/feature

# MacOSX, FreeBSD 8.0.

nxt_feature="fcntl(F_RDAHEAD)"
nxt_feature_name=NXT_HAVE_RDAHEAD
nxt_feature_run=
nxt_feature_incs=
nxt_feature_libs=
nxt_feature_test="#include <fcntl.h>
int main() {
(void) fcntl(0, F_RDAHEAD, 1);
return 0;
}"
. auto/feature

12
auto/have Normal file
View File

@@ -0,0 +1,12 @@
# Copyright (C) Igor Sysoev
# Copyright (C) NGINX, Inc.

# Append an unconditional "#define $nxt_have 1" (guarded by #ifndef)
# to the auto config header.  The caller sets $nxt_have before sourcing:
#     nxt_have=NXT_FOO . auto/have

cat << END >> $NXT_AUTO_CONFIG_H

#ifndef $nxt_have
#define $nxt_have 1
#endif

END

255
auto/make Normal file
View File

@@ -0,0 +1,255 @@
# Copyright (C) Igor Sysoev
# Copyright (C) Valentin V. Bartenev
# Copyright (C) NGINX, Inc.

# Generate $NXT_MAKEFILE (library, executable, unit test rules) and a
# small top-level Makefile that forwards to it.

$echo "creating $NXT_MAKEFILE"

mkdir -p $NXT_BUILD_DIR/src \
$NXT_BUILD_DIR/test

# Toolchain variables shared by all generated rules.

cat << END > $NXT_MAKEFILE

CC = $CC

CFLAGS = $CFLAGS $NXT_CFLAGS $NXT_CC_OPT

NXT_EXEC_LINK = $NXT_EXEC_LINK $NXT_LD_OPT
NXT_SHARED_LOCAL_LINK = $NXT_SHARED_LOCAL_LINK $NXT_LD_OPT
NXT_MODULE_LINK = $NXT_MODULE_LINK $NXT_LD_OPT

END

# The include paths list.

$echo -n "NXT_LIB_INCS =" >> $NXT_MAKEFILE

for nxt_inc in src $NXT_BUILD_DIR
do
$echo -n " -I $nxt_inc" >> $NXT_MAKEFILE
done

$echo >> $NXT_MAKEFILE
$echo >> $NXT_MAKEFILE

# The include files dependences list.

$echo "NXT_LIB_DEPS = \\" >> $NXT_MAKEFILE

for nxt_dep in $NXT_LIB_DEPS $NXT_LIB_UNIT_TEST_DEPS $NXT_AUTO_CONFIG_H
do
$echo "	$nxt_dep \\" >> $NXT_MAKEFILE
done

$echo >> $NXT_MAKEFILE
$echo >> $NXT_MAKEFILE

# Library object files list.

$echo "NXT_LIB_OBJS = \\" >> $NXT_MAKEFILE

for nxt_src in $NXT_LIB_SRCS
do
# Map each .c source to its .o object in the build directory.
nxt_obj=`$echo $nxt_src | sed -e "s/\.c$/\.o/"`
$echo "	$NXT_BUILD_DIR/$nxt_obj \\" >> $NXT_MAKEFILE
done

$echo >> $NXT_MAKEFILE

# Shared and static library.

cat << END >> $NXT_MAKEFILE

libnxt:	$NXT_BUILD_DIR/$NXT_LIB_SHARED $NXT_BUILD_DIR/$NXT_LIB_STATIC

$NXT_BUILD_DIR/$NXT_LIB_SHARED:	\$(NXT_LIB_OBJS)
	\$(NXT_SHARED_LOCAL_LINK) -o $NXT_BUILD_DIR/$NXT_LIB_SHARED \\
		\$(NXT_LIB_OBJS) \\
		$NXT_LIBM $NXT_LIBS $NXT_LIB_AUX_LIBS

$NXT_BUILD_DIR/$NXT_LIB_STATIC:	\$(NXT_LIB_OBJS)
	$NXT_STATIC_LINK $NXT_BUILD_DIR/$NXT_LIB_STATIC \\
		\$(NXT_LIB_OBJS)

END

# Object files.

for nxt_src in $NXT_LIB_SRCS $NXT_LIB_UNIT_TEST_SRCS
do
nxt_obj=`$echo $nxt_src | sed -e "s/\.c$/\.o/"`
cat << END >> $NXT_MAKEFILE

$NXT_BUILD_DIR/$nxt_obj:	$nxt_src \$(NXT_LIB_DEPS)
	\$(CC) -c \$(CFLAGS) \$(NXT_LIB_INCS) $NXT_LIB_AUX_CFLAGS \\
	-o $NXT_BUILD_DIR/$nxt_obj \\
	$nxt_src
END
done

$echo >> $NXT_MAKEFILE

# Unit test object files list.

$echo "NXT_LIB_UNIT_TEST_OBJS = \\" >> $NXT_MAKEFILE

for nxt_src in $NXT_LIB_UNIT_TEST_SRCS
do
nxt_obj=`$echo $nxt_src | sed -e "s/\.c$/\.o/"`
$echo "	$NXT_BUILD_DIR/$nxt_obj \\" >> $NXT_MAKEFILE
done

# Unit test and utf8 test executables.

cat << END >> $NXT_MAKEFILE

$NXT_BUILD_DIR/lib_unit_test:	\$(NXT_LIB_UNIT_TEST_OBJS) \\
		$NXT_BUILD_DIR/$NXT_LIB_STATIC \$(NXT_LIB_DEPS)
	\$(NXT_EXEC_LINK) -o $NXT_BUILD_DIR/lib_unit_test \\
		\$(NXT_LIB_UNIT_TEST_OBJS) \\
		$NXT_BUILD_DIR/$NXT_LIB_STATIC \\
		$NXT_LD_OPT $NXT_LIBM $NXT_LIBS $NXT_LIB_AUX_LIBS

$NXT_BUILD_DIR/utf8_file_name_test:	$NXT_LIB_UTF8_FILE_NAME_TEST_SRCS \\
		$NXT_BUILD_DIR/$NXT_LIB_STATIC \$(NXT_LIB_DEPS)
	\$(CC) \$(CFLAGS) \$(NXT_LIB_INCS) $NXT_LIB_AUX_CFLAGS \\
		-o $NXT_BUILD_DIR/utf8_file_name_test \\
		$NXT_LIB_UTF8_FILE_NAME_TEST_SRCS \\
		$NXT_BUILD_DIR/$NXT_LIB_STATIC \\
		$NXT_LD_OPT $NXT_LIBM $NXT_LIBS

END

if [ $NXT_LIB_UNIT_TEST = YES ]; then
NXT_UNIT_TEST_TARGETS="$NXT_UNIT_TEST_TARGETS lib_test"
fi

NXT_MAKE_INCS="src $NXT_BUILD_DIR"
NXT_MAKE_DEPS="\$(NXT_LIB_DEPS) $NXT_DEPS"
NXT_MAKE_SRCS="$NXT_SRCS"

# The include pathes list.

$echo -n "NXT_INCS =" >> $NXT_MAKEFILE

for nxt_inc in $NXT_MAKE_INCS
do
$echo -n " -I $nxt_inc" >> $NXT_MAKEFILE
done

$echo >> $NXT_MAKEFILE
$echo >> $NXT_MAKEFILE

# The include files dependences list.

$echo "NXT_DEPS = \\" >> $NXT_MAKEFILE

for nxt_dep in $NXT_MAKE_DEPS
do
$echo "	$nxt_dep \\" >> $NXT_MAKEFILE
done

$echo >> $NXT_MAKEFILE
$echo >> $NXT_MAKEFILE

# Object files list.

nxt_modules_obj=`$echo $NXT_MODULES_SRC | sed -e "s/\.c$/\.o/"`

$echo "NXT_OBJS = \\" >> $NXT_MAKEFILE

for nxt_src in $NXT_MAKE_SRCS $NXT_MODULES_SRCS
do
nxt_obj=`$echo $nxt_src | sed -e "s/\.c$/\.o/"`
$echo "	$NXT_BUILD_DIR/$nxt_obj \\" >> $NXT_MAKEFILE
done

# The generated modules table object goes last, without a continuation.
$echo "	$nxt_modules_obj" >> $NXT_MAKEFILE

$echo >> $NXT_MAKEFILE

# nginext executable.

NXT_BIN=nginext

cat << END >> $NXT_MAKEFILE

$NXT_BUILD_DIR/$NXT_BIN:	$NXT_BUILD_DIR/$NXT_LIB_STATIC \\
	\$(NXT_OBJS)

	\$(NXT_EXEC_LINK) -o $NXT_BUILD_DIR/$NXT_BIN \\
	\$(NXT_OBJS) $NXT_BUILD_DIR/$NXT_LIB_STATIC \\
	$NXT_LIBM $NXT_LIBS $NXT_LIB_AUX_LIBS

END

# nginext object files.

for nxt_src in $NXT_MAKE_SRCS
do
nxt_obj=`$echo $nxt_src | sed -e "s/\.c$/\.o/"`
cat << END >> $NXT_MAKEFILE

$NXT_BUILD_DIR/$nxt_obj:	$nxt_src \$(NXT_DEPS)
	\$(CC) -c \$(CFLAGS) \$(NXT_INCS) \\
	$NXT_LIB_AUX_CFLAGS \\
	-o $NXT_BUILD_DIR/$nxt_obj \\
	$nxt_src
END
done

# nxt_modules.c.

cat << END >> $NXT_MAKEFILE

$nxt_modules_obj:	$NXT_MODULES_SRC \$(NXT_DEPS)
	\$(CC) -c \$(CFLAGS) \$(NXT_INCS) \\
	$NXT_LIB_AUX_CFLAGS \\
	-o $nxt_modules_obj \\
	$NXT_MODULES_SRC
END

if [ $NXT_PYTHON_MODULE != NO ]; then
. auto/modules/python/make
fi

# Makefile.
# *.dSYM is MacOSX Clang debug information.

cat << END > Makefile

all:	libnxt $NXT_BIN

libnxt:
	make -f $NXT_MAKEFILE libnxt

lib_test:
	make -f $NXT_MAKEFILE $NXT_BUILD_DIR/lib_unit_test
	make -f $NXT_MAKEFILE $NXT_BUILD_DIR/utf8_file_name_test

clean:
	rm -rf $NXT_BUILD_DIR *.dSYM Makefile

$NXT_BIN:
	make -f $NXT_MAKEFILE $NXT_BUILD_DIR/$NXT_BIN

END

159
auto/malloc Normal file
View File

@@ -0,0 +1,159 @@
# Copyright (C) Igor Sysoev
# Copyright (C) NGINX, Inc.

# Detect aligned allocation, usable-size introspection, and alloca()
# variants.  All probes run the test program (nxt_feature_run=yes).

# Linux glibc 2.1.91, FreeBSD 7.0, Solaris 11,
# MacOSX 10.6 (Snow Leopard), NetBSD 5.0.

nxt_feature="posix_memalign()"
nxt_feature_name=NXT_HAVE_POSIX_MEMALIGN
nxt_feature_run=yes
nxt_feature_incs=
nxt_feature_libs=
nxt_feature_test="#include <stdlib.h>
int main() {
void *p;
if (posix_memalign(&p, 4096, 4096) != 0)
return 1;
free(p);
return 0;
}"
. auto/feature

if [ $nxt_found = no ]; then
# Solaris, HP-UX.
nxt_feature="memalign()"
nxt_feature_name=NXT_HAVE_MEMALIGN
nxt_feature_run=yes
nxt_feature_incs=
nxt_feature_libs=
nxt_feature_test="#include <stdlib.h>
int main() {
void *p;
p = memalign(4096, 4096);
if (p == NULL)
return 1;
free(p);
return 0;
}"
. auto/feature
fi

# Linux malloc_usable_size().

nxt_feature="Linux malloc_usable_size()"
nxt_feature_name=NXT_HAVE_MALLOC_USABLE_SIZE
nxt_feature_run=yes
nxt_feature_incs=
nxt_feature_libs=
nxt_feature_test="#include <malloc.h>
int main() {
void *p;
p = malloc(4096);
if (malloc_usable_size(p) < 4096)
return 1;
return 0;
}"
. auto/feature

if [ $nxt_found = no ]; then
# FreeBSD malloc_usable_size().
# Same macro as the Linux variant, only the header differs.
nxt_feature="FreeBSD malloc_usable_size()"
nxt_feature_name=NXT_HAVE_MALLOC_USABLE_SIZE
nxt_feature_run=yes
nxt_feature_incs=
nxt_feature_libs=
nxt_feature_test="#include <stdlib.h>
#include <malloc_np.h>
int main() {
void *p;
p = malloc(4096);
if (malloc_usable_size(p) < 4096)
return 1;
return 0;
}"
. auto/feature
fi

if [ $nxt_found = no ]; then
# MacOSX malloc_good_size().
nxt_feature="MacOSX malloc_good_size()"
nxt_feature_name=NXT_HAVE_MALLOC_GOOD_SIZE
nxt_feature_run=yes
nxt_feature_incs=
nxt_feature_libs=
nxt_feature_test="#include <malloc/malloc.h>
int main() {
if (malloc_good_size(4096) < 4096)
return 1;
return 0;
}"
. auto/feature
fi

# alloca().

# Linux, FreeBSD, MacOSX.

nxt_feature="alloca()"
nxt_feature_name=NXT_HAVE_ALLOCA
nxt_feature_run=yes
nxt_feature_incs=
nxt_feature_libs=
nxt_feature_test="#include <stdlib.h>
int main() {
void *p;
p = alloca(256);
if (p == 0)
return 1;
return 0;
}"
. auto/feature

if [ $nxt_found = no ]; then
# Linux, Solaris, MacOSX.
nxt_feature="alloca() in alloca.h"
nxt_feature_name=NXT_HAVE_ALLOCA_H
nxt_feature_run=yes
nxt_feature_incs=
nxt_feature_libs=
nxt_feature_test="#include <alloca.h>
int main() {
void *p;
p = alloca(256);
if (p == 0)
return 1;
return 0;
}"
. auto/feature
fi

87
auto/mmap Normal file
View File

@@ -0,0 +1,87 @@
# Copyright (C) Igor Sysoev
# Copyright (C) NGINX, Inc.

# Detect mmap() anonymous mapping and prefault flags.

# Linux, FreeBSD, Solaris, MacOSX.

nxt_feature="MAP_ANON"
nxt_feature_name=NXT_HAVE_MAP_ANON
nxt_feature_run=yes
nxt_feature_incs=
nxt_feature_libs=
nxt_feature_test="#include <stdlib.h>
#include <sys/mman.h>
int main() {
if (mmap(NULL, 4096, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANON, -1, 0)
== MAP_FAILED)
return 1;
return 0;
}"
. auto/feature

if [ $nxt_found = no ]; then
# Linux, Solaris, HP-UX.
nxt_feature="MAP_ANONYMOUS"
nxt_feature_name=NXT_HAVE_MAP_ANONYMOUS
nxt_feature_run=yes
nxt_feature_incs=
nxt_feature_libs=
nxt_feature_test="#include <stdlib.h>
#include <sys/mman.h>
int main() {
if (mmap(NULL, 4096, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)
== MAP_FAILED)
return 1;
return 0;
}"
. auto/feature
fi

# Linux.
# Compile-only check (nxt_feature_run=no): presumably because this
# MAP_SHARED call with fd -1 would fail at run time - the probe only
# needs the flag to exist.

nxt_feature="MAP_POPULATE"
nxt_feature_name=NXT_HAVE_MAP_POPULATE
nxt_feature_run=no
nxt_feature_incs=
nxt_feature_libs=
nxt_feature_test="#include <stdlib.h>
#include <sys/mman.h>
int main() {
if (mmap(NULL, 4096, PROT_READ | PROT_WRITE,
MAP_SHARED | MAP_POPULATE, -1, 0)
== MAP_FAILED)
return 1;
return 0;
}"
. auto/feature

# FreeBSD.

nxt_feature="MAP_PREFAULT_READ"
nxt_feature_name=NXT_HAVE_MAP_PREFAULT_READ
nxt_feature_run=yes
nxt_feature_incs=
nxt_feature_libs=
nxt_feature_test="#include <stdlib.h>
#include <sys/mman.h>
int main() {
if (mmap(NULL, 4096, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANON | MAP_PREFAULT_READ,
-1, 0)
== MAP_FAILED)
return 1;
return 0;
}"
. auto/feature

50
auto/modules/conf Normal file
View File

@@ -0,0 +1,50 @@
# Copyright (C) NGINX, Inc.
# Copyright (C) Valentin V. Bartenev

# Collect enabled modules and generate nxt_modules.c containing the
# table of their init functions.  Each module's conf script appends to
# NXT_MODULES_INIT (init function names) and NXT_MODULES_SRCS (sources).

NXT_MODULES_INIT=
NXT_MODULES_SRCS=

if [ $NXT_PYTHON_MODULE != NO ]; then
. auto/modules/python/conf
fi

NXT_MODULES_SRC=$NXT_BUILD_DIR/nxt_modules.c

cat << END > $NXT_MODULES_SRC

#include <nxt_main.h>
#include <nxt_cycle.h>

END

# Forward declarations for all module init functions.

for nxt_init in $NXT_MODULES_INIT
do
$echo "extern nxt_int_t $nxt_init(nxt_thread_t *thr, nxt_cycle_t *cycle);" \
>> $NXT_MODULES_SRC
done

# The init function table itself.

cat << END >> $NXT_MODULES_SRC

nxt_module_init_t nxt_init_modules[] = {
END

for nxt_init in $NXT_MODULES_INIT
do
$echo "    $nxt_init," >> $NXT_MODULES_SRC
done

cat << END >> $NXT_MODULES_SRC
};

nxt_uint_t nxt_init_modules_n = nxt_nitems(nxt_init_modules);
END

54
auto/modules/python/conf Normal file
View File

@@ -0,0 +1,54 @@
# Copyright (C) NGINX, Inc.
# Copyright (C) Valentin V. Bartenev

# Configure the Python WSGI module: query the interpreter named by
# $NXT_PYTHON via its sysconfig module for version, include path and
# libraries, then verify that <Python.h> compiles and links.
# NOTE(review): assumes $NXT_PYTHON provides the "sysconfig" module -
# verify against the minimum supported Python version.

NXT_PYTHON_VERSION=`${NXT_PYTHON} -c \
'import sysconfig, sys; \
sys.stdout.write(sysconfig.get_python_version())'`

NXT_PYTHON_INCLUDE=`${NXT_PYTHON} -c \
'import sysconfig, sys; \
sys.stdout.write(sysconfig.get_path("platinclude"))'`

NXT_PYTHON_LIB="-lpython${NXT_PYTHON_VERSION}"

NXT_PYTHON_LIBS=`${NXT_PYTHON} -c \
'import sysconfig, sys; \
sys.stdout.write(sysconfig.get_config_var("SYSLIBS") \
+ " " + sysconfig.get_config_var("LIBS"))'`

# Compile-only check that the detected headers and libraries work.

nxt_feature="Python"
nxt_feature_name=NXT_HAVE_PYTHON
nxt_feature_run=no
nxt_feature_incs="-I${NXT_PYTHON_INCLUDE}"
nxt_feature_libs="$NXT_PYTHON_LIB $NXT_PYTHON_LIBS"
nxt_feature_test="#include <Python.h>
int main() {
Py_Initialize();
}"
. auto/feature

# Python support was requested explicitly, so a failure is fatal.

if [ $nxt_found = no ]; then
$echo
$echo $0: error: no Python found.
$echo
exit 1;
fi

$echo " + Python version: ${NXT_PYTHON_VERSION}"

# Register the module's sources, init function and link libraries.

NXT_PYTHON_MODULE_SRCS=" \
src/nxt_python_wsgi.c \
"

NXT_MODULES_INIT="$NXT_MODULES_INIT nxt_python_wsgi_init"
NXT_MODULES_SRCS="$NXT_MODULES_SRCS $NXT_PYTHON_MODULE_SRCS"
NXT_LIB_AUX_LIBS="$NXT_LIB_AUX_LIBS $NXT_PYTHON_LIB $NXT_PYTHON_LIBS"

24
auto/modules/python/make Normal file
View File

@@ -0,0 +1,24 @@
# Copyright (C) NGINX, Inc.
# Copyright (C) Valentin V. Bartenev

# Append compile rules for the Python module objects to $NXT_MAKEFILE.
# Sourced from auto/make after NXT_DEPS has been written.

$echo >> $NXT_MAKEFILE
$echo >> $NXT_MAKEFILE

# The python module object files.
# \$(NXT_DEPS) is listed as a prerequisite, consistent with the other
# object rules in auto/make, so the objects are rebuilt when headers
# change.

for nxt_src in $NXT_PYTHON_MODULE_SRCS
do
nxt_obj=`$echo $nxt_src | sed -e "s/\.c$/\.o/"`
cat << END >> $NXT_MAKEFILE

$NXT_BUILD_DIR/$nxt_obj:	$nxt_src \$(NXT_DEPS)
	\$(CC) -c \$(CFLAGS) \$(NXT_INCS) -I $NXT_PYTHON_INCLUDE \\
	$NXT_LIB_AUX_CFLAGS \\
	-o $NXT_BUILD_DIR/$nxt_obj \\
	$nxt_src
END
done

108
auto/options Normal file
View File

@@ -0,0 +1,108 @@
# Copyright (C) Igor Sysoev
# Copyright (C) Valentin V. Bartenev
# Copyright (C) NGINX, Inc.

# Default build parameters, overridable from the configure command line.

CC=${CC:-cc}

NXT_BUILD_DIR=build
NXT_CONFIGURE_OPTIONS=
NXT_CFLAGS=
NXT_CC_OPT=
NXT_LD_OPT=
NXT_DEBUG=NO
NXT_THREADS=YES

NXT_INET6=NO
NXT_UNIX_DOMAIN=YES

NXT_REGEX=NO
NXT_PCRE=NO

NXT_SSLTLS=NO
NXT_OPENSSL=NO
NXT_GNUTLS=NO
NXT_CYASSL=NO
NXT_POLARSSL=NO

NXT_TEST_BUILD_EPOLL=NO
NXT_TEST_BUILD_EVENTPORT=NO
NXT_TEST_BUILD_DEVPOLL=NO
NXT_TEST_BUILD_POLLSET=NO

NXT_TEST_BUILD_FREEBSD_SENDFILE=NO
NXT_TEST_BUILD_LINUX_SENDFILE=NO
NXT_TEST_BUILD_MACOSX_SENDFILE=NO
NXT_TEST_BUILD_SOLARIS_SENDFILEV=NO
NXT_TEST_BUILD_AIX_SEND_FILE=NO
NXT_TEST_BUILD_HPUX_SENDFILE=NO

NXT_LIB_UNIT_TEST=NO

NXT_PYTHON=python
NXT_PYTHON_MODULE=NO

# Parse the configure command line.

for nxt_option
do

# Extract the value from "--option=value" arguments.
case "$nxt_option" in
-*=*) value=`$echo "$nxt_option" | sed -e 's/[-_a-zA-Z0-9]*=//'` ;;
*) value="" ;;
esac

case "$nxt_option" in
--with-cc=*) CC="$value" ;;
--with-cc-opt=*) NXT_CC_OPT="$value" ;;
--with-ld-opt=*) NXT_LD_OPT="$value" ;;
--build-dir=*) NXT_BUILD_DIR="$value" ;;

--with-debug) NXT_DEBUG=YES ;;
--with-threads) NXT_THREADS=YES ;;
--without-threads) NXT_THREADS=NO ;;

--with-ipv6) NXT_INET6=YES ;;
--with-inet6) NXT_INET6=YES ;;
--without-unix-domain) NXT_UNIX_DOMAIN=NO ;;

--with-pcre) NXT_PCRE=YES ;;

--with-ssltls) NXT_SSLTLS=YES ;;
--with-openssl) NXT_OPENSSL=YES ;;
--with-gnutls) NXT_GNUTLS=YES ;;
--with-cyassl) NXT_CYASSL=YES ;;
--with-polarssl) NXT_POLARSSL=YES ;;

--test-build-epoll) NXT_TEST_BUILD_EPOLL=YES ;;
--test-build-eventport) NXT_TEST_BUILD_EVENTPORT=YES ;;
--test-build-devpoll) NXT_TEST_BUILD_DEVPOLL=YES ;;
--test-build-pollset) NXT_TEST_BUILD_POLLSET=YES ;;

--test-build-freebsd-sendfile) NXT_TEST_BUILD_FREEBSD_SENDFILE=YES ;;
--test-build-linux-sendfile) NXT_TEST_BUILD_LINUX_SENDFILE=YES ;;
--test-build-solaris-sendfilev) NXT_TEST_BUILD_SOLARIS_SENDFILEV=YES ;;
--test-build-macosx-sendfile) NXT_TEST_BUILD_MACOSX_SENDFILE=YES ;;
--test-build-aix-send_file) NXT_TEST_BUILD_AIX_SEND_FILE=YES ;;
--test-build-hpux-sendfile) NXT_TEST_BUILD_HPUX_SENDFILE=YES ;;

--with-lib-unit-tests) NXT_LIB_UNIT_TEST=YES ;;

--with-python=*) NXT_PYTHON="$value" ;;
--with-python_module) NXT_PYTHON_MODULE=YES ;;

*)
$echo
$echo "$0: error: invalid option \"$nxt_option\"".
$echo
exit 1
;;
esac

# Re-quote option values containing spaces before saving the full
# command line for reproduction.
nxt_opt=`$echo $nxt_option | sed -e "s/\(--[^=]*=\)\(.* .*\)/\1'\2'/"`
NXT_CONFIGURE_OPTIONS="$NXT_CONFIGURE_OPTIONS $nxt_opt"

done

243
auto/os/conf Normal file
View File

@@ -0,0 +1,243 @@
# Copyright (C) Igor Sysoev
# Copyright (C) NGINX, Inc.
# Per-OS linker configuration: how to build the static and shared
# libnxt, loadable modules, and executables, plus the platform's
# default libraries (NXT_LIBM, NXT_LIBS).
#
# To support dynamically loaded modules libnxt library must be a shared
# object itself because an application linked with static libnxt library
# may lack code required by the modules. Dynamic linkers allow to specify
# relative path in SONAME library entry or in RPATH executable entry.
#
# Solaris 7, Linux 2.2, and FreeBSD 7.3 support $ORIGIN variable.
# MacOSX supports @executable_path variable.
# NetBSD does not support $ORIGIN variable.
#
# "ar -r" is enough to create a static library, ranlib is surplus.
# "ar -c" disables the "creating archive" warning.
case "$NXT_SYSTEM" in
Linux)
nxt_have=NXT_LINUX . auto/have
NXT_STATIC_LINK="ar -r -c"
NXT_SHARED_LINK="\$(CC) -shared -Wl,-soname,libnxt.so"
NXT_SHARED_LOCAL_LINK="\$(CC) -shared \
-Wl,-soname,\\\$\$ORIGIN/libnxt.so"
NXT_MODULE_LINK="\$(CC) -shared"
# "-Wl,-E" exports symbols of executable file.
NXT_EXEC_LINK="\$(CC) -Wl,-E"
NXT_SHARED_LOCAL_EXEC_LINK=
NXT_LIB_STATIC="libnxt.a"
NXT_LIB_SHARED="libnxt.so"
NXT_LIB_SHARED_LOCAL="$NXT_BUILD_DIR/libnxt.so"
NXT_LIBM="-lm"
NXT_LIBS="$NXT_LIBRT $NXT_LIBDL $NXT_PTHREAD"
;;
FreeBSD)
nxt_have=NXT_FREEBSD . auto/have
NXT_STATIC_LINK="ar -r -c"
NXT_SHARED_LINK="\$(CC) -shared -Wl,-soname,libnxt.so"
NXT_SHARED_LOCAL_LINK="\$(CC) -shared \
-Wl,-soname,\\\$\$ORIGIN/libnxt.so"
NXT_MODULE_LINK="\$(CC) -shared"
# "-Wl,-E" exports symbols of executable file.
NXT_EXEC_LINK="\$(CC) -Wl,-E"
# "-Wl,-z,origin" enables $ORIGIN processing.
NXT_SHARED_LOCAL_EXEC_LINK="-Wl,-z,origin"
NXT_LIB_STATIC="libnxt.a"
NXT_LIB_SHARED="libnxt.so"
NXT_LIB_SHARED_LOCAL="$NXT_BUILD_DIR/libnxt.so"
NXT_LIBM="-lm"
NXT_LIBS="$NXT_PTHREAD"
;;
SunOS)
nxt_have=NXT_SOLARIS . auto/have
# Sun C uses "-G -h" instead of GCC's "-shared -Wl,-soname".
case "$NXT_CC_NAME" in
SunC)
NXT_STATIC_LINK="ar -r -c"
NXT_SHARED_LINK="\$(CC) -G -h libnxt.so"
NXT_SHARED_LOCAL_LINK="\$(CC) -G -h \\\$\$ORIGIN/libnxt.so"
NXT_MODULE_LINK="\$(CC) -G"
;;
*)
NXT_STATIC_LINK="ar -r -c"
NXT_SHARED_LINK="\$(CC) -shared -Wl,-soname,libnxt.so"
NXT_SHARED_LOCAL_LINK="\$(CC) -shared \
-Wl,-soname,\\\$\$ORIGIN/libnxt.so"
NXT_MODULE_LINK="\$(CC) -shared"
;;
esac
NXT_LIB_STATIC="libnxt.a"
NXT_LIB_SHARED="libnxt.so"
NXT_LIB_SHARED_LOCAL="$NXT_BUILD_DIR/libnxt.so"
NXT_EXEC_LINK="\$(CC)"
NXT_SHARED_LOCAL_EXEC_LINK=
NXT_LIBM="-lm"
NXT_LIBS="-lsocket $NXT_LIBSENDFILE"
NXT_LIBS="$NXT_LIBS $NXT_LIBRT $NXT_LIBDL $NXT_PTHREAD"
;;
Darwin)
nxt_have=NXT_MACOSX . auto/have
# HFS+ volumes are caseless by default.
nxt_have=NXT_HAVE_CASELESS_FILESYSTEM . auto/have
# MacOSX 10.6 (Snow Leopard) has deprecated ucontext(3).
# MacOSX 10.7 (Lion) has deprecated system OpenSSL.
# MAC_OS_X_VERSION_MIN_REQUIRED macro does not help.
# The minimum version allowed for i386 is 10.4 (Tiger).
NXT_CFLAGS="$NXT_CFLAGS -mmacosx-version-min=10.4"
NXT_STATIC_LINK="ar -r -c"
NXT_SHARED_LINK="\$(CC) -dynamiclib"
NXT_SHARED_LOCAL_LINK="\$(CC) -dynamiclib \
-install_name @executable_path/libnxt.dylib"
# Prior to MacOSX 10.5 (Leopard) only bundles could be unloaded.
NXT_MODULE_LINK="\$(CC) -bundle -undefined dynamic_lookup"
NXT_EXEC_LINK="\$(CC)"
NXT_SHARED_LOCAL_EXEC_LINK=
NXT_LIB_STATIC="libnxt.a"
NXT_LIB_SHARED="libnxt.dylib"
NXT_LIB_SHARED_LOCAL="$NXT_BUILD_DIR/libnxt.dylib"
# MacOSX libm.dylib is a symlink to libSystem.dylib.
NXT_LIBM=
NXT_LIBS=
;;
NetBSD)
nxt_have=NXT_NETBSD . auto/have
NXT_STATIC_LINK="ar -r -c"
NXT_SHARED_LINK="\$(CC) -shared"
NXT_SHARED_LOCAL_LINK="\$(CC) -shared"
NXT_MODULE_LINK="\$(CC) -shared"
NXT_EXEC_LINK="\$(CC)"
NXT_SHARED_LOCAL_EXEC_LINK=
NXT_LIB_STATIC="libnxt.a"
NXT_LIB_SHARED="libnxt.so"
NXT_LIB_SHARED_LOCAL="$NXT_BUILD_DIR/libnxt.so"
NXT_LIBM="-lm"
NXT_LIBS="$NXT_PTHREAD"
;;
OpenBSD)
nxt_have=NXT_OPENBSD . auto/have
NXT_STATIC_LINK="ar -r -c"
NXT_SHARED_LINK="\$(CC) -shared"
NXT_SHARED_LOCAL_LINK="\$(CC) -shared"
NXT_MODULE_LINK="\$(CC) -shared"
NXT_EXEC_LINK="\$(CC)"
NXT_SHARED_LOCAL_EXEC_LINK=
NXT_LIB_STATIC="libnxt.a"
NXT_LIB_SHARED="libnxt.so"
NXT_LIB_SHARED_LOCAL="$NXT_BUILD_DIR/libnxt.so"
NXT_LIBM="-lm"
NXT_LIBS="$NXT_PTHREAD"
;;
AIX)
nxt_have=NXT_AIX . auto/have
NXT_STATIC_LINK="ar -r -c"
NXT_SHARED_LINK="\$(CC) -G"
NXT_SHARED_LOCAL_LINK="\$(CC) -G"
NXT_MODULE_LINK="\$(CC) -G"
NXT_EXEC_LINK="\$(CC)"
NXT_SHARED_LOCAL_EXEC_LINK=
NXT_LIB_STATIC="libnxt.a"
NXT_LIB_SHARED="libnxt.so"
NXT_LIB_SHARED_LOCAL="$NXT_BUILD_DIR/libnxt.so"
NXT_LIBM="-lm"
NXT_LIBS="$NXT_PTHREAD"
;;
HP-UX)
nxt_have=NXT_HPUX . auto/have
NXT_EXEC_LINK="\$(CC)"
NXT_SHARED_LOCAL_EXEC_LINK=
NXT_STATIC_LINK="ar -r -c"
NXT_SHARED_LINK="\$(CC) -shared"
NXT_SHARED_LOCAL_LINK="\$(CC) -shared"
NXT_MODULE_LINK="\$(CC) -shared"
NXT_LIB_STATIC="libnxt.a"
NXT_LIB_SHARED="libnxt.so"
NXT_LIB_SHARED_LOCAL="$NXT_BUILD_DIR/libnxt.so"
NXT_LIBM="-lm"
NXT_LIBS="$NXT_PTHREAD $NXT_LIBHG"
;;
QNX)
nxt_have=NXT_QNX . auto/have
NXT_STATIC_LINK="ar -r -c"
NXT_SHARED_LINK="\$(CC) -shared"
NXT_SHARED_LOCAL_LINK="\$(CC) -shared"
NXT_MODULE_LINK="\$(CC) -shared"
NXT_EXEC_LINK="\$(CC)"
NXT_SHARED_LOCAL_EXEC_LINK=
NXT_LIB_STATIC="libnxt.a"
NXT_LIB_SHARED="libnxt.so"
NXT_LIB_SHARED_LOCAL="$NXT_BUILD_DIR/libnxt.so"
NXT_LIBM="-lm"
NXT_LIBS="$NXT_PTHREAD"
;;
*)
# Unknown system: assume GCC-compatible toolchain with GNU-style ld.
NXT_STATIC_LINK="ar -r -c"
NXT_SHARED_LINK="\$(CC) -shared"
NXT_SHARED_LOCAL_LINK="\$(CC) -shared"
NXT_MODULE_LINK="\$(CC) -shared"
# "-Wl,-E" exports symbols of executable file.
NXT_EXEC_LINK="\$(CC) -Wl,-E"
NXT_SHARED_LOCAL_EXEC_LINK=
NXT_LIB_STATIC="libnxt.a"
NXT_LIB_SHARED="libnxt.so"
NXT_LIB_SHARED_LOCAL="$NXT_BUILD_DIR/libnxt.so"
NXT_LIBM="-lm"
NXT_LIBS="$NXT_LIBRT $NXT_LIBDL $NXT_PTHREAD"
;;
esac

93
auto/os/test Normal file
View File

@@ -0,0 +1,93 @@
# Copyright (C) Igor Sysoev
# Copyright (C) NGINX, Inc.
# Detect the build platform.  Sets NXT_SYSTEM/NXT_SYSTEM_VERSION/
# NXT_SYSTEM_PLATFORM, a portable $echo, the default C compiler, and
# any extra CFLAGS/LIBS the platform's headers need for feature tests.
NXT_SYSTEM=`uname -s 2>/dev/null`
case "$NXT_SYSTEM" in
Linux)
NXT_SYSTEM_VERSION=`uname -r 2>/dev/null`
# Linux uname -p can return "unknown".
NXT_SYSTEM_PLATFORM=`uname -m 2>/dev/null`
echo=echo
CC=${CC:-cc}
;;
FreeBSD | NetBSD | OpenBSD)
NXT_SYSTEM_VERSION=`uname -r 2>/dev/null`
NXT_SYSTEM_PLATFORM=`uname -m 2>/dev/null`
echo=echo
CC=${CC:-cc}
;;
SunOS)
NXT_SYSTEM_VERSION=`uname -r 2>/dev/null`
NXT_SYSTEM_PLATFORM=`uname -p 2>/dev/null`
echo=echo
CC=${CC:-gcc}
# Solaris headers hide modern interfaces unless the X/Open macros
# plus __EXTENSIONS__ are defined.
NXT_TEST_CFLAGS="$NXT_TEST_CFLAGS -D_XOPEN_SOURCE"
NXT_TEST_CFLAGS="$NXT_TEST_CFLAGS -D_XOPEN_SOURCE_EXTENDED=1"
NXT_TEST_CFLAGS="$NXT_TEST_CFLAGS -D__EXTENSIONS__"
NXT_TEST_LIBS="-lsocket"
;;
Darwin)
NXT_SYSTEM_VERSION=`uname -r 2>/dev/null`
NXT_SYSTEM_PLATFORM=`uname -m 2>/dev/null`
echo=echo
CC=${CC:-cc}
NXT_TEST_CFLAGS="$NXT_TEST_CFLAGS -mmacosx-version-min=10.4"
;;
AIX)
# AIX splits the OS version across "uname -v" (major) and "-r" (minor).
NXT_SYSTEM_VERSION="`uname -v 2>/dev/null`.`uname -r 2>/dev/null`"
NXT_SYSTEM_PLATFORM=`uname -p 2>/dev/null`
echo=echo
CC=${CC:-gcc}
;;
HP-UX)
NXT_SYSTEM_VERSION=`uname -r 2>/dev/null`
NXT_SYSTEM_PLATFORM=`uname -m 2>/dev/null`
echo=echo
CC=${CC:-gcc}
NXT_TEST_CFLAGS="$NXT_TEST_CFLAGS -D_XOPEN_SOURCE"
NXT_TEST_CFLAGS="$NXT_TEST_CFLAGS -D_XOPEN_SOURCE_EXTENDED"
NXT_TEST_CFLAGS="$NXT_TEST_CFLAGS -D_HPUX_ALT_XOPEN_SOCKET_API"
;;
QNX)
NXT_SYSTEM_VERSION=`uname -r 2>/dev/null`
NXT_SYSTEM_PLATFORM=`uname -p 2>/dev/null`
echo=echo
CC=${CC:-gcc}
;;
MINGW*)
# MinGW /bin/sh builtin "echo" omits newline under Wine
# for some reason, so use a portable echo.c program built
# using MinGW GCC with only msvcrt.dll dependence.
NXT_SYSTEM_VERSION=`uname -r 2>/dev/null`
NXT_SYSTEM_PLATFORM=`uname -m 2>/dev/null`
echo=auto/echo/echo.exe
CC=${CC:-cl}
NXT_WINDOWS=YES
;;
*)
NXT_SYSTEM_VERSION=`uname -r 2>/dev/null`
NXT_SYSTEM_PLATFORM=`uname -p 2>/dev/null`
echo=echo
CC=${CC:-gcc}
;;
esac
$echo configuring for $NXT_SYSTEM $NXT_SYSTEM_VERSION $NXT_SYSTEM_PLATFORM

49
auto/pcre Normal file
View File

@@ -0,0 +1,49 @@
# Copyright (C) Igor Sysoev
# Copyright (C) NGINX, Inc.
# Locate the PCRE library via pcre-config when --with-pcre was given.
# On success enables NXT_REGEX and exports NXT_PCRE_CFLAGS/NXT_PCRE_LIB
# for the build; configuration aborts if PCRE cannot be found.
NXT_REGEX=NO
NXT_PCRE_CFLAGS=
NXT_PCRE_LIB=
if [ $NXT_PCRE = YES ]; then
nxt_found=no
# pcre-config must be on PATH; it supplies the compiler and linker
# flags used by the compile/link feature test below.
if /bin/sh -c "(pcre-config --version)" >> $NXT_AUTOCONF_ERR 2>&1; then
NXT_PCRE_CFLAGS=`pcre-config --cflags`
NXT_PCRE_LIB=`pcre-config --libs`
nxt_feature="PCRE library"
nxt_feature_name=NXT_HAVE_PCRE
nxt_feature_run=no
nxt_feature_incs=$NXT_PCRE_CFLAGS
nxt_feature_libs=$NXT_PCRE_LIB
nxt_feature_test="#include <pcre.h>
int main() {
pcre *re;
re = pcre_compile(NULL, 0, NULL, 0, NULL);
if (re == NULL)
return 1;
return 0;
}"
. auto/feature
fi
if [ $nxt_found = no ]; then
$echo
$echo $0: error: no PCRE library found.
$echo
exit 1;
fi
NXT_REGEX=YES
nxt_have=NXT_REGEX . auto/have
$echo " + PCRE version: `pcre-config --version`"
fi

158
auto/sendfile Normal file
View File

@@ -0,0 +1,158 @@
# Copyright (C) Igor Sysoev
# Copyright (C) NGINX, Inc.
# Detect the platform's zero-copy file transmission API.  The probes
# run in order until one succeeds; at most one NXT_HAVE_* flag below
# ends up YES.  NXT_LIBSENDFILE carries the extra library (Solaris).
NXT_HAVE_LINUX_SENDFILE=NO
NXT_HAVE_FREEBSD_SENDFILE=NO
NXT_HAVE_MACOSX_SENDFILE=NO
NXT_HAVE_SOLARIS_SENDFILEV=NO
NXT_HAVE_AIX_SEND_FILE=NO
NXT_HAVE_HPUX_SENDFILE=NO
# Linux sendfile().
nxt_feature="Linux sendfile()"
nxt_feature_name=NXT_HAVE_LINUX_SENDFILE
# Reset all auto/feature inputs explicitly: nxt_feature_run, _incs and
# _libs would otherwise inherit stale values from whatever feature test
# ran before this script, which could make the probe run or link wrongly.
nxt_feature_run=
nxt_feature_incs=
nxt_feature_libs=
nxt_feature_test="#include <sys/sendfile.h>
int main() {
off_t offset;
sendfile(-1, -1, &offset, 0);
return 0;
}"
. auto/feature
if [ $nxt_found = yes ]; then
NXT_HAVE_LINUX_SENDFILE=YES
fi
if [ $nxt_found = no ]; then
# FreeBSD sendfile().
nxt_feature="FreeBSD sendfile()"
nxt_feature_name=NXT_HAVE_FREEBSD_SENDFILE
nxt_feature_run=
nxt_feature_incs=
nxt_feature_libs=
nxt_feature_test="#include <sys/types.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <stdlib.h>
int main() {
off_t sent;
sendfile(-1, -1, 0, 0, NULL, &sent, SF_NODISKIO);
return 0;
}"
. auto/feature
if [ $nxt_found = yes ]; then
NXT_HAVE_FREEBSD_SENDFILE=YES
fi
fi
NXT_LIBSENDFILE=
if [ $nxt_found = no ]; then
# Solaris 8 sendfilev().
nxt_feature="Solaris sendfilev()"
nxt_feature_name=NXT_HAVE_SOLARIS_SENDFILEV
nxt_feature_run=
nxt_feature_incs=
nxt_feature_libs="-lsendfile"
nxt_feature_test="#include <sys/sendfile.h>
int main() {
size_t sent;
struct sendfilevec vec;
sendfilev(-1, &vec, 0, &sent);
return 0;
}"
. auto/feature
if [ $nxt_found = yes ]; then
NXT_HAVE_SOLARIS_SENDFILEV=YES
NXT_LIBSENDFILE=$nxt_feature_libs
fi
fi
if [ $nxt_found = no ]; then
# MacOSX sendfile().
nxt_feature="MacOSX sendfile()"
nxt_feature_name=NXT_HAVE_MACOSX_SENDFILE
nxt_feature_run=
nxt_feature_incs=
nxt_feature_libs=
nxt_feature_test="#include <sys/types.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <stdlib.h>
int main() {
off_t sent;
sendfile(-1, -1, 0, &sent, NULL, 0);
return 0;
}"
. auto/feature
if [ $nxt_found = yes ]; then
NXT_HAVE_MACOSX_SENDFILE=YES
fi
fi
if [ $nxt_found = no ]; then
# AIX send_file().
nxt_feature="AIX send_file()"
nxt_feature_name=NXT_HAVE_AIX_SEND_FILE
nxt_feature_run=
nxt_feature_incs=
nxt_feature_libs=
nxt_feature_test="#include <sys/socket.h>
int main() {
int s;
struct sf_parms sf_iobuf;
send_file(&s, &sf_iobuf, 0);
return 0;
}"
. auto/feature
if [ $nxt_found = yes ]; then
NXT_HAVE_AIX_SEND_FILE=YES
fi
fi
if [ $nxt_found = no ]; then
# HP-UX sendfile().
nxt_feature="HP-UX sendfile()"
nxt_feature_name=NXT_HAVE_HPUX_SENDFILE
nxt_feature_run=
nxt_feature_incs=
nxt_feature_libs=
nxt_feature_test="#include <sys/socket.h>
#include <stdlib.h>
sbsize_t sendfile(int s, int fd, off_t offset,
bsize_t nbytes, const struct iovec *hdtrl, int flags);
int main() {
sendfile(-1, -1, 0, 0, NULL, 0);
return 0;
}"
. auto/feature
if [ $nxt_found = yes ]; then
NXT_HAVE_HPUX_SENDFILE=YES
fi
fi

233
auto/sockets Normal file
View File

@@ -0,0 +1,233 @@
# Copyright (C) Igor Sysoev
# Copyright (C) NGINX, Inc.
# Socket-related feature probes: IPv6, sockaddr variants and their
# sizes, SOCK_SEQPACKET socketpairs, msg_control, non-blocking I/O
# helpers.  Each probe dot-sources auto/feature, which compiles (and
# optionally runs) nxt_feature_test and defines nxt_feature_name.
if [ $NXT_INET6 = YES ]; then
nxt_feature="AF_INET6"
nxt_feature_name=NXT_INET6
nxt_feature_run=
nxt_feature_incs=
nxt_feature_libs=
nxt_feature_test="#include <stdio.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <netinet/in.h>
int main() {
struct sockaddr_in6 sin6;
sin6.sin6_family = AF_INET6;
printf(\"%d\", sin6.sin6_family);
return 0;
}"
. auto/feature
fi
# FreeBSD, MacOSX, NetBSD, OpenBSD.
nxt_feature="sockaddr.sa_len"
nxt_feature_name=NXT_SOCKADDR_SA_LEN
nxt_feature_run=
nxt_feature_incs=
nxt_feature_libs=
nxt_feature_test="#include <stdio.h>
#include <sys/socket.h>
int main() {
struct sockaddr sa;
sa.sa_len = 0;
printf(\"%d\", sa.sa_len);
return 0;
}"
. auto/feature
# The following "run=value" probes record the size of each sockaddr
# type at configure time (the test program prints the value).
nxt_feature="struct sockaddr size"
nxt_feature_name=NXT_HAVE_SOCKADDR
nxt_feature_run=value
nxt_feature_incs=
nxt_feature_libs=
nxt_feature_test="#include <stdio.h>
#include <sys/socket.h>
int main() {
printf(\"%d\", (int) sizeof(struct sockaddr));
return 0;
}"
. auto/feature
nxt_feature="struct sockaddr_in size"
nxt_feature_name=NXT_HAVE_SOCKADDR_IN
nxt_feature_run=value
nxt_feature_incs=
nxt_feature_libs=
nxt_feature_test="#include <stdio.h>
#include <sys/types.h>
#include <netinet/in.h>
int main() {
printf(\"%d\", (int) sizeof(struct sockaddr_in));
return 0;
}"
. auto/feature
nxt_feature="struct sockaddr_in6 size"
nxt_feature_name=NXT_HAVE_SOCKADDR_IN6
nxt_feature_run=value
nxt_feature_incs=
nxt_feature_libs=
nxt_feature_test="#include <stdio.h>
#include <sys/types.h>
#include <netinet/in.h>
int main() {
printf(\"%d\", (int) sizeof(struct sockaddr_in6));
return 0;
}"
. auto/feature
nxt_feature="struct sockaddr_un size"
nxt_feature_name=NXT_HAVE_SOCKADDR_UN
nxt_feature_run=value
nxt_feature_incs=
nxt_feature_libs=
nxt_feature_test="#include <stdio.h>
#include <sys/types.h>
#include <sys/un.h>
int main() {
printf(\"%d\", (int) sizeof(struct sockaddr_un));
return 0;
}"
. auto/feature
nxt_feature="struct sockaddr_storage size"
nxt_feature_name=NXT_HAVE_SOCKADDR_STORAGE
nxt_feature_run=value
nxt_feature_incs=
nxt_feature_libs=
nxt_feature_test="#include <stdio.h>
#include <sys/socket.h>
int main() {
printf(\"%d\", (int) sizeof(struct sockaddr_storage));
return 0;
}"
. auto/feature
nxt_feature="socketpair(AF_UNIX, SOCK_SEQPACKET)"
nxt_feature_name=NXT_HAVE_AF_UNIX_SOCK_SEQPACKET
nxt_feature_run=yes
nxt_feature_incs=
nxt_feature_libs=
nxt_feature_test="#include <stdio.h>
#include <sys/socket.h>
int main() {
int pair[2];
if (socketpair(AF_UNIX, SOCK_SEQPACKET, 0, pair) != 0)
return 1;
return 0;
}"
. auto/feature
nxt_feature="struct msghdr.msg_control"
nxt_feature_name=NXT_HAVE_MSGHDR_MSG_CONTROL
nxt_feature_run=
nxt_feature_incs=
nxt_feature_libs=
nxt_feature_test="#include <stdio.h>
#include <sys/socket.h>
int main() {
struct msghdr msg;
printf(\"%d\", (int) sizeof(msg.msg_control));
return 0;
}"
. auto/feature
# FIONBIO lives in <sys/filio.h> on some platforms (e.g. Solaris);
# detect the header first so the FIONBIO test below can include it.
nxt_feature="sys/filio.h"
nxt_feature_name=NXT_HAVE_SYS_FILIO_H
nxt_feature_run=
nxt_feature_incs=
nxt_feature_libs=
nxt_feature_test="#include <sys/filio.h>
int main() {
return 0;
}"
. auto/feature
if [ $nxt_found = yes ]; then
NXT_SYS_FILIO_H="#include <sys/filio.h>"
else
NXT_SYS_FILIO_H=
fi
nxt_feature="ioctl(FIONBIO)"
nxt_feature_name=NXT_HAVE_FIONBIO
nxt_feature_run=
nxt_feature_incs=
nxt_feature_libs=
nxt_feature_test="#include <unistd.h>
#include <sys/socket.h>
$NXT_SYS_FILIO_H
#include <sys/ioctl.h>
int main() {
int nb;
nb = 0;
ioctl(-1, FIONBIO, &nb);
return 0;
}"
. auto/feature
# socket(SOCK_NONBLOCK), Linux 2.6.27/glibc 2.10, NetBSD 6.0, FreeBSD 9.2.
nxt_feature="socket(SOCK_NONBLOCK)"
nxt_feature_name=NXT_HAVE_SOCK_NONBLOCK
nxt_feature_run=
nxt_feature_incs=
nxt_feature_libs=
nxt_feature_test="#define _GNU_SOURCE
#include <sys/socket.h>
int main() {
socket(AF_INET, SOCK_STREAM | SOCK_NONBLOCK, 0);
return 0;
}"
. auto/feature
# accept4(), Linux 2.6.28/glibc 2.10, NetBSD 6.0, FreeBSD 9.2.
nxt_feature="accept4()"
nxt_feature_name=NXT_HAVE_ACCEPT4
nxt_feature_run=
nxt_feature_incs=
nxt_feature_libs=
nxt_feature_test="#define _GNU_SOURCE
#include <stdlib.h>
#include <sys/socket.h>
int main() {
accept4(0, NULL, NULL, SOCK_NONBLOCK);
return 0;
}"
. auto/feature
326
auto/sources Normal file
View File

@@ -0,0 +1,326 @@
# Copyright (C) Igor Sysoev
# Copyright (C) NGINX, Inc.
# Source and dependency lists for the build.  The NXT_LIB_* variables
# describe the libnxt library; optional groups are appended below based
# on the feature flags computed by the earlier auto/* scripts.
# Headers every library object depends on.
NXT_LIB_DEPS=" \
src/nxt_main.h \
src/nxt_clang.h \
src/nxt_types.h \
src/nxt_atomic.h \
src/nxt_errno.h \
src/nxt_time.h \
src/nxt_unix.h \
src/nxt_malloc.h \
src/nxt_file.h \
src/nxt_mem_map.h \
src/nxt_socket.h \
src/nxt_process.h \
src/nxt_signal.h \
src/nxt_chan.h \
src/nxt_dyld.h \
src/nxt_thread.h \
src/nxt_thread_id.h \
src/nxt_spinlock.h \
src/nxt_random.h \
src/nxt_queue.h \
src/nxt_rbtree.h \
src/nxt_string.h \
src/nxt_utf8.h \
src/nxt_unicode_lowcase.h \
src/nxt_parse.h \
src/nxt_mem_pool.h \
src/nxt_mem_pool_cleanup.h \
src/nxt_mem_cache_pool.h \
src/nxt_mem_zone.h \
src/nxt_sprintf.h \
src/nxt_file_name.h \
src/nxt_log.h \
src/nxt_djb_hash.h \
src/nxt_murmur_hash.h \
src/nxt_lvlhsh.h \
src/nxt_hash.h \
src/nxt_sort.h \
src/nxt_array.h \
src/nxt_vector.h \
src/nxt_list.h \
src/nxt_buf.h \
src/nxt_buf_pool.h \
src/nxt_buf_filter.h \
src/nxt_recvbuf.h \
src/nxt_sendbuf.h \
src/nxt_thread_log.h \
src/nxt_thread_time.h \
src/nxt_work_queue.h \
src/nxt_service.h \
src/nxt_fiber.h \
src/nxt_log_moderation.h \
src/nxt_event_set.h \
src/nxt_event_engine.h \
src/nxt_event_timer.h \
src/nxt_event_fd.h \
src/nxt_event_conn.h \
src/nxt_event_file.h \
src/nxt_job.h \
src/nxt_job_file.h \
src/nxt_sockaddr.h \
src/nxt_job_resolve.h \
src/nxt_listen_socket.h \
"
# Core library sources built on all platforms.
NXT_LIB_SRCS=" \
src/nxt_lib.c \
src/nxt_gmtime.c \
src/nxt_errno.c \
src/nxt_time.c \
src/nxt_malloc.c \
src/nxt_file.c \
src/nxt_mem_map.c \
src/nxt_socket.c \
src/nxt_socketpair.c \
src/nxt_process.c \
src/nxt_process_title.c \
src/nxt_signal.c \
src/nxt_chan.c \
src/nxt_dyld.c \
src/nxt_random.c \
src/nxt_queue.c \
src/nxt_rbtree.c \
src/nxt_mem_pool.c \
src/nxt_mem_pool_cleanup.c \
src/nxt_mem_cache_pool.c \
src/nxt_mem_zone.c \
src/nxt_string.c \
src/nxt_utf8.c \
src/nxt_parse.c \
src/nxt_sprintf.c \
src/nxt_file_name.c \
src/nxt_log.c \
src/nxt_djb_hash.c \
src/nxt_murmur_hash.c \
src/nxt_lvlhsh.c \
src/nxt_lvlhsh_pool.c \
src/nxt_array.c \
src/nxt_vector.c \
src/nxt_list.c \
src/nxt_buf.c \
src/nxt_buf_pool.c \
src/nxt_buf_filter.c \
src/nxt_recvbuf.c \
src/nxt_sendbuf.c \
src/nxt_thread_time.c \
src/nxt_time_parse.c \
src/nxt_work_queue.c \
src/nxt_service.c \
src/nxt_fiber.c \
src/nxt_log_moderation.c \
src/nxt_event_set.c \
src/nxt_event_engine.c \
src/nxt_event_timer.c \
src/nxt_event_conn.c \
src/nxt_event_conn_connect.c \
src/nxt_event_conn_accept.c \
src/nxt_event_conn_read.c \
src/nxt_event_conn_write.c \
src/nxt_event_conn_job_sendfile.c \
src/nxt_event_conn_proxy.c \
src/nxt_job.c \
src/nxt_job_file.c \
src/nxt_sockaddr.c \
src/nxt_job_resolve.c \
src/nxt_listen_socket.c \
"
# Threading support, added only when NXT_THREADS=YES.
NXT_LIB_THREAD_DEPS=" \
src/nxt_semaphore.h \
src/nxt_thread_pool.h \
"
NXT_LIB_THREAD_SRCS=" \
src/nxt_thread.c \
src/nxt_thread_id.c \
src/nxt_thread_mutex.c \
src/nxt_thread_cond.c \
src/nxt_spinlock.c \
src/nxt_semaphore.c \
src/nxt_thread_pool.c \
"
# SSL/TLS framework plus one source file per supported backend.
NXT_LIB_SSLTLS_DEPS="src/nxt_ssltls.h"
NXT_LIB_SSLTLS_SRCS="src/nxt_ssltls.c"
NXT_LIB_OPENSSL_SRCS="src/nxt_openssl.c"
NXT_LIB_GNUTLS_SRCS="src/nxt_gnutls.c"
NXT_LIB_CYASSL_SRCS="src/nxt_cyassl.c"
NXT_LIB_POLARSSL_SRCS="src/nxt_polarssl.c"
# Event mechanism backends; poll/select are always built as fallbacks.
NXT_LIB_EPOLL_SRCS="src/nxt_epoll.c"
NXT_LIB_KQUEUE_SRCS="src/nxt_kqueue.c"
NXT_LIB_EVENTPORT_SRCS="src/nxt_eventport.c"
NXT_LIB_DEVPOLL_SRCS="src/nxt_devpoll.c"
NXT_LIB_POLLSET_SRCS="src/nxt_pollset.c"
NXT_LIB_POLL_SRCS="src/nxt_poll.c"
NXT_LIB_SELECT_SRCS="src/nxt_select.c"
# Per-platform sendfile implementations (see auto/sendfile).
NXT_LIB_LINUX_SENDFILE_SRCS="src/nxt_linux_sendfile.c"
NXT_LIB_FREEBSD_SENDFILE_SRCS="src/nxt_freebsd_sendfile.c"
NXT_LIB_SOLARIS_SENDFILEV_SRCS="src/nxt_solaris_sendfilev.c"
NXT_LIB_MACOSX_SENDFILE_SRCS="src/nxt_macosx_sendfile.c"
NXT_LIB_AIX_SEND_FILE_SRCS="src/nxt_aix_send_file.c"
NXT_LIB_HPUX_SENDFILE_SRCS="src/nxt_hpux_sendfile.c"
NXT_LIB_TEST_BUILD_DEPS="src/nxt_test_build.h"
NXT_LIB_TEST_BUILD_SRCS="src/nxt_test_build.c"
NXT_LIB_UNIT_TEST_DEPS="test/nxt_lib_unit_test.h \
test/nxt_rbtree1.h \
"
NXT_LIB_UNIT_TEST_SRCS=" \
test/nxt_lib_unit_test.c \
test/nxt_rbtree1.c \
test/nxt_rbtree_unit_test.c \
test/nxt_term_parse_unit_test.c \
test/nxt_msec_diff_unit_test.c \
test/nxt_exp_approximation.c \
test/nxt_mem_cache_pool_unit_test.c \
test/nxt_mem_zone_unit_test.c \
test/nxt_lvlhsh_unit_test.c \
test/nxt_gmtime_unit_test.c \
test/nxt_sprintf_unit_test.c \
test/nxt_malloc_unit_test.c \
test/nxt_utf8_unit_test.c \
test/nxt_rbtree1_unit_test.c \
"
NXT_LIB_UTF8_FILE_NAME_TEST_SRCS=" \
test/nxt_utf8_file_name_test.c \
"
# Append the optional groups selected by earlier feature detection.
if [ $NXT_THREADS = YES ]; then
NXT_LIB_DEPS="$NXT_LIB_DEPS $NXT_LIB_THREAD_DEPS"
NXT_LIB_SRCS="$NXT_LIB_SRCS $NXT_LIB_THREAD_SRCS"
fi
if [ $NXT_SSLTLS = YES ]; then
nxt_have=NXT_SSLTLS . auto/have
NXT_LIB_DEPS="$NXT_LIB_DEPS $NXT_LIB_SSLTLS_DEPS"
NXT_LIB_SRCS="$NXT_LIB_SRCS $NXT_LIB_SSLTLS_SRCS"
fi
if [ $NXT_OPENSSL = YES ]; then
NXT_LIB_SRCS="$NXT_LIB_SRCS $NXT_LIB_OPENSSL_SRCS"
fi
if [ $NXT_GNUTLS = YES ]; then
NXT_LIB_SRCS="$NXT_LIB_SRCS $NXT_LIB_GNUTLS_SRCS"
fi
if [ $NXT_CYASSL = YES ]; then
NXT_LIB_SRCS="$NXT_LIB_SRCS $NXT_LIB_CYASSL_SRCS"
fi
if [ $NXT_POLARSSL = YES ]; then
NXT_LIB_SRCS="$NXT_LIB_SRCS $NXT_LIB_POLARSSL_SRCS"
fi
# Event backends are added either when really present or when a
# test-build stub was requested for cross-platform compile checks.
if [ "$NXT_HAVE_EPOLL" = "YES" -o "$NXT_TEST_BUILD_EPOLL" = "YES" ]; then
NXT_LIB_SRCS="$NXT_LIB_SRCS $NXT_LIB_EPOLL_SRCS"
fi
if [ "$NXT_HAVE_KQUEUE" = "YES" ]; then
NXT_LIB_SRCS="$NXT_LIB_SRCS $NXT_LIB_KQUEUE_SRCS"
fi
if [ "$NXT_HAVE_EVENTPORT" = "YES" -o "$NXT_TEST_BUILD_EVENTPORT" = "YES" ];
then
NXT_LIB_SRCS="$NXT_LIB_SRCS $NXT_LIB_EVENTPORT_SRCS"
fi
if [ "$NXT_HAVE_DEVPOLL" = "YES" -o "$NXT_TEST_BUILD_DEVPOLL" = "YES" ]; then
NXT_LIB_SRCS="$NXT_LIB_SRCS $NXT_LIB_DEVPOLL_SRCS"
fi
if [ "$NXT_HAVE_POLLSET" = "YES" -o "$NXT_TEST_BUILD_POLLSET" = "YES" ]; then
NXT_LIB_SRCS="$NXT_LIB_SRCS $NXT_LIB_POLLSET_SRCS"
fi
NXT_LIB_SRCS="$NXT_LIB_SRCS $NXT_LIB_POLL_SRCS"
NXT_LIB_SRCS="$NXT_LIB_SRCS $NXT_LIB_SELECT_SRCS"
if [ "$NXT_HAVE_LINUX_SENDFILE" = "YES" \
-o "$NXT_TEST_BUILD_LINUX_SENDFILE" = "YES" ]; then
NXT_LIB_SRCS="$NXT_LIB_SRCS $NXT_LIB_LINUX_SENDFILE_SRCS"
fi
if [ "$NXT_HAVE_FREEBSD_SENDFILE" = "YES" \
-o "$NXT_TEST_BUILD_FREEBSD_SENDFILE" = "YES" ]; then
NXT_LIB_SRCS="$NXT_LIB_SRCS $NXT_LIB_FREEBSD_SENDFILE_SRCS"
fi
if [ "$NXT_HAVE_SOLARIS_SENDFILEV" = "YES" \
-o "$NXT_TEST_BUILD_SOLARIS_SENDFILEV" = "YES" ];
then
NXT_LIB_SRCS="$NXT_LIB_SRCS $NXT_LIB_SOLARIS_SENDFILEV_SRCS"
fi
if [ "$NXT_HAVE_MACOSX_SENDFILE" = "YES" \
-o "$NXT_TEST_BUILD_MACOSX_SENDFILE" = "YES" ]; then
NXT_LIB_SRCS="$NXT_LIB_SRCS $NXT_LIB_MACOSX_SENDFILE_SRCS"
fi
if [ "$NXT_HAVE_AIX_SEND_FILE" = "YES" \
-o "$NXT_TEST_BUILD_AIX_SEND_FILE" = "YES" ];
then
NXT_LIB_SRCS="$NXT_LIB_SRCS $NXT_LIB_AIX_SEND_FILE_SRCS"
fi
if [ "$NXT_HAVE_HPUX_SENDFILE" = "YES" \
-o "$NXT_TEST_BUILD_HPUX_SENDFILE" = "YES" ]; then
NXT_LIB_SRCS="$NXT_LIB_SRCS $NXT_LIB_HPUX_SENDFILE_SRCS"
fi
if [ "$NXT_TEST_BUILD" = "YES" ]; then
NXT_LIB_DEPS="$NXT_LIB_DEPS $NXT_LIB_TEST_BUILD_DEPS"
NXT_LIB_SRCS="$NXT_LIB_SRCS $NXT_LIB_TEST_BUILD_SRCS"
fi
if [ $NXT_LIB_UNIT_TEST = YES ]; then
nxt_have=NXT_LIB_UNIT_TEST . auto/have
fi
# Sources and dependencies of the main executable (not part of libnxt).
NXT_DEPS=" \
src/nxt_cycle.h \
src/nxt_process_chan.h \
src/nxt_application.h \
src/nxt_master_process.h \
"
NXT_SRCS=" \
src/nxt_main.c \
src/nxt_app_log.c \
src/nxt_cycle.c \
src/nxt_process_chan.c \
src/nxt_application.c \
src/nxt_master_process.c \
src/nxt_worker_process.c \
"

184
auto/ssltls Normal file
View File

@@ -0,0 +1,184 @@
# Copyright (C) Igor Sysoev
# Copyright (C) NGINX, Inc.
# Detect the SSL/TLS backends requested on the command line.  Each
# successful probe sets NXT_SSLTLS=YES and records the backend's
# compiler/linker flags; a requested but missing library is fatal.
NXT_OPENSSL_CFLAGS=
NXT_OPENSSL_LIBS=
NXT_GNUTLS_CFLAGS=
NXT_GNUTLS_LIBS=
NXT_CYASSL_CFLAGS=
NXT_CYASSL_LIBS=
NXT_POLARSSL_CFLAGS=
NXT_POLARSSL_LIBS=
if [ $NXT_OPENSSL = YES ]; then
nxt_feature="OpenSSL library"
nxt_feature_name=NXT_HAVE_OPENSSL
nxt_feature_run=yes
nxt_feature_incs=
nxt_feature_libs="-lssl -lcrypto"
nxt_feature_test="#include <openssl/ssl.h>
int main() {
SSL_library_init();
return 0;
}"
. auto/feature
if [ $nxt_found = yes ]; then
NXT_SSLTLS=YES
NXT_OPENSSL_LIBS="$nxt_feature_libs"
# Record the runtime library version string for reporting.
nxt_feature="OpenSSL version"
nxt_feature_name=NXT_HAVE_OPENSSL_VERSION
nxt_feature_run=value
nxt_feature_test="#include <openssl/ssl.h>
int main() {
printf(\"\\\"%s\\\"\",
SSLeay_version(SSLEAY_VERSION));
return 0;
}"
. auto/feature
else
$echo
$echo $0: error: no OpenSSL library found.
$echo
exit 1;
fi
fi
if [ $NXT_GNUTLS = YES ]; then
# NOTE(review): if pkg-config itself is missing, this silently skips
# GnuTLS even though --with-gnutls was requested — confirm intended.
if /bin/sh -c "(pkg-config gnutls --exists)" >> $NXT_AUTOCONF_ERR 2>&1;
then
NXT_GNUTLS_CFLAGS=`pkg-config gnutls --cflags`
NXT_GNUTLS_LIBS=`pkg-config gnutls --libs`
nxt_feature="GnuTLS library"
nxt_feature_name=NXT_HAVE_GNUTLS
nxt_feature_run=yes
nxt_feature_incs=$NXT_GNUTLS_CFLAGS
nxt_feature_libs=$NXT_GNUTLS_LIBS
nxt_feature_test="#include <gnutls/gnutls.h>
int main() {
gnutls_global_init();
gnutls_global_deinit();
return 0;
}"
. auto/feature
if [ $nxt_found = yes ]; then
NXT_SSLTLS=YES
$echo " + GnuTLS version: `pkg-config gnutls --modversion`"
# Optional GnuTLS interfaces: vectored push and custom time
# callback; each defines its own NXT_HAVE_* macro when present.
nxt_feature="gnutls_transport_set_vec_push_function"
nxt_feature_name=NXT_HAVE_GNUTLS_VEC_PUSH
nxt_feature_run=no
nxt_feature_incs=$NXT_GNUTLS_CFLAGS
nxt_feature_libs=$NXT_GNUTLS_LIBS
nxt_feature_test="#include <gnutls/gnutls.h>
int main() {
gnutls_transport_set_vec_push_function(NULL, NULL);
return 0;
}"
. auto/feature
nxt_feature="gnutls_global_set_time_function"
nxt_feature_name=NXT_HAVE_GNUTLS_SET_TIME
nxt_feature_run=no
nxt_feature_incs=$NXT_GNUTLS_CFLAGS
nxt_feature_libs=$NXT_GNUTLS_LIBS
nxt_feature_test="#include <gnutls/gnutls.h>
int main() {
gnutls_global_set_time_function(NULL);
return 0;
}"
. auto/feature
else
$echo
$echo $0: error: no GnuTLS library found.
$echo
exit 1;
fi
fi
fi
if [ $NXT_CYASSL = YES ]; then
nxt_feature="CyaSSL library"
nxt_feature_name=NXT_HAVE_CYASSL
nxt_feature_run=yes
nxt_feature_incs=
nxt_feature_libs="-lcyassl"
nxt_feature_test="#include <cyassl/ssl.h>
int main() {
CyaSSL_Init();
CyaSSL_Cleanup();
return 0;
}"
. auto/feature
if [ $nxt_found = yes ]; then
NXT_SSLTLS=YES
NXT_CYASSL_CFLAGS="$nxt_feature_incs"
NXT_CYASSL_LIBS="$nxt_feature_libs"
else
$echo
$echo $0: error: no CyaSSL library found.
$echo
exit 1;
fi
fi
if [ $NXT_POLARSSL = YES ]; then
nxt_feature="PolarSSL library"
nxt_feature_name=NXT_HAVE_POLARSSL
nxt_feature_run=yes
nxt_feature_incs=
nxt_feature_libs="-lpolarssl"
nxt_feature_test="#include <polarssl/ssl.h>
int main() {
ssl_context ssl;
memset(&ssl, '\0', sizeof(ssl));
ssl_init(&ssl);
ssl_free(&ssl);
return 0;
}"
. auto/feature
if [ $nxt_found = yes ]; then
NXT_SSLTLS=YES
NXT_POLARSSL_CFLAGS="$nxt_feature_incs"
NXT_POLARSSL_LIBS="$nxt_feature_libs"
else
$echo
$echo $0: error: no PolarSSL library found.
$echo
exit 1;
fi
fi

76
auto/test_build Normal file
View File

@@ -0,0 +1,76 @@
# Copyright (C) Igor Sysoev
# Copyright (C) NGINX, Inc.
# Translate the --test-build-* configure options into NXT_TEST_BUILD*
# macros via auto/have.  These stubs let platform-specific sources be
# compile-checked on systems that lack the real interfaces.
NXT_TEST_BUILD=NO
if [ $NXT_TEST_BUILD_EPOLL = YES ]; then
nxt_have=NXT_TEST_BUILD_EPOLL . auto/have
nxt_have=NXT_TEST_BUILD . auto/have
NXT_TEST_BUILD=YES
fi
if [ $NXT_TEST_BUILD_EVENTPORT = YES ]; then
nxt_have=NXT_TEST_BUILD_EVENTPORT . auto/have
nxt_have=NXT_TEST_BUILD . auto/have
NXT_TEST_BUILD=YES
fi
if [ $NXT_TEST_BUILD_DEVPOLL = YES ]; then
nxt_have=NXT_TEST_BUILD_DEVPOLL . auto/have
nxt_have=NXT_TEST_BUILD . auto/have
NXT_TEST_BUILD=YES
fi
if [ $NXT_TEST_BUILD_POLLSET = YES ]; then
nxt_have=NXT_TEST_BUILD_POLLSET . auto/have
nxt_have=NXT_TEST_BUILD . auto/have
NXT_TEST_BUILD=YES
fi
if [ $NXT_TEST_BUILD_LINUX_SENDFILE = YES ]; then
nxt_have=NXT_TEST_BUILD_LINUX_SENDFILE . auto/have
nxt_have=NXT_TEST_BUILD . auto/have
NXT_TEST_BUILD=YES
fi
if [ $NXT_TEST_BUILD_FREEBSD_SENDFILE = YES ]; then
nxt_have=NXT_TEST_BUILD_FREEBSD_SENDFILE . auto/have
nxt_have=NXT_TEST_BUILD . auto/have
NXT_TEST_BUILD=YES
fi
if [ $NXT_TEST_BUILD_SOLARIS_SENDFILEV = YES ]; then
nxt_have=NXT_TEST_BUILD_SOLARIS_SENDFILEV . auto/have
nxt_have=NXT_TEST_BUILD . auto/have
NXT_TEST_BUILD=YES
fi
if [ $NXT_TEST_BUILD_MACOSX_SENDFILE = YES ]; then
nxt_have=NXT_TEST_BUILD_MACOSX_SENDFILE . auto/have
nxt_have=NXT_TEST_BUILD . auto/have
NXT_TEST_BUILD=YES
fi
if [ $NXT_TEST_BUILD_AIX_SEND_FILE = YES ]; then
nxt_have=NXT_TEST_BUILD_AIX_SEND_FILE . auto/have
nxt_have=NXT_TEST_BUILD . auto/have
NXT_TEST_BUILD=YES
fi
if [ $NXT_TEST_BUILD_HPUX_SENDFILE = YES ]; then
nxt_have=NXT_TEST_BUILD_HPUX_SENDFILE . auto/have
nxt_have=NXT_TEST_BUILD . auto/have
NXT_TEST_BUILD=YES
fi

282
auto/threads Normal file
View File

@@ -0,0 +1,282 @@
# Copyright (C) Igor Sysoev
# Copyright (C) NGINX, Inc.
# Thread support detection: which library provides pthreads, yield and
# spinlock primitives, sem_timedwait() location, and thread-local
# storage (__thread vs. pthread keys).
case "$NXT_SYSTEM" in
Linux)
NXT_PTHREAD="-lpthread"
;;
FreeBSD)
# FreeBSD libc supports only pthread stubs.
NXT_PTHREAD="-lpthread"
;;
SunOS)
case "$NXT_SYSTEM_VERSION" in
5.8 | 5.9)
NXT_PTHREAD="-lpthread"
;;
*)
# Solaris 10 libpthread.so.1 is a filter to libc.so.1.
NXT_PTHREAD=
;;
esac
;;
Darwin)
# MacOSX libpthread.dylib is a symlink to libSystem.dylib.
NXT_PTHREAD=
;;
*)
NXT_PTHREAD="-lpthread"
;;
esac
# Linux, FreeBSD.
nxt_feature="pthread_yield()"
nxt_feature_name=NXT_HAVE_PTHREAD_YIELD
nxt_feature_run=
nxt_feature_incs=
nxt_feature_libs=$NXT_PTHREAD
nxt_feature_test="#define _GNU_SOURCE
#include <pthread.h>
int main() {
pthread_yield();
return 0;
}"
. auto/feature
if [ $nxt_found = no ]; then
# MacOSX.
nxt_feature="pthread_yield_np()"
nxt_feature_name=NXT_HAVE_PTHREAD_YIELD_NP
nxt_feature_run=
nxt_feature_incs=
nxt_feature_libs=$NXT_PTHREAD
nxt_feature_test="#include <pthread.h>
int main() {
pthread_yield_np();
return 0;
}"
. auto/feature
fi
# FreeBSD, Solaris, AIX.
nxt_feature="pthread spinlock"
nxt_feature_name=NXT_HAVE_PTHREAD_SPINLOCK
nxt_feature_run=yes
nxt_feature_incs=
nxt_feature_libs=$NXT_PTHREAD
nxt_feature_test="#include <pthread.h>
int main() {
pthread_spinlock_t lock;
if (pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE) != 0)
return 1;
if (pthread_spin_lock(&lock) != 0)
return 1;
if (pthread_spin_unlock(&lock) != 0)
return 1;
if (pthread_spin_destroy(&lock) != 0)
return 1;
return 0;
}"
. auto/feature
if [ $nxt_found = yes ]; then
# Linux glibc uses 0 as pthread_spinlock_t initial value on the most
# platforms. However, on i386 and x86_64 the initial value is 1.
nxt_feature="pthread spinlock zero initial value"
nxt_feature_name=NXT_HAVE_PTHREAD_SPINLOCK_ZERO
nxt_feature_run=yes
nxt_feature_incs=
nxt_feature_libs=$NXT_PTHREAD
nxt_feature_test="#include <pthread.h>
pthread_spinlock_t lock = 0;
int main() {
if (pthread_spin_trylock(&lock) != 0)
return 1;
if (pthread_spin_unlock(&lock) != 0)
return 1;
return 0;
}"
. auto/feature
fi
if [ $nxt_found = no ]; then
# MacOSX spinlock(3).
nxt_feature="MacOSX spinlock"
nxt_feature_name=NXT_HAVE_MACOSX_SPINLOCK
nxt_feature_run=yes
nxt_feature_incs=
nxt_feature_libs=$NXT_PTHREAD
nxt_feature_test="#include <libkern/OSAtomic.h>
int main() {
OSSpinLock lock = 0;
if (OSSpinLockTry(&lock) == 0)
return 1;
OSSpinLockUnlock(&lock);
return 0;
}"
. auto/feature
fi
# sem_timedwait() may live in libc, libpthread (Linux) or librt
# (Solaris 10); the retries below keep the last successful libs.
nxt_feature="sem_timedwait()"
nxt_feature_name=NXT_HAVE_SEM_TIMEDWAIT
nxt_feature_run=yes
nxt_feature_incs=
nxt_feature_libs=
nxt_feature_test="#include <semaphore.h>
int main() {
sem_t sem;
struct timespec ts;
if (sem_init(&sem, 0, 0) != 0)
return 1;
if (sem_post(&sem) != 0)
return 1;
ts.tv_sec = 0;
ts.tv_nsec = 0;
if (sem_timedwait(&sem, &ts) != 0)
return 1;
if (sem_destroy(&sem) != 0)
return 1;
return 0;
}"
. auto/feature
if [ $nxt_found = no ]; then
if [ -n "$NXT_PTHREAD" ]; then
# Linux requires libpthread.
nxt_feature="sem_timedwait() in libpthread"
nxt_feature_libs=$NXT_PTHREAD
. auto/feature
fi
if [ $nxt_found = no ]; then
# Solaris 10 requires librt.
nxt_feature="sem_timedwait() in librt"
nxt_feature_libs="-lrt"
. auto/feature
if [ $nxt_found = yes ]; then
NXT_LIBRT="-lrt"
fi
fi
fi
# Thread Local Storage / Thread Specific Data.
#
# Linux, FreeBSD 5.3, Solaris.
# MacOSX 10.7 (Lion) Clang. However, if the -mmacosx-version-min
# option is specified it must be at least 10.7.
nxt_feature="__thread"
nxt_feature_name=NXT_HAVE_THREAD_STORAGE_CLASS
nxt_feature_run=yes
nxt_feature_incs=
nxt_feature_libs=$NXT_PTHREAD
nxt_feature_test="#include <pthread.h>
#include <stdlib.h>
__thread int key;
void *func(void *p);
void *func(void *p) {
key = 0x9abcdef0;
return NULL;
}
int main() {
void *n;
pthread_t pt;
key = 0x12345678;
if (pthread_create(&pt, NULL, func, NULL))
return 1;
if (pthread_join(pt, &n))
return 1;
if (key != 0x12345678)
return 1;
return 0;
}"
. auto/feature
if [ $nxt_found = no ]; then
# MacOSX GCC lacks __thread support.
# On NetBSD __thread causes segmentation fault.
nxt_feature="pthread_key_t"
nxt_feature_name=NXT_HAVE_PTHREAD_SPECIFIC_DATA
nxt_feature_run=yes
nxt_feature_incs=
nxt_feature_libs=$NXT_PTHREAD
nxt_feature_test="#include <pthread.h>
int main() {
pthread_key_t key = -1;
if (pthread_key_create(&key, NULL))
return 1;
if (pthread_setspecific(key, (void *) 0x12345678))
return 1;
if (pthread_getspecific(key) != (void *) 0x12345678)
return 1;
return 0;
}"
. auto/feature
# Value-only probe: nxt_feature_name is intentionally empty, the
# PTHREAD_KEYS_MAX value itself is reported by auto/feature.
nxt_feature="PTHREAD_KEYS_MAX"
nxt_feature_name=
nxt_feature_run=value
nxt_feature_incs=
nxt_feature_libs=
nxt_feature_test="#include <limits.h>
#include <pthread.h>
#include <stdio.h>
int main() {
printf(\"%d\", PTHREAD_KEYS_MAX);
return 0;
}"
. auto/feature
fi

226
auto/time Normal file
View File

@@ -0,0 +1,226 @@
# Copyright (C) Igor Sysoev
# Copyright (C) NGINX, Inc.
# Clock source detection: probe the cheap "coarse"/"fast" clocks first,
# then the standard POSIX ones, collecting any extra link library in
# NXT_LIBRT / NXT_LIBHG along the way.
# Linux 2.6.32 CLOCK_REALTIME_COARSE.
# Linux clock_gettime() is in librt.
NXT_LIBRT=
nxt_feature="Linux clock_gettime(CLOCK_REALTIME_COARSE)"
nxt_feature_name=NXT_HAVE_CLOCK_REALTIME_COARSE
nxt_feature_run=yes
nxt_feature_incs=
nxt_feature_libs="-lrt"
nxt_feature_test="#include <time.h>
int main() {
struct timespec ts;
if (clock_gettime(CLOCK_REALTIME_COARSE, &ts) == -1)
return 1;
return 0;
}"
. auto/feature
if [ $nxt_found = yes ]; then
NXT_LIBRT=$nxt_feature_libs
fi
# FreeBSD 7.0 CLOCK_REALTIME_FAST
nxt_feature="FreeBSD clock_gettime(CLOCK_REALTIME_FAST)"
nxt_feature_name=NXT_HAVE_CLOCK_REALTIME_FAST
nxt_feature_run=yes
nxt_feature_incs=
nxt_feature_libs=
nxt_feature_test="#include <time.h>
int main() {
struct timespec ts;
if (clock_gettime(CLOCK_REALTIME_FAST, &ts) == -1)
return 1;
return 0;
}"
. auto/feature
nxt_feature="clock_gettime(CLOCK_REALTIME)"
nxt_feature_name=NXT_HAVE_CLOCK_REALTIME
nxt_feature_run=yes
nxt_feature_incs=
nxt_feature_libs=
nxt_feature_test="#include <time.h>
int main() {
struct timespec ts;
if (clock_gettime(CLOCK_REALTIME, &ts) == -1)
return 1;
return 0;
}"
. auto/feature
if [ $nxt_found = no ]; then
# Linux and Solaris 10 clock_gettime() are in librt.
# NOTE: this retry reuses nxt_feature_name/test from the check above.
nxt_feature="clock_gettime(CLOCK_REALTIME) in librt"
nxt_feature_libs="-lrt"
. auto/feature
if [ $nxt_found = yes ]; then
NXT_LIBRT=$nxt_feature_libs
fi
fi
# Linux 2.6.32 CLOCK_MONOTONIC_COARSE.
# Linux clock_gettime() is in librt.
nxt_feature="Linux clock_gettime(CLOCK_MONOTONIC_COARSE)"
nxt_feature_name=NXT_HAVE_CLOCK_MONOTONIC_COARSE
nxt_feature_run=yes
nxt_feature_incs=
nxt_feature_libs="-lrt"
nxt_feature_test="#include <time.h>
int main() {
struct timespec ts;
if (clock_gettime(CLOCK_MONOTONIC_COARSE, &ts) == -1)
return 1;
return 0;
}"
. auto/feature
if [ $nxt_found = yes ]; then
NXT_LIBRT=$nxt_feature_libs
fi
# FreeBSD 7.0 CLOCK_MONOTONIC_FAST
nxt_feature="FreeBSD clock_gettime(CLOCK_MONOTONIC_FAST)"
nxt_feature_name=NXT_HAVE_CLOCK_MONOTONIC_FAST
nxt_feature_run=yes
nxt_feature_incs=
nxt_feature_libs=
nxt_feature_test="#include <time.h>
int main() {
struct timespec ts;
if (clock_gettime(CLOCK_MONOTONIC_FAST, &ts) == -1)
return 1;
return 0;
}"
. auto/feature
nxt_feature="clock_gettime(CLOCK_MONOTONIC)"
nxt_feature_name=NXT_HAVE_CLOCK_MONOTONIC
nxt_feature_run=yes
nxt_feature_incs=
nxt_feature_libs=
nxt_feature_test="#include <time.h>
int main() {
struct timespec ts;
if (clock_gettime(CLOCK_MONOTONIC, &ts) == -1)
return 1;
return 0;
}"
. auto/feature
if [ $nxt_found = no ]; then
# Linux and Solaris 10 clock_gettime() are in librt.
# NOTE: this retry reuses nxt_feature_name/test from the check above.
nxt_feature="clock_gettime(CLOCK_MONOTONIC) in librt"
nxt_feature_libs="-lrt"
. auto/feature
if [ $nxt_found = yes ]; then
NXT_LIBRT=$nxt_feature_libs
fi
fi
# HP-UX Mercury Library hg_gethrtime().
NXT_LIBHG=
nxt_feature="HP-UX hg_gethrtime()"
nxt_feature_name=NXT_HAVE_HG_GETHRTIME
nxt_feature_run=yes
nxt_feature_incs=
nxt_feature_libs="-lhg"
nxt_feature_test="#include <stdlib.h>
#include <sys/mercury.h>
int main() {
hg_gethrtime();
return 0;
}"
. auto/feature
if [ $nxt_found = yes ]; then
NXT_LIBHG=$nxt_feature_libs
fi
# BSD-style UTC offset field in struct tm (compile-only check:
# nxt_feature_run is empty).
nxt_feature="struct tm.tm_gmtoff"
nxt_feature_name=NXT_HAVE_TM_GMTOFF
nxt_feature_run=
nxt_feature_incs=
nxt_feature_libs=
nxt_feature_test="#include <time.h>
int main() {
time_t t;
struct tm tm;
t = 0;
localtime_r(&t, &tm);
return tm.tm_gmtoff;
}"
. auto/feature
# SysV "altzone" global (seconds west of UTC for daylight time).
nxt_feature="altzone"
nxt_feature_name=NXT_HAVE_ALTZONE
nxt_feature_run=
nxt_feature_incs=
nxt_feature_libs=
nxt_feature_test="#include <time.h>
int main() {
altzone = 0;
return 0;
}"
. auto/feature
nxt_feature="localtime_r()"
nxt_feature_name=NXT_HAVE_LOCALTIME_R
nxt_feature_run=
nxt_feature_incs=
nxt_feature_libs=
nxt_feature_test="#include <time.h>
int main() {
time_t t;
struct tm tm;
t = 0;
localtime_r(&t, &tm);
return 0;
}"
. auto/feature

118
auto/types Normal file
View File

@@ -0,0 +1,118 @@
# Copyright (C) Igor Sysoev
# Copyright (C) NGINX, Inc.
# Sizes of C types.
# "-Wall -Werror" or similar constraints in default CFLAGS may require
# to use "%zu" format to printf() result of sizeof(). But "%zu" may
# be unavailable, so the "(int)" cast is a simple and portable solution:
# printf("%d", (int) sizeof(TYPE));
# Each test runs a tiny program (nxt_feature_run=value) and captures
# the printed size into nxt_feature_value / the NXT_*_SIZE macro.
nxt_feature="int size"
nxt_feature_name=NXT_INT_SIZE
nxt_feature_run=value
nxt_feature_incs=
nxt_feature_libs=
nxt_feature_test="#include <stdio.h>
int main() {
printf(\"%d\", (int) sizeof(int));
return 0;
}"
. auto/feature
nxt_feature="long size"
nxt_feature_name=NXT_LONG_SIZE
nxt_feature_run=value
nxt_feature_incs=
nxt_feature_libs=
nxt_feature_test="#include <stdio.h>
int main() {
printf(\"%d\", (int) sizeof(long));
return 0;
}"
. auto/feature
nxt_feature="long long size"
nxt_feature_name=NXT_LONG_LONG_SIZE
nxt_feature_run=value
nxt_feature_incs=
nxt_feature_libs=
nxt_feature_test="#include <stdio.h>
int main() {
printf(\"%d\", (int) sizeof(long long));
return 0;
}"
. auto/feature
nxt_feature="void * size"
nxt_feature_name=NXT_PTR_SIZE
nxt_feature_run=value
nxt_feature_incs=
nxt_feature_libs=
nxt_feature_test="#include <stdio.h>
int main() {
printf(\"%d\", (int) sizeof(void *));
return 0;
}"
. auto/feature
# NXT_64BIT is derived from the pointer size and is consumed by later
# checks (e.g. the AIX xlC atomics test in auto/atomic).
case "$nxt_feature_value" in
8) NXT_64BIT=1 ;;
*) NXT_64BIT=0 ;;
esac
nxt_feature="size_t size"
nxt_feature_name=NXT_SIZE_T_SIZE
nxt_feature_run=value
nxt_feature_incs=
nxt_feature_libs=
nxt_feature_test="#include <stdio.h>
int main() {
printf(\"%d\", (int) sizeof(size_t));
return 0;
}"
. auto/feature
# _FILE_OFFSET_BITS=64 is defined so 32-bit platforms report the
# large-file off_t size actually used by the build.
nxt_feature="off_t size"
nxt_feature_name=NXT_OFF_T_SIZE
nxt_feature_run=value
nxt_feature_incs=
nxt_feature_libs=
nxt_feature_test="#define _FILE_OFFSET_BITS 64
#include <unistd.h>
#include <stdio.h>
int main() {
printf(\"%d\", (int) sizeof(off_t));
return 0;
}"
. auto/feature
nxt_feature="time_t size"
nxt_feature_name=NXT_TIME_T_SIZE
nxt_feature_run=value
nxt_feature_incs=
nxt_feature_libs=
nxt_feature_test="#include <time.h>
#include <stdio.h>
int main() {
printf(\"%d\", (int) sizeof(time_t));
return 0;
}"
. auto/feature

153
auto/unix Normal file
View File

@@ -0,0 +1,153 @@
# Copyright (C) Igor Sysoev
# Copyright (C) NGINX, Inc.
# Miscellaneous Unix API availability checks (compile-only unless noted).
nxt_feature="arc4random()"
nxt_feature_name=NXT_HAVE_ARC4RANDOM
nxt_feature_run=
nxt_feature_incs=
nxt_feature_libs=
nxt_feature_test="#include <stdlib.h>
int main() {
(void) arc4random();
return 0;
}"
. auto/feature
# Linux 3.17 getrandom().
nxt_feature="getrandom()"
nxt_feature_name=NXT_HAVE_GETRANDOM
nxt_feature_run=
nxt_feature_incs=
nxt_feature_libs=
nxt_feature_test="#include <linux/random.h>
int main() {
char buf[4];
(void) getrandom(buf, 4, 0);
return 0;
}"
. auto/feature
# ucontext API (getcontext/makecontext/setcontext) for user-level
# context switching.
nxt_feature="ucontext"
nxt_feature_name=NXT_HAVE_UCONTEXT
nxt_feature_run=
nxt_feature_incs=
nxt_feature_libs=
nxt_feature_test="#include <stdlib.h>
#include <ucontext.h>
int main() {
ucontext_t uc;
if (getcontext(&uc) == 0) {
makecontext(&uc, NULL, 0);
setcontext(&uc);
}
return 0;
}"
. auto/feature
if [ $nxt_found = no ]; then
# MacOSX 10.6 (Snow Leopard) has deprecated ucontext
# and requires _XOPEN_SOURCE to be defined.
nxt_feature="_XOPEN_SOURCE ucontext"
nxt_feature_name=NXT_HAVE_UCONTEXT
nxt_feature_run=
nxt_feature_incs=
nxt_feature_libs=
nxt_feature_test="#define _XOPEN_SOURCE
#include <stdlib.h>
#include <ucontext.h>
int main() {
ucontext_t uc;
if (getcontext(&uc) == 0) {
makecontext(&uc, NULL, 0);
setcontext(&uc);
}
return 0;
}"
. auto/feature
fi
# FreeBSD dlopen() is in libc.
# MacOSX libdl.dylib is a symlink to libSystem.dylib.
NXT_LIBDL=
nxt_feature="dlopen()"
nxt_feature_name=NXT_HAVE_DLOPEN
nxt_feature_run=
nxt_feature_incs=
nxt_feature_libs=
nxt_feature_test="#include <stdlib.h>
#include <dlfcn.h>
int main() {
dlopen(NULL, 0);
return 0;
}"
. auto/feature
if [ $nxt_found = no ]; then
# Linux and Solaris prior to 10 require libdl.
# Solaris 10 libdl.so.1 is a filter to /usr/lib/ld.so.1.
# NOTE: reuses nxt_feature_name/test from the check above.
nxt_feature="dlopen() in libdl"
nxt_feature_libs="-ldl"
. auto/feature
if [ $nxt_found = yes ]; then
NXT_LIBDL="-ldl"
fi
fi
nxt_feature="posix_spawn()"
nxt_feature_name=NXT_HAVE_POSIX_SPAWN
nxt_feature_run=
nxt_feature_incs=
nxt_feature_libs=
nxt_feature_test="#include <spawn.h>
#include <unistd.h>
int main() {
(void) posix_spawn(NULL, NULL, NULL, NULL, NULL, NULL);
return 0;
}"
. auto/feature
# NetBSD 1.0, OpenBSD 1.0, FreeBSD 2.2 setproctitle().
nxt_feature="setproctitle()"
nxt_feature_name=NXT_HAVE_SETPROCTITLE
nxt_feature_run=
nxt_feature_incs=
nxt_feature_libs=
nxt_feature_test="#include <stdlib.h>
#include <unistd.h>
int main() {
setproctitle(\"%s\", \"title\");
return 0;
}"
. auto/feature

116
configure vendored Executable file
View File

@@ -0,0 +1,116 @@
#!/bin/sh
# Copyright (C) Igor Sysoev
# Copyright (C) NGINX, Inc.
# Top-level configure driver: sets up the build directory, runs the
# feature probes in auto/* in dependency order, and finally generates
# nxt_auto_config.h and the Makefile.
# Disable localized program messages.
LANG=C
export LANG
# Stop on error exit status.
set -e
# Stop on uninitialized variable.
set -u
# Initialize variables with null values if they are not defined.
CFLAGS=${CFLAGS=}
NXT_TEST_CFLAGS=${NXT_TEST_CFLAGS=}
NXT_TEST_LIBS=${NXT_TEST_LIBS=}
NXT_UNIT_TEST_TARGETS=${NXT_UNIT_TEST_TARGETS=}
. auto/os/test
. auto/options
test -d $NXT_BUILD_DIR || mkdir $NXT_BUILD_DIR
NXT_AUTOTEST=$NXT_BUILD_DIR/autotest
NXT_AUTOCONF_ERR=$NXT_BUILD_DIR/autoconf.err
NXT_AUTO_CONFIG_H=$NXT_BUILD_DIR/nxt_auto_config.h
NXT_MAKEFILE=$NXT_BUILD_DIR/Makefile
# Truncate the error log and the generated config header.
> $NXT_AUTOCONF_ERR
> $NXT_AUTO_CONFIG_H
. auto/cc/test
cat << END >> $NXT_AUTO_CONFIG_H
#define NXT_CONFIGURE_OPTIONS "$NXT_CONFIGURE_OPTIONS"
#define NXT_SYSTEM_VERSION "$NXT_SYSTEM $NXT_SYSTEM_VERSION $NXT_SYSTEM_PLATFORM"
#define NXT_COMPILER_VERSION "$NXT_CC_VERSION"
END
if [ $echo = echo ]; then
# Build a portable "echo" program that supports only "-n" option.
# This also tests C compiler ability to create executables.
. auto/echo/build
fi
nxt_have=NXT_UNIX . auto/have
if [ $NXT_UNIX_DOMAIN = YES ]; then
nxt_have=NXT_HAVE_UNIX_DOMAIN . auto/have
fi
# Feature probes; auto/types must run early because later checks
# (e.g. auto/atomic) use NXT_64BIT.
. auto/types
. auto/clang
. auto/atomic
. auto/malloc
. auto/mmap
. auto/time
if [ $NXT_THREADS = YES ]; then
. auto/threads
else
NXT_PTHREAD=
fi
. auto/events
. auto/sockets
. auto/sendfile
. auto/files
. auto/unix
. auto/os/conf
. auto/ssltls
. auto/pcre
case "$NXT_SYSTEM_PLATFORM" in
i386 | amd64 | x86_64)
nxt_have=NXT_HAVE_LITTLE_ENDIAN . auto/have
nxt_have=NXT_HAVE_NONALIGNED . auto/have
;;
esac
if [ $NXT_DEBUG = YES ]; then
nxt_have=NXT_DEBUG . auto/have
fi
if [ $NXT_THREADS = YES ]; then
nxt_have=NXT_THREADS . auto/have
fi
. auto/test_build
. auto/sources
# LOOK
# Aggregate third-party compile/link flags collected by auto/ssltls
# and auto/pcre for the library build.
NXT_LIB_AUX_CFLAGS="$NXT_OPENSSL_CFLAGS $NXT_GNUTLS_CFLAGS \\
$NXT_CYASSL_CFLAGS $NXT_POLARSSL_CFLAGS \\
$NXT_PCRE_CFLAGS"
NXT_LIB_AUX_LIBS="$NXT_OPENSSL_LIBS $NXT_GNUTLS_LIBS \\
$NXT_CYASSL_LIBS $NXT_POLARSSL_LIBS \\
$NXT_PCRE_LIB"
. auto/modules/conf
. auto/make

128
src/nxt_aix_send_file.c Normal file
View File

@@ -0,0 +1,128 @@
/*
* Copyright (C) Igor Sysoev
* Copyright (C) NGINX, Inc.
*/
#include <nxt_main.h>
/* send_file() has been introduced in AIX 4.3.2 */
ssize_t nxt_aix_event_conn_io_send_file(nxt_event_conn_t *c, nxt_buf_t *b,
size_t limit);
/*
 * Send a buffer chain on connection "c" using the AIX send_file() call,
 * which transmits optional memory header, a file region, and optional
 * memory trailer in one operation.  Falls back to writev() when the
 * chain does not match that shape.  Returns the number of bytes sent,
 * 0 for a sync-only chain, or NXT_ERROR.
 */
ssize_t
nxt_aix_event_conn_io_send_file(nxt_event_conn_t *c, nxt_buf_t *b, size_t limit)
{
ssize_t n;
nxt_buf_t *fb;
nxt_err_t err;
nxt_off_t file_size, sent;
nxt_uint_t nhd, ntr;
struct iovec hd[NXT_IOBUF_MAX], tr;
struct sf_parms sfp;
nxt_sendbuf_coalesce_t sb;
sb.buf = b;
sb.iobuf = hd;
sb.nmax = NXT_IOBUF_MAX;
sb.sync = 0;
sb.size = 0;
sb.limit = limit;
/* Gather leading memory buffers into the header iovec array. */
nhd = nxt_sendbuf_mem_coalesce(&sb);
if (nhd == 0 && sb.sync) {
return 0;
}
/*
 * send_file() can carry only a single header iovec, so more than one
 * header buffer, or a chain with no file buffer, goes through writev().
 */
if (nhd > 1 || sb.buf == NULL || !nxt_buf_is_file(sb.buf)) {
return nxt_event_conn_io_writev(c, hd, nhd);
}
fb = sb.buf;
file_size = nxt_sendbuf_file_coalesce(&sb);
if (file_size == 0) {
return nxt_event_conn_io_writev(c, hd, nhd);
}
/* At most one trailing memory buffer becomes the trailer. */
sb.iobuf = &tr;
sb.nmax = 1;
ntr = nxt_sendbuf_mem_coalesce(&sb);
nxt_memzero(&sfp, sizeof(struct sf_parms));
if (nhd != 0) {
sfp.header_data = hd[0].iov_base;
sfp.header_length = hd[0].iov_len;
}
sfp.file_descriptor = fb->file->fd;
sfp.file_offset = fb->file_pos;
sfp.file_bytes = file_size;
if (ntr != 0) {
sfp.trailer_data = tr.iov_base;
sfp.trailer_length = tr.iov_len;
}
nxt_log_debug(c->socket.log, "send_file(%d) fd:%FD @%O:%O hd:%ui tr:%ui",
c->socket.fd, fb->file->fd, fb->file_pos, file_size,
nhd, ntr);
n = send_file(&c->socket.fd, &sfp, 0);
err = (n == -1) ? nxt_errno : 0;
/* send_file() reports progress via sf_parms even on failure. */
sent = sfp.bytes_sent;
nxt_log_debug(c->socket.log, "send_file(%d): %d sent:%O",
c->socket.fd, n, sent);
/*
 * -1 an error has occurred, errno contains the error code;
 * 0 the command has completed successfully;
 * 1 the command was completed partially, some data has been
 * transmitted but the command has to return for some reason,
 * for example, the command was interrupted by signals.
 */
if (n == -1) {
switch (err) {
case NXT_EAGAIN:
c->socket.write_ready = 0;
break;
case NXT_EINTR:
break;
default:
c->socket.error = err;
nxt_log_error(nxt_socket_error_level(err, c->socket.log_error),
c->socket.log, "send_file(%d) failed %E \"%FN\" "
"fd:%FD @%O:%O hd:%ui tr:%ui", c->socket.fd, err,
fb->file->name, fb->file->fd, fb->file_pos,
file_size, nhd, ntr);
return NXT_ERROR;
}
nxt_log_debug(c->socket.log, "sendfile() %E", err);
/* EAGAIN/EINTR: report what was sent so the caller can retry. */
return sent;
}
if (n == 1) {
return sent;
}
/* A short write means the socket buffer is full. */
if (sent < (nxt_off_t) sb.size) {
c->socket.write_ready = 0;
}
return sent;
}

126
src/nxt_app_log.c Normal file
View File

@@ -0,0 +1,126 @@
/*
* Copyright (C) Igor Sysoev
* Copyright (C) NGINX, Inc.
*/
#include <nxt_main.h>
#include <nxt_cycle.h>
static nxt_time_string_t nxt_log_error_time_cache;
static u_char *nxt_log_error_time(u_char *buf, nxt_realtime_t *now,
struct tm *tm, size_t size, const char *format);
static nxt_time_string_t nxt_log_debug_time_cache;
static u_char *nxt_log_debug_time(u_char *buf, nxt_realtime_t *now,
struct tm *tm, size_t size, const char *format);
/*
 * Log handler that prefixes each message with a cached timestamp,
 * "[level] pid#tid(#fiber)" and an optional request ident, writes the
 * line to stderr, and mirrors alert-or-worse messages to syslog
 * (without the timestamp prefix).
 */
void nxt_cdecl
nxt_log_time_handler(nxt_uint_t level, nxt_log_t *log, const char *fmt, ...)
{
u_char *p, *syslogmsg, *end;
va_list args;
nxt_fid_t fid;
const char *id;
nxt_fiber_t *fib;
nxt_thread_t *thr;
nxt_time_string_t *time_cache;
u_char msg[NXT_MAX_ERROR_STR];
thr = nxt_thread();
end = msg + NXT_MAX_ERROR_STR;
/* Debug logs get millisecond-resolution timestamps. */
time_cache = (log->level != NXT_LOG_DEBUG) ? &nxt_log_error_time_cache:
&nxt_log_debug_time_cache;
p = nxt_thread_time_string(thr, time_cache, msg);
/* syslog output starts after the timestamp. */
syslogmsg = p;
fib = nxt_fiber_self(thr);
if (fib != NULL) {
id = "[%V] %PI#%PT#%PF ";
fid = nxt_fiber_id(fib);
} else {
id = "[%V] %PI#%PT ";
fid = 0;
}
p = nxt_sprintf(p, end, id, &nxt_log_levels[level], nxt_pid,
nxt_thread_tid(thr), fid);
if (log->ident != 0) {
p = nxt_sprintf(p, end, "*%D ", log->ident);
}
va_start(args, fmt);
p = nxt_vsprintf(p, end, fmt, args);
va_end(args);
if (level != NXT_LOG_DEBUG && log->ctx_handler != NULL) {
p = log->ctx_handler(log->ctx, p, end);
}
/* Reserve room for the trailing linefeed if the buffer overflowed. */
if (p > end - NXT_LINEFEED_SIZE) {
p = end - NXT_LINEFEED_SIZE;
}
nxt_linefeed(p);
(void) nxt_write_console(nxt_stderr, msg, p - msg);
if (level <= NXT_LOG_ALERT) {
*(p - NXT_LINEFEED_SIZE) = '\0';
/*
 * The syslog LOG_ALERT level is enough, because
 * LOG_EMERG level broadcasts a message to all users.
 */
nxt_write_syslog(LOG_ALERT, syslogmsg);
}
}
/* Cached second-resolution local-time string for non-debug messages. */
static nxt_time_string_t nxt_log_error_time_cache = {
(nxt_atomic_uint_t) -1,
nxt_log_error_time,
"%4d/%02d/%02d %02d:%02d:%02d ",
sizeof("1970/09/28 12:00:00 ") - 1,
NXT_THREAD_TIME_LOCAL,
NXT_THREAD_TIME_MSEC,
};
/* Format "YYYY/MM/DD HH:MM:SS " into buf; "now" is unused here. */
static u_char *
nxt_log_error_time(u_char *buf, nxt_realtime_t *now, struct tm *tm, size_t size,
const char *format)
{
return nxt_sprintf(buf, buf + size, format,
tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday,
tm->tm_hour, tm->tm_min, tm->tm_sec);
}
/* Cached millisecond-resolution local-time string for debug messages. */
static nxt_time_string_t nxt_log_debug_time_cache = {
(nxt_atomic_uint_t) -1,
nxt_log_debug_time,
"%4d/%02d/%02d %02d:%02d:%02d.%03d ",
sizeof("1970/09/28 12:00:00.000 ") - 1,
NXT_THREAD_TIME_LOCAL,
NXT_THREAD_TIME_MSEC,
};
/* Format "YYYY/MM/DD HH:MM:SS.mmm " into buf using now->nsec for ms. */
static u_char *
nxt_log_debug_time(u_char *buf, nxt_realtime_t *now, struct tm *tm, size_t size,
const char *format)
{
return nxt_sprintf(buf, buf + size, format,
tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday,
tm->tm_hour, tm->tm_min, tm->tm_sec,
now->nsec / 1000000);
}

903
src/nxt_application.c Normal file
View File

@@ -0,0 +1,903 @@
/*
* Copyright (C) Igor Sysoev
* Copyright (C) Valentin V. Bartenev
* Copyright (C) NGINX, Inc.
*/
#include <nxt_main.h>
#include <nxt_cycle.h>
#include <nxt_application.h>
#define NXT_PARSE_AGAIN (u_char *) -1
/*
 * Forward declarations.  NOTE(review): "complettion" is a misspelling
 * of "completion" kept as-is because the names are used throughout
 * this file.
 */
static void nxt_app_thread(void *ctx);
static nxt_app_request_t *nxt_app_request_create(nxt_socket_t s,
nxt_log_t *log);
static void nxt_app_conn_update(nxt_thread_t *thr, nxt_event_conn_t *c,
nxt_log_t *log);
static nxt_int_t nxt_app_write_finish(nxt_app_request_t *r);
static void nxt_app_buf_complettion(nxt_thread_t *thr, void *obj, void *data);
static void nxt_app_delivery_handler(nxt_thread_t *thr, void *obj, void *data);
static void nxt_app_delivery_ready(nxt_thread_t *thr, void *obj, void *data);
static void nxt_app_delivery_complettion(nxt_thread_t *thr, void *obj,
void *data);
static void nxt_app_delivery_error(nxt_thread_t *thr, void *obj, void *data);
static void nxt_app_delivery_timeout(nxt_thread_t *thr, void *obj, void *data);
static nxt_msec_t nxt_app_delivery_timer_value(nxt_event_conn_t *c,
uintptr_t data);
static void nxt_app_delivery_done(nxt_thread_t *thr, nxt_event_conn_t *c);
static void nxt_app_close_request(nxt_thread_t *thr, nxt_app_request_t *r);
/*
 * Incremental HTTP parser state: "pos" is the resume position and
 * "handler" the parsing stage to re-enter after more data arrives.
 */
typedef struct nxt_app_http_parse_state_s nxt_app_http_parse_state_t;
struct nxt_app_http_parse_state_s {
u_char *pos;
nxt_int_t (*handler)(nxt_app_request_header_t *h, u_char *start,
u_char *end, nxt_app_http_parse_state_t *state);
};
static nxt_int_t nxt_app_http_parse_request(nxt_app_request_t *r, u_char *buf,
size_t size);
static nxt_int_t nxt_app_http_parse_request_line(nxt_app_request_header_t *h,
u_char *start, u_char *end, nxt_app_http_parse_state_t *state);
static nxt_int_t nxt_app_http_parse_field_value(nxt_app_request_header_t *h,
u_char *start, u_char *end, nxt_app_http_parse_state_t *state);
static nxt_int_t nxt_app_http_parse_field_name(nxt_app_request_header_t *h,
u_char *start, u_char *end, nxt_app_http_parse_state_t *state);
static nxt_int_t nxt_app_http_process_headers(nxt_app_request_t *r);
static const nxt_event_conn_state_t nxt_app_delivery_write_state;
/* Only the Python application module exists at this point. */
static nxt_application_module_t *nxt_app = &nxt_python_module;
/* Shared output-buffer pool, guarded by nxt_app_mutex / nxt_app_cond. */
static nxt_thread_mutex_t nxt_app_mutex;
static nxt_thread_cond_t nxt_app_cond;
static nxt_buf_t *nxt_app_buf_free;
static nxt_buf_t *nxt_app_buf_done;
static nxt_event_engine_t *nxt_app_engine;
static nxt_mem_pool_t *nxt_app_mem_pool;
static nxt_uint_t nxt_app_buf_current_number;
static nxt_uint_t nxt_app_buf_max_number = 16;
/*
 * Create the synchronization primitives shared with the delivery engine
 * and spawn the application worker thread.  Returns NXT_OK on success.
 * NOTE(review): on the malloc/create failure paths the already-created
 * mutex/cond are not destroyed; presumably acceptable at startup —
 * confirm.
 */
nxt_int_t
nxt_app_start(nxt_cycle_t *cycle)
{
nxt_thread_link_t *link;
nxt_thread_handle_t handle;
if (nxt_slow_path(nxt_thread_mutex_create(&nxt_app_mutex) != NXT_OK)) {
return NXT_ERROR;
}
if (nxt_slow_path(nxt_thread_cond_create(&nxt_app_cond) != NXT_OK)) {
return NXT_ERROR;
}
link = nxt_malloc(sizeof(nxt_thread_link_t));
if (nxt_fast_path(link != NULL)) {
link->start = nxt_app_thread;
link->data = cycle;
link->engine = NULL;
link->exit = NULL;
return nxt_thread_create(&handle, link);
}
return NXT_ERROR;
}
/* Size of the per-thread request read buffer. */
#define SIZE 4096
/*
 * Application worker thread: accepts connections one at a time,
 * reads and parses the request, runs the application module, and
 * hands the response off to the event engine.  The read buffer and
 * header-field array live on this thread's stack and are reused for
 * every request.
 */
static void
nxt_app_thread(void *ctx)
{
ssize_t n;
nxt_err_t err;
nxt_cycle_t *cycle;
nxt_socket_t s;
nxt_thread_t *thr;
nxt_app_request_t *r;
nxt_event_engine_t **engines;
nxt_listen_socket_t *ls;
u_char buf[SIZE];
const size_t size = SIZE;
nxt_app_header_field_t fields[128];
thr = nxt_thread();
nxt_log_debug(thr->log, "app thread");
cycle = ctx;
engines = cycle->engines->elts;
/* Responses are posted to the first (main) engine. */
nxt_app_engine = engines[0];
nxt_app_mem_pool = nxt_mem_pool_create(512);
if (nxt_slow_path(nxt_app_mem_pool == NULL)) {
return;
}
if (nxt_slow_path(nxt_app->init(thr) != NXT_OK)) {
nxt_log_debug(thr->log, "application init failed");
}
ls = cycle->listen_sockets->elts;
for ( ;; ) {
/* Blocking accept: this thread serves one connection at a time. */
s = accept(ls->socket, NULL, NULL);
if (nxt_slow_path(s == -1)) {
err = nxt_socket_errno;
nxt_log_error(NXT_LOG_ERR, thr->log, "accept(%d) failed %E",
ls->socket, err);
if (err == EBADF) {
/* STUB: ls->socket has been closed on exit. */
return;
}
continue;
}
nxt_log_debug(thr->log, "accept(%d): %d", ls->socket, s);
n = recv(s, buf, size, 0);
if (nxt_slow_path(n <= 0)) {
err = (n == 0) ? 0 : nxt_socket_errno;
nxt_log_error(NXT_LOG_ERR, thr->log, "recv(%d, %uz) failed %E",
s, size, err);
close(s);
continue;
}
nxt_log_debug(thr->log, "recv(%d, %uz): %z", s, size, n);
r = nxt_app_request_create(s, thr->log);
if (nxt_slow_path(r == NULL)) {
goto fail;
}
/* The fields array is stack-owned; valid only for this iteration. */
r->header.fields = fields;
//nxt_app->start(r);
if (nxt_app_http_parse_request(r, buf, n) != NXT_OK) {
nxt_log_debug(thr->log, "nxt_app_http_parse_request() failed");
nxt_mem_pool_destroy(r->mem_pool);
goto fail;
}
if (nxt_app_http_process_headers(r) != NXT_OK) {
nxt_log_debug(thr->log, "nxt_app_http_process_headers() failed");
nxt_mem_pool_destroy(r->mem_pool);
goto fail;
}
nxt_app->run(r);
if (nxt_slow_path(nxt_app_write_finish(r) == NXT_ERROR)) {
goto fail;
}
continue;
fail:
close(s);
/* Back off before accepting again after a failure. */
nxt_nanosleep(1000000000); /* 1s */
}
}
/*
 * Allocate a request object and its embedded connection from a fresh
 * memory pool.  Returns NULL on allocation failure.
 *
 * Fix: the original leaked the newly created pool when either
 * nxt_mem_zalloc() call failed; the pool is now destroyed on those
 * error paths.
 */
static nxt_app_request_t *
nxt_app_request_create(nxt_socket_t s, nxt_log_t *log)
{
nxt_mem_pool_t *mp;
nxt_event_conn_t *c;
nxt_app_request_t *r;
mp = nxt_mem_pool_create(1024);
if (nxt_slow_path(mp == NULL)) {
return NULL;
}
r = nxt_mem_zalloc(mp, sizeof(nxt_app_request_t));
if (nxt_slow_path(r == NULL)) {
nxt_mem_pool_destroy(mp);
return NULL;
}
c = nxt_mem_zalloc(mp, sizeof(nxt_event_conn_t));
if (nxt_slow_path(c == NULL)) {
nxt_mem_pool_destroy(mp);
return NULL;
}
c->socket.fd = s;
c->socket.data = r;
r->mem_pool = mp;
r->event_conn = c;
r->log = log;
return r;
}
/*
 * Drive the incremental HTTP parser over the preread buffer, calling
 * recv() for more data whenever a stage returns NXT_AGAIN.  On NXT_DONE
 * the unparsed remainder becomes the preread request body.  "buf" must
 * have a total capacity of SIZE bytes; "size" is how many are filled.
 *
 * Fix: the recv() failure log reported "size" (bytes already buffered)
 * instead of the length actually passed to recv(), SIZE - size.
 */
static nxt_int_t
nxt_app_http_parse_request(nxt_app_request_t *r, u_char *buf, size_t size)
{
u_char *end;
ssize_t n;
nxt_err_t err;
nxt_socket_t s;
nxt_app_http_parse_state_t state;
end = buf + size;
state.pos = buf;
state.handler = nxt_app_http_parse_request_line;
for ( ;; ) {
switch (state.handler(&r->header, state.pos, end, &state)) {
case NXT_OK:
continue;
case NXT_DONE:
r->body_preread.len = end - state.pos;
r->body_preread.data = state.pos;
return NXT_OK;
case NXT_AGAIN:
/* Append more data into the remaining buffer space. */
s = r->event_conn->socket.fd;
n = recv(s, end, SIZE - size, 0);
if (nxt_slow_path(n <= 0)) {
err = (n == 0) ? 0 : nxt_socket_errno;
nxt_log_error(NXT_LOG_ERR, r->log, "recv(%d, %uz) failed %E",
s, SIZE - size, err);
return NXT_ERROR;
}
nxt_log_debug(r->log, "recv(%d, %uz): %z", s, SIZE - size, n);
size += n;
end += n;
continue;
}
return NXT_ERROR;
}
}
/*
 * Parse "METHOD PATH HTTP/1.1" and delegate to the field-name parser.
 * Returns NXT_AGAIN when the line is not yet complete in [start, end).
 * Accepts both bare-LF and CRLF line endings.
 */
static nxt_int_t
nxt_app_http_parse_request_line(nxt_app_request_header_t *h, u_char *start,
u_char *end, nxt_app_http_parse_state_t *state)
{
u_char *p;
for (p = start; /* void */; p++) {
if (nxt_slow_path(p == end)) {
state->pos = p;
return NXT_AGAIN;
}
if (*p == ' ') {
break;
}
}
h->method.len = p - start;
h->method.data = start;
start = p + 1;
p = nxt_memchr(start, ' ', end - start);
if (nxt_slow_path(p == NULL)) {
return NXT_AGAIN;
}
h->path.len = p - start;
h->path.data = start;
start = p + 1;
/* Need the whole "HTTP/1.1" token plus at least a line terminator. */
if (nxt_slow_path((size_t) (end - start) < sizeof("HTTP/1.1\n") - 1)) {
return NXT_AGAIN;
}
h->version.len = sizeof("HTTP/1.1") - 1;
h->version.data = start;
p = start + sizeof("HTTP/1.1") - 1;
if (nxt_slow_path(*p == '\n')) {
return nxt_app_http_parse_field_name(h, p + 1, end, state);
}
/* CRLF terminator: ensure both bytes are present before skipping. */
if (nxt_slow_path(end - p < 2)) {
return NXT_AGAIN;
}
return nxt_app_http_parse_field_name(h, p + 2, end, state);
}
/*
 * Parse one header field name up to ':', or detect the empty line that
 * ends the header section (NXT_DONE).  Returns NXT_AGAIN with resume
 * state when more data is needed.
 * NOTE(review): h->fields[h->fields_num] is written without checking a
 * capacity bound — the caller supplies a fixed 128-entry stack array,
 * so a request with more headers would overflow it; confirm/limit.
 */
static nxt_int_t
nxt_app_http_parse_field_name(nxt_app_request_header_t *h, u_char *start,
u_char *end, nxt_app_http_parse_state_t *state)
{
u_char *p;
nxt_app_header_field_t *fld;
if (nxt_slow_path(start == end)) {
goto again;
}
/* Bare LF: end of headers. */
if (nxt_slow_path(*start == '\n')) {
state->pos = start + 1;
return NXT_DONE;
}
/* CR must be followed by LF to end the headers. */
if (*start == '\r') {
if (nxt_slow_path(end - start < 2)) {
goto again;
}
if (nxt_slow_path(start[1] != '\n')) {
return NXT_ERROR;
}
state->pos = start + 2;
return NXT_DONE;
}
p = nxt_memchr(start, ':', end - start);
if (nxt_slow_path(p == NULL)) {
goto again;
}
fld = &h->fields[h->fields_num];
fld->name.len = p - start;
fld->name.data = start;
return nxt_app_http_parse_field_value(h, p + 1, end, state);
again:
state->pos = start;
state->handler = nxt_app_http_parse_field_name;
return NXT_AGAIN;
}
/*
 * Parse a header field value: skip leading spaces, take everything up
 * to the newline (stripping a trailing CR), and commit the field by
 * incrementing h->fields_num.  Returns NXT_AGAIN with resume state when
 * the value is not yet fully buffered.
 */
static nxt_int_t
nxt_app_http_parse_field_value(nxt_app_request_header_t *h, u_char *start,
u_char *end, nxt_app_http_parse_state_t *state)
{
u_char *p;
nxt_app_header_field_t *fld;
for ( ;; ) {
if (nxt_slow_path(start == end)) {
goto again;
}
if (*start != ' ') {
break;
}
start++;
}
p = nxt_memchr(start, '\n', end - start);
if (nxt_slow_path(p == NULL)) {
goto again;
}
fld = &h->fields[h->fields_num];
fld->value.len = p - start;
fld->value.data = start;
/* Drop the CR of a CRLF terminator from the value length. */
fld->value.len -= (p[-1] == '\r');
h->fields_num++;
state->pos = p + 1;
state->handler = nxt_app_http_parse_field_name;
return NXT_OK;
again:
state->pos = start;
state->handler = nxt_app_http_parse_field_value;
return NXT_AGAIN;
}
/*
 * Scan the parsed header fields for Content-Length and Content-Type
 * (case-insensitive) and record them on the request; Content-Length is
 * also parsed into r->body_rest.  The comparison arrays are sized to
 * the exact name length, deliberately without a NUL terminator.
 */
static nxt_int_t
nxt_app_http_process_headers(nxt_app_request_t *r)
{
nxt_uint_t i;
nxt_app_header_field_t *fld;
static const u_char content_length[14] = "Content-Length";
static const u_char content_type[12] = "Content-Type";
for (i = 0; i < r->header.fields_num; i++) {
fld = &r->header.fields[i];
if (fld->name.len == sizeof(content_length)
&& nxt_memcasecmp(fld->name.data, content_length,
sizeof(content_length)) == 0)
{
r->header.content_length = &fld->value;
r->body_rest = nxt_off_t_parse(fld->value.data, fld->value.len);
continue;
}
if (fld->name.len == sizeof(content_type)
&& nxt_memcasecmp(fld->name.data, content_type,
sizeof(content_type)) == 0)
{
r->header.content_type = &fld->value;
continue;
}
}
return NXT_OK;
}
/*
 * Lazily initialize an application connection for use by the event
 * engine: assign a unique non-zero log ident, attach the engine's I/O
 * vector and work queues, and set up the read/write timers.
 */
static void
nxt_app_conn_update(nxt_thread_t *thr, nxt_event_conn_t *c, nxt_log_t *log)
{
static nxt_atomic_t ident = 1;
c->socket.write_ready = 1;
c->socket.log = &c->log;
c->log = *log;
/* The while loop skips possible uint32_t overflow. */
while (c->log.ident == 0) {
c->log.ident = (uint32_t) nxt_atomic_fetch_add(&ident, 1);
}
thr->engine->connections++;
c->io = thr->engine->event->io;
c->max_chunk = NXT_INT32_T_MAX;
c->sendfile = NXT_CONN_SENDFILE_UNSET;
c->socket.read_work_queue = &thr->engine->read_work_queue;
c->socket.write_work_queue = &thr->engine->write_work_queue;
c->read_work_queue = &thr->engine->read_work_queue;
c->write_work_queue = &thr->engine->write_work_queue;
nxt_event_conn_timer_init(&c->read_timer, c, c->socket.read_work_queue);
nxt_event_conn_timer_init(&c->write_timer, c, c->socket.write_work_queue);
nxt_log_debug(&c->log, "event connections: %uD", thr->engine->connections);
}
/*
 * Read up to "len" bytes of the request body into "data": first drain
 * the preread bytes left over from header parsing, then recv() the
 * rest in one blocking call.  "len" is clamped to the remaining body
 * size.  Returns NXT_ERROR if the peer fails or closes early.
 */
nxt_int_t
nxt_app_http_read_body(nxt_app_request_t *r, u_char *data, size_t len)
{
size_t preread;
ssize_t n;
nxt_err_t err;
if ((off_t) len > r->body_rest) {
len = (size_t) r->body_rest;
}
preread = 0;
if (r->body_preread.len != 0) {
preread = nxt_min(r->body_preread.len, len);
nxt_memcpy(data, r->body_preread.data, preread);
r->body_preread.len -= preread;
r->body_preread.data += preread;
r->body_rest -= preread;
len -= preread;
}
if (len == 0) {
return NXT_OK;
}
n = recv(r->event_conn->socket.fd, data + preread, len, 0);
/* A short read is treated as an error: the body must arrive whole. */
if (nxt_slow_path(n < (ssize_t) len)) {
if (n <= 0) {
err = (n == 0) ? 0 : nxt_socket_errno;
nxt_log_error(NXT_LOG_ERR, r->log, "recv(%d, %uz) failed %E",
r->event_conn->socket.fd, len, err);
return NXT_ERROR;
}
nxt_log_error(NXT_LOG_ERR, r->log,
"client prematurely closed connection");
return NXT_ERROR;
}
r->body_rest -= n;
return NXT_OK;
}
/*
 * Append "len" bytes of response data to the request's output chain,
 * posting full buffers to the delivery engine in batches.  Buffers come
 * from a shared free list bounded by nxt_app_buf_max_number; when the
 * limit is reached this thread blocks on nxt_app_cond until the engine
 * returns buffers.  The partially filled tail buffer is kept in
 * r->output_buf for the next call.
 */
nxt_int_t
nxt_app_write(nxt_app_request_t *r, const u_char *data, size_t len)
{
size_t free;
nxt_err_t err;
nxt_buf_t *b, *out, **next;
nxt_uint_t bufs;
out = NULL;
next = &out;
b = r->output_buf;
if (b == NULL) {
bufs = 0;
goto get_buf;
}
bufs = 1;
for ( ;; ) {
free = nxt_buf_mem_free_size(&b->mem);
/* Whole remainder fits: fill and keep this buffer as the tail. */
if (free > len) {
b->mem.free = nxt_cpymem(b->mem.free, data, len);
break;
}
b->mem.free = nxt_cpymem(b->mem.free, data, free);
data += free;
len -= free;
/* Buffer is full: link it into the outgoing chain. */
*next = b;
next = &b->next;
if (len == 0) {
b = NULL;
break;
}
/* Batch limit reached: ship the chain and start a new one. */
if (bufs == nxt_app_buf_max_number) {
bufs = 0;
*next = NULL;
nxt_event_engine_post(nxt_app_engine, nxt_app_delivery_handler,
r->event_conn, out, &nxt_main_log);
out = NULL;
next = &out;
}
get_buf:
if (nxt_slow_path(nxt_thread_mutex_lock(&nxt_app_mutex) != NXT_OK)) {
return NXT_ERROR;
}
/* Take a recycled buffer, or leave b NULL to allocate a new one. */
for ( ;; ) {
b = nxt_app_buf_free;
if (b != NULL) {
nxt_app_buf_free = b->next;
break;
}
if (nxt_app_buf_current_number < nxt_app_buf_max_number) {
break;
}
/* Pool exhausted: wait for the engine to return buffers. */
err = nxt_thread_cond_wait(&nxt_app_cond, &nxt_app_mutex,
NXT_INFINITE_NSEC);
if (nxt_slow_path(err != 0)) {
(void) nxt_thread_mutex_unlock(&nxt_app_mutex);
return NXT_ERROR;
}
}
(void) nxt_thread_mutex_unlock(&nxt_app_mutex);
if (b == NULL) {
b = nxt_buf_mem_alloc(nxt_app_mem_pool, 4096, 0);
if (nxt_slow_path(b == NULL)) {
return NXT_ERROR;
}
b->completion_handler = nxt_app_buf_complettion;
nxt_app_buf_current_number++;
}
bufs++;
}
r->output_buf = b;
/* Ship whatever complete buffers accumulated in this call. */
if (out != NULL) {
*next = NULL;
nxt_event_engine_post(nxt_app_engine, nxt_app_delivery_handler,
r->event_conn, out, &nxt_main_log);
}
return NXT_OK;
}
/*
 * Terminate the response: append a "last" sync buffer (carrying the
 * request pointer in b->parent so completion can close it) to any
 * pending output and post the chain to the delivery engine.
 */
static nxt_int_t
nxt_app_write_finish(nxt_app_request_t *r)
{
nxt_buf_t *b, *out;
b = nxt_buf_sync_alloc(r->mem_pool, NXT_BUF_SYNC_LAST);
if (nxt_slow_path(b == NULL)) {
return NXT_ERROR;
}
b->completion_handler = nxt_app_buf_complettion;
b->parent = (nxt_buf_t *) r;
out = r->output_buf;
if (out != NULL) {
r->output_buf = NULL;
out->next = b;
} else {
out = b;
}
nxt_event_engine_post(nxt_app_engine, nxt_app_delivery_handler,
r->event_conn, out, &nxt_main_log);
return NXT_OK;
}
/*
 * Buffer completion handler, run on the engine thread: push the
 * finished buffer onto the nxt_app_buf_done list for recycling.
 */
static void
nxt_app_buf_complettion(nxt_thread_t *thr, void *obj, void *data)
{
nxt_buf_t *b;
b = obj;
nxt_log_debug(thr->log, "app buf completion");
b->next = nxt_app_buf_done;
nxt_app_buf_done = b;
}
/*
 * Posted to the engine thread with a buffer chain to send on "c".
 * Initializes the connection on first use; if a write is already in
 * flight the chain is queued, and if the connection has failed the
 * chain is recycled immediately.
 */
static void
nxt_app_delivery_handler(nxt_thread_t *thr, void *obj, void *data)
{
nxt_buf_t *b;
nxt_mem_pool_t *mp;
nxt_event_conn_t *c;
c = obj;
b = data;
nxt_log_debug(thr->log, "app delivery handler");
/* A write is already in progress: just append to it. */
if (c->write != NULL) {
nxt_buf_chain_add(&c->write, b);
return;
}
/* First chain for this connection: finish engine-side setup. */
if (c->mem_pool == NULL) {
mp = nxt_mem_pool_create(256);
if (nxt_slow_path(mp == NULL)) {
close(c->socket.fd);
return;
}
c->mem_pool = mp;
nxt_app_conn_update(thr, c, &nxt_main_log);
}
/* Dead connection: recycle the buffers instead of writing. */
if (c->socket.timedout || c->socket.error != 0) {
nxt_buf_chain_add(&nxt_app_buf_done, b);
nxt_thread_work_queue_add(thr, c->write_work_queue,
nxt_app_delivery_complettion, c, NULL,
thr->log);
return;
}
c->write = b;
c->write_state = &nxt_app_delivery_write_state;
nxt_event_conn_write(thr, c);
}
/*
 * Connection write state for response delivery: the handlers below are
 * positional initializers (ready, then error and timeout handlers, and
 * the timer-value callback returning the write timeout).
 */
static const nxt_event_conn_state_t nxt_app_delivery_write_state
nxt_aligned(64) =
{
NXT_EVENT_BUF_PROCESS,
NXT_EVENT_TIMER_AUTORESET,
nxt_app_delivery_ready,
NULL,
nxt_app_delivery_error,
nxt_app_delivery_timeout,
nxt_app_delivery_timer_value,
0,
};
/*
 * Write-ready handler: schedule completion processing so finished
 * buffers on nxt_app_buf_done are recycled.
 */
static void
nxt_app_delivery_ready(nxt_thread_t *thr, void *obj, void *data)
{
nxt_event_conn_t *c;
c = obj;
nxt_thread_work_queue_add(thr, c->write_work_queue,
nxt_app_delivery_complettion, c, NULL, thr->log);
}
/*
 * Drain nxt_app_buf_done: reset memory buffers and return them to the
 * shared free list (signalling the waiting application thread), and
 * close the request when its "last" sync buffer completes.
 */
static void
nxt_app_delivery_complettion(nxt_thread_t *thr, void *obj, void *data)
{
nxt_buf_t *b, *bn, *free;
nxt_app_request_t *r;
nxt_log_debug(thr->log, "app delivery complettion");
free = NULL;
for (b = nxt_app_buf_done; b; b = bn) {
bn = b->next;
if (nxt_buf_is_mem(b)) {
b->mem.pos = b->mem.start;
b->mem.free = b->mem.start;
b->next = free;
free = b;
continue;
}
/* The "last" buffer carries the request in b->parent. */
if (nxt_buf_is_last(b)) {
r = (nxt_app_request_t *) b->parent;
nxt_app_close_request(thr, r);
}
}
nxt_app_buf_done = NULL;
if (free == NULL) {
return;
}
if (nxt_slow_path(nxt_thread_mutex_lock(&nxt_app_mutex) != NXT_OK)) {
return;
}
nxt_buf_chain_add(&nxt_app_buf_free, free);
(void) nxt_thread_mutex_unlock(&nxt_app_mutex);
nxt_thread_time_update(thr);
/* Wake the application thread blocked in nxt_app_write(). */
(void) nxt_thread_cond_signal(&nxt_app_cond);
}
static void
nxt_app_delivery_error(nxt_thread_t *thr, void *obj, void *data)
{
nxt_event_conn_t *c;
c = obj;
nxt_log_debug(thr->log, "app delivery error");
nxt_app_delivery_done(thr, c);
}
static void
nxt_app_delivery_timeout(nxt_thread_t *thr, void *obj, void *data)
{
nxt_event_conn_t *c;
c = obj;
nxt_log_debug(thr->log, "app delivery timeout");
nxt_app_delivery_done(thr, c);
}
/*
 * Returns the write timeout for application output delivery.
 * Both arguments are unused.
 */
static nxt_msec_t
nxt_app_delivery_timer_value(nxt_event_conn_t *c, uintptr_t data)
{
    /* 30 seconds, in milliseconds. */
    return 30 * 1000;
}
/*
 * Moves the connection's unsent output chain to the global done list
 * and schedules completion processing on the write work queue.
 * No-op if the connection has nothing pending.
 */
static void
nxt_app_delivery_done(nxt_thread_t *thr, nxt_event_conn_t *c)
{
    nxt_buf_t  *pending;

    pending = c->write;

    if (pending == NULL) {
        return;
    }

    c->write = NULL;

    nxt_buf_chain_add(&nxt_app_buf_done, pending);

    nxt_thread_work_queue_add(thr, c->write_work_queue,
                              nxt_app_delivery_complettion, c, NULL,
                              thr->log);
}
/*
 * Releases a completed application request: closes the client
 * connection, then destroys the connection's and the request's
 * memory pools.
 *
 * NOTE(review): c->mem_pool is destroyed right after
 * nxt_event_conn_close(); presumably the close path does not use the
 * pool afterwards -- confirm.
 */
static void
nxt_app_close_request(nxt_thread_t *thr, nxt_app_request_t *r)
{
    nxt_event_conn_t *c;
    nxt_log_debug(thr->log, "app close connection");
    c = r->event_conn;
    nxt_event_conn_close(thr, c);
    nxt_mem_pool_destroy(c->mem_pool);
    nxt_mem_pool_destroy(r->mem_pool);
}

60
src/nxt_application.h Normal file
View File

@@ -0,0 +1,60 @@
/*
* Copyright (C) Valentin V. Bartenev
* Copyright (C) NGINX, Inc.
*/
#ifndef _NXT_APPLICATION_H_INCLUDED_
#define _NXT_APPLICATION_H_INCLUDED_
/* One parsed HTTP request header field. */
typedef struct {
    nxt_str_t  name;
    nxt_str_t  value;
} nxt_app_header_field_t;


/*
 * Parsed request line and header fields.
 * NOTE(review): content_length/content_type presumably point at the
 * corresponding field values when present -- confirm in the parser.
 */
typedef struct {
    nxt_str_t               method;
    nxt_str_t               path;
    nxt_str_t               version;

    nxt_uint_t              fields_num;
    nxt_app_header_field_t  *fields;

    nxt_str_t               *content_length;
    nxt_str_t               *content_type;
} nxt_app_request_header_t;


/* Per-request state shared between the server core and an app module. */
typedef struct {
    nxt_event_engine_t        *engine;
    nxt_mem_pool_t            *mem_pool;
    nxt_event_conn_t          *event_conn;
    nxt_log_t                 *log;
    nxt_buf_t                 *output_buf;

    nxt_app_request_header_t  header;
    nxt_str_t                 body_preread;  /* presumably body bytes read
                                                together with the headers */
    off_t                     body_rest;     /* presumably body bytes still
                                                expected */
    void                      *ctx;          /* application module context */
} nxt_app_request_t;


/*
 * Application module interface.
 * NOTE(review): call order (init, start, header per field, run) is
 * inferred from the callback names -- confirm against the callers.
 */
typedef struct {
    nxt_int_t  (*init)(nxt_thread_t *thr);
    nxt_int_t  (*start)(nxt_app_request_t *r);
    nxt_int_t  (*header)(nxt_app_request_t *r,
                   nxt_app_header_field_t *field);
    nxt_int_t  (*run)(nxt_app_request_t *r);
} nxt_application_module_t;


extern nxt_application_module_t  nxt_python_module;

nxt_int_t nxt_app_http_read_body(nxt_app_request_t *r, u_char *data,
    size_t len);
nxt_int_t nxt_app_write(nxt_app_request_t *r, const u_char *data, size_t len);
#endif /* _NXT_APPLICATION_H_INCLUDED_ */

96
src/nxt_array.c Normal file
View File

@@ -0,0 +1,96 @@
/*
* Copyright (C) Igor Sysoev
* Copyright (C) NGINX, Inc.
*/
#include <nxt_main.h>
/*
 * Creates an array with initial capacity for n elements of the given
 * size.  The header and the initial element storage come from a single
 * memory pool allocation; elements start right after the header.
 * Returns NULL on allocation failure.
 */
nxt_array_t *
nxt_array_create(nxt_mem_pool_t *mp, nxt_uint_t n, size_t size)
{
    nxt_array_t  *a;

    a = nxt_mem_alloc(mp, sizeof(nxt_array_t) + n * size);

    if (nxt_slow_path(a == NULL)) {
        return NULL;
    }

    a->elts = a + 1;
    a->nelts = 0;
    a->size = size;
    a->nalloc = n;
    a->mem_pool = mp;

    return a;
}
/*
 * Appends one uninitialized element to the array, growing the element
 * storage when it is full, and returns a pointer to the new element or
 * NULL on failure.
 *
 * Fixes over the original version:
 *  - nxt_array_t.nalloc is a uint16_t, so the grown capacity is clamped
 *    to 65535; previously a larger computed capacity was silently
 *    truncated on assignment, which could leave nalloc < nelts and
 *    corrupt subsequent appends.
 *  - an array created with nalloc == 0 now grows instead of "growing"
 *    to another zero-sized allocation.
 */
void *
nxt_array_add(nxt_array_t *array)
{
    void      *p;
    uint32_t  nalloc, new_alloc;

    nalloc = array->nalloc;

    if (array->nelts == nalloc) {

        if (nalloc == 0) {
            /* Robustness: an array created empty must still grow. */
            new_alloc = 4;

        } else if (nalloc < 16) {
            /* Allocate new array twice larger than current. */
            new_alloc = nalloc * 2;

        } else {
            /* Allocate new array 1.5 times larger than current. */
            new_alloc = nalloc + nalloc / 2;
        }

        if (nxt_slow_path(new_alloc > 0xffff)) {
            /* Clamp to the maximum representable by uint16_t nalloc. */
            new_alloc = 0xffff;

            if (new_alloc == nalloc) {
                /* The array is already at its maximum capacity. */
                return NULL;
            }
        }

        p = nxt_mem_alloc(array->mem_pool, array->size * new_alloc);

        if (nxt_slow_path(p == NULL)) {
            return NULL;
        }

        nxt_memcpy(p, array->elts, array->size * nalloc);

        array->elts = p;
        array->nalloc = new_alloc;
    }

    p = (char *) array->elts + array->size * array->nelts;
    array->nelts++;

    return p;
}
/*
 * Appends one element and fills it with zeros.
 * Returns NULL when the underlying append fails.
 */
void *
nxt_array_zero_add(nxt_array_t *array)
{
    void  *elt;

    elt = nxt_array_add(array);

    if (nxt_slow_path(elt == NULL)) {
        return NULL;
    }

    nxt_memzero(elt, array->size);

    return elt;
}
/*
 * Removes an element by overwriting it with the last element and
 * shrinking the array by one.  Element order is not preserved.
 */
void
nxt_array_remove(nxt_array_t *array, void *elt)
{
    void  *last;

    array->nelts--;
    last = (char *) array->elts + array->size * array->nelts;

    if (elt != last) {
        nxt_memcpy(elt, last, array->size);
    }
}

51
src/nxt_array.h Normal file
View File

@@ -0,0 +1,51 @@
/*
* Copyright (C) Igor Sysoev
* Copyright (C) NGINX, Inc.
*/
#ifndef _NXT_ARRAY_H_INCLUDED_
#define _NXT_ARRAY_H_INCLUDED_
/*
 * A growable array of fixed-size elements allocated from a memory pool.
 * Note the uint16_t size and nalloc fields: element size and capacity
 * are both limited to 65535.
 */
typedef struct {
    void *elts;
    /* nelts has uint32_t type because it is used most often. */
    uint32_t nelts;
    uint16_t size;
    uint16_t nalloc;
    nxt_mem_pool_t *mem_pool;
} nxt_array_t;
NXT_EXPORT nxt_array_t *nxt_array_create(nxt_mem_pool_t *mp, nxt_uint_t n,
    size_t size);
NXT_EXPORT void *nxt_array_add(nxt_array_t *array);
NXT_EXPORT void *nxt_array_zero_add(nxt_array_t *array);
NXT_EXPORT void nxt_array_remove(nxt_array_t *array, void *elt);
/* Pointer to the last element; must not be used on an empty array. */
#define \
nxt_array_last(array) \
    ((void *) ((char *) (array)->elts + (array)->size * ((array)->nelts - 1)))
/*
 * NOTE(review): the expansion ends with a semicolon, so a call followed
 * by ";" yields an extra empty statement -- unsafe in an unbraced
 * if/else body.
 */
#define \
nxt_array_reset(array) \
    (array)->nelts = 0;
#define \
nxt_array_is_empty(array) \
    ((array)->nelts == 0)
/* Removes the last element and returns a pointer to it (storage kept). */
nxt_inline void *
nxt_array_remove_last(nxt_array_t *array)
{
    array->nelts--;
    return (char *) array->elts + array->size * array->nelts;
}
#endif /* _NXT_ARRAY_H_INCLUDED_ */

268
src/nxt_atomic.h Normal file
View File

@@ -0,0 +1,268 @@
/*
* Copyright (C) Igor Sysoev
* Copyright (C) NGINX, Inc.
*/
#ifndef _NXT_ATOMIC_H_INCLUDED_
#define _NXT_ATOMIC_H_INCLUDED_
/*
* nxt_atomic_try_lock() must set an acquire barrier on lock.
* nxt_atomic_xchg() must set an acquire barrier.
* nxt_atomic_release() must set a release barrier.
*/
#if (NXT_HAVE_GCC_ATOMIC) /* GCC 4.1 builtin atomic operations */
typedef intptr_t nxt_atomic_int_t;
typedef uintptr_t nxt_atomic_uint_t;
typedef volatile nxt_atomic_uint_t nxt_atomic_t;
/*
* __sync_bool_compare_and_swap() is a full barrier.
* __sync_lock_test_and_set() is an acquire barrier.
* __sync_lock_release() is a release barrier.
*/
#define \
nxt_atomic_cmp_set(lock, cmp, set) \
__sync_bool_compare_and_swap(lock, cmp, set)
#define \
nxt_atomic_xchg(lock, set) \
__sync_lock_test_and_set(lock, set)
#define \
nxt_atomic_fetch_add(value, add) \
__sync_fetch_and_add(value, add)
#define \
nxt_atomic_try_lock(lock) \
nxt_atomic_cmp_set(lock, 0, 1)
#define \
nxt_atomic_release(lock) \
__sync_lock_release(lock)
#if (__i386__ || __i386 || __amd64__ || __amd64)
#define \
nxt_cpu_pause() \
__asm__ ("pause")
#else
#define \
nxt_cpu_pause()
#endif
#elif (NXT_HAVE_SOLARIS_ATOMIC) /* Solaris 10 */
#include <atomic.h>
typedef long nxt_atomic_int_t;
typedef ulong_t nxt_atomic_uint_t;
typedef volatile nxt_atomic_uint_t nxt_atomic_t;
#define \
nxt_atomic_cmp_set(lock, cmp, set) \
    (atomic_cas_ulong(lock, cmp, set) == (ulong_t) cmp)

/*
 * Fix: Solaris <atomic.h> declares atomic_swap_ulong(); there is no
 * atomic_add_swap() function.  The configure-time feature test
 * (auto/atomic) also probes atomic_swap_ulong().
 */
#define \
nxt_atomic_xchg(lock, set) \
    atomic_swap_ulong(lock, set)

#define \
nxt_atomic_fetch_add(value, add) \
    (atomic_add_long_nv(value, add) - add)
/*
* Solaris uses SPARC Total Store Order model. In this model:
* 1) Each atomic load-store instruction behaves as if it were followed by
* #LoadLoad, #LoadStore, and #StoreStore barriers.
* 2) Each load instruction behaves as if it were followed by
* #LoadLoad and #LoadStore barriers.
* 3) Each store instruction behaves as if it were followed by
* #StoreStore barrier.
*
* In X86_64 atomic instructions set a full barrier and usual instructions
* set implicit #LoadLoad, #LoadStore, and #StoreStore barriers.
*
* An acquire barrier requires at least #LoadLoad and #LoadStore barriers
* and they are provided by atomic load-store instruction.
*
* A release barrier requires at least #LoadStore and #StoreStore barriers,
* so a lock release does not require an explicit barrier: all load
* instructions in critical section is followed by implicit #LoadStore
* barrier and all store instructions are followed by implicit #StoreStore
* barrier.
*/
#define \
nxt_atomic_try_lock(lock) \
nxt_atomic_cmp_set(lock, 0, 1)
#define \
nxt_atomic_release(lock) \
*lock = 0;
/*
* The "rep; nop" is used instead of "pause" to omit the "[ PAUSE ]" hardware
* capability added by linker since Solaris ld.so.1 does not know about it:
*
* ld.so.1: ...: fatal: hardware capability unsupported: 0x2000 [ PAUSE ]
*/
#if (__i386__ || __i386 || __amd64__ || __amd64)
#define \
nxt_cpu_pause() \
__asm__ ("rep; nop")
#else
#define \
nxt_cpu_pause()
#endif
/* elif (NXT_HAVE_MACOSX_ATOMIC) */
/*
* The atomic(3) interface has been introduced in MacOS 10.4 (Tiger) and
* extended in 10.5 (Leopard). However its support is omitted because:
*
* 1) the interface is still incomplete:
* *) there are OSAtomicAdd32Barrier() and OSAtomicAdd64Barrier()
* but no OSAtomicAddLongBarrier();
* *) there is no interface for XCHG operation.
*
* 2) the interface is tuned for non-SMP systems due to omission of the
* LOCK prefix on single CPU system but nowadays MacOSX systems are at
* least dual core. Thus these indirect calls just add overhead as
* compared with inlined atomic operations which are supported by GCC
* and Clang in modern MacOSX systems.
*/
#elif (NXT_HAVE_XLC_ATOMIC) /* XL C/C++ V8.0 for AIX */
#if (NXT_64BIT)
typedef long nxt_atomic_int_t;
typedef unsigned long nxt_atomic_uint_t;
typedef volatile nxt_atomic_int_t nxt_atomic_t;
nxt_inline nxt_bool_t
nxt_atomic_cmp_set(nxt_atomic_t *lock, nxt_atomic_int_t cmp,
nxt_atomic_int_t set)
{
nxt_atomic_int_t old;
old = cmp;
return __compare_and_swaplp(lock, &old, set);
}
#define \
nxt_atomic_xchg(lock, set) \
__fetch_and_swaplp(lock, set)
#define \
nxt_atomic_fetch_add(value, add) \
__fetch_and_addlp(value, add)
#else /* NXT_32BIT */
typedef int nxt_atomic_int_t;
typedef unsigned int nxt_atomic_uint_t;
typedef volatile nxt_atomic_int_t nxt_atomic_t;
nxt_inline nxt_bool_t
nxt_atomic_cmp_set(nxt_atomic_t *lock, nxt_atomic_int_t cmp,
nxt_atomic_int_t set)
{
nxt_atomic_int_t old;
old = cmp;
return __compare_and_swap(lock, &old, set);
}
#define \
nxt_atomic_xchg(lock, set) \
__fetch_and_swap(lock, set)
#define \
nxt_atomic_fetch_add(value, add) \
__fetch_and_add(value, add)
#endif /* NXT_32BIT*/
/*
* __lwsync() is a "lwsync" instruction that sets #LoadLoad, #LoadStore,
* and #StoreStore barrier.
*
* __compare_and_swap() is a pair of "ldarx" and "stdcx" instructions.
* A "lwsync" does not set #StoreLoad barrier so it can not be used after
* this pair since a next load inside critical section can be performed
* after the "ldarx" instruction but before the "stdcx" instruction.
* However, this next load instruction will load correct data because
* otherwise the "ldarx/stdcx" pair will fail and this data will be
* discarded. Nevertheless, the "isync" instruction is used for sure.
*
* A full barrier can be set with __sync(), a "sync" instruction, but there
* is also a faster __isync(), an "isync" instruction. This instruction is
* not a memory barrier but an instruction barrier. An "isync" instruction
* causes the processor to complete execution of all previous instructions
* and then to discard instructions (which may have begun execution) following
* the "isync". After the "isync" is executed, the following instructions
* then begin execution. The "isync" is used to ensure that the loads
* following entry into a critical section are not performed (because of
* aggressive out-of-order or speculative execution in the processor) until
* the lock is granted.
*/
nxt_inline nxt_bool_t
nxt_atomic_try_lock(nxt_atomic_t *lock)
{
if (nxt_atomic_cmp_set(lock, 0, 1)) {
__isync();
return 1;
}
return 0;
}
#define \
nxt_atomic_release(lock) \
do { __lwsync(); *lock = 0; } while (0)
#define \
nxt_cpu_pause()
#endif /* NXT_HAVE_XLC_ATOMIC */
#endif /* _NXT_ATOMIC_H_INCLUDED_ */

171
src/nxt_buf.c Normal file
View File

@@ -0,0 +1,171 @@
/*
* Copyright (C) Igor Sysoev
* Copyright (C) NGINX, Inc.
*/
#include <nxt_main.h>
static void nxt_buf_completion(nxt_thread_t *thr, void *obj, void *data);
/*
 * Common part of buffer allocation: takes a zeroed nxt_buf_t of "bsize"
 * bytes from the memory pool cache and, when size != 0, attaches a
 * memory part of "size" bytes allocated with nxt_mem_buf().
 * Returns NULL on allocation failure.  Factored out of the previously
 * duplicated bodies of nxt_buf_mem_alloc() and nxt_buf_file_alloc().
 */
static nxt_buf_t *
nxt_buf_alloc(nxt_mem_pool_t *mp, size_t bsize, size_t size, nxt_uint_t flags)
{
    nxt_buf_t  *b;

    b = nxt_mem_cache_zalloc0(mp, bsize);
    if (nxt_slow_path(b == NULL)) {
        return NULL;
    }

    b->data = mp;
    b->completion_handler = nxt_buf_completion;
    b->size = bsize;

    if (size != 0) {
        b->mem.start = nxt_mem_buf(mp, &size, flags);
        if (nxt_slow_path(b->mem.start == NULL)) {
            return NULL;
        }

        b->mem.pos = b->mem.start;
        b->mem.free = b->mem.start;
        b->mem.end = b->mem.start + size;
    }

    return b;
}


/* Allocates a memory-only buffer (no file/mmap part). */
nxt_buf_t *
nxt_buf_mem_alloc(nxt_mem_pool_t *mp, size_t size, nxt_uint_t flags)
{
    return nxt_buf_alloc(mp, NXT_BUF_MEM_SIZE, size, flags);
}


/* Allocates a memory/file buffer and marks it as a file buffer. */
nxt_buf_t *
nxt_buf_file_alloc(nxt_mem_pool_t *mp, size_t size, nxt_uint_t flags)
{
    nxt_buf_t  *b;

    b = nxt_buf_alloc(mp, NXT_BUF_FILE_SIZE, size, flags);

    if (nxt_fast_path(b != NULL)) {
        nxt_buf_set_file(b);
    }

    return b;
}


/*
 * Allocates a memory mapped file buffer.  No memory is mapped here;
 * the intended mapping size is stored via nxt_buf_mem_set_size().
 */
nxt_buf_t *
nxt_buf_mmap_alloc(nxt_mem_pool_t *mp, size_t size)
{
    nxt_buf_t  *b;

    b = nxt_mem_cache_zalloc0(mp, NXT_BUF_MMAP_SIZE);

    if (nxt_fast_path(b != NULL)) {
        b->data = mp;
        b->completion_handler = nxt_buf_completion;
        b->size = NXT_BUF_MMAP_SIZE;

        nxt_buf_set_file(b);
        nxt_buf_set_mmap(b);
        nxt_buf_mem_set_size(&b->mem, size);
    }

    return b;
}


/*
 * Allocates a sync buffer carrying the nobuf/flush/last flags given in
 * "flags"; sync buffers have no memory part.
 */
nxt_buf_t *
nxt_buf_sync_alloc(nxt_mem_pool_t *mp, nxt_uint_t flags)
{
    nxt_buf_t  *b;

    b = nxt_mem_cache_zalloc0(mp, NXT_BUF_SYNC_SIZE);

    if (nxt_fast_path(b != NULL)) {
        b->data = mp;
        b->completion_handler = nxt_buf_completion;
        b->size = NXT_BUF_SYNC_SIZE;

        nxt_buf_set_sync(b);
        b->is_nobuf = ((flags & NXT_BUF_SYNC_NOBUF) != 0);
        b->is_flush = ((flags & NXT_BUF_SYNC_FLUSH) != 0);
        b->is_last = ((flags & NXT_BUF_SYNC_LAST) != 0);
    }

    return b;
}
/* Appends the chain "in" to the end of the chain headed by *head. */
void
nxt_buf_chain_add(nxt_buf_t **head, nxt_buf_t *in)
{
    nxt_buf_t  **link;

    link = head;

    while (*link != NULL) {
        link = &(*link)->next;
    }

    *link = in;
}
/* Returns the total used memory size (pos..free) of a buffer chain. */
size_t
nxt_buf_chain_length(nxt_buf_t *b)
{
    size_t  total;

    for (total = 0; b != NULL; b = b->next) {
        total += b->mem.free - b->mem.pos;
    }

    return total;
}
/*
 * Default buffer completion handler: returns the buffer to its memory
 * pool cache and, when the last retained child completes, marks the
 * parent fully consumed and runs the parent's completion handler.
 */
static void
nxt_buf_completion(nxt_thread_t *thr, void *obj, void *data)
{
    nxt_buf_t       *b, *parent;
    nxt_mem_pool_t  *mp;

    b = obj;
    parent = data;

    nxt_log_debug(thr->log, "buf completion: %p %p", b, b->mem.start);

    mp = b->data;
    nxt_buf_free(mp, b);

    if (parent == NULL) {
        return;
    }

    nxt_log_debug(thr->log, "parent retain:%uD", parent->retain);

    parent->retain--;

    if (parent->retain == 0) {
        parent->mem.pos = parent->mem.free;

        parent->completion_handler(thr, parent, parent->parent);
    }
}

246
src/nxt_buf.h Normal file
View File

@@ -0,0 +1,246 @@
/*
* Copyright (C) Igor Sysoev
* Copyright (C) NGINX, Inc.
*/
#ifndef _NXT_BUF_H_INCLUDED_
#define _NXT_BUF_H_INCLUDED_
/*
* There are four types of buffers. They are different sizes, so they
* should be allocated by appropriate nxt_buf_XXX_alloc() function.
*
* 1) Memory-only buffers, their size is less than nxt_buf_t size, it
* is equal to offsetof(nxt_buf_t, file_pos), that is it is nxt_buf_t
* without file and mmap part. The buffers are frequently used, so
* the reduction allows to save 20-32 bytes depending on platform.
*
* 2) Memory/file buffers, on Unix their size is exactly nxt_buf_t size,
* since nxt_mem_map_file_ctx_t() is empty macro. On Windows the size
* equals offsetof(nxt_buf_t, mmap), that is it is nxt_buf_t without
* memory map context part. The buffers can contain both memory and
* file pointers at once, or only memory or file pointers.
*
* 3) Memory mapped buffers are similar to the memory/file buffers. Their
* size is exactly nxt_buf_t size. The buffers can contain both memory
* and file pointers at once, or only memory or file pointers. If a
* buffer is not currently mapped in memory, its mapping size is stored
* in the mem.end field and available via nxt_buf_mem_size() macro.
*
* 4) Sync buffers, their size is the same size as memory-only buffers
* size. A sync buffer can be smaller but for memory pool cache
* purpose it is better to allocate it as frequently used memory-only
* buffer. The buffers are used to synchronize pipeline processing
* completion, because data buffers in the pipeline can be completed
* and freed before their final output will even be passed to a peer.
* For this purpose a sync buffer is allocated with the stop flag which
* stops buffer chain completion processing on the sync buffer in
* nxt_sendbuf_update() and nxt_sendbuf_completion().
* Clearing the stop flag allows to continue completion processing.
*
* The last flag means the end of the output and must be set only
* in a sync buffer. The last flag is not permitted in memory and
* file buffers since it requires special handling while conversion
* one buffer to another.
*
* The nxt_buf_used_size() macro treats a sync buffer as a memory-only
* buffer which has NULL pointers, thus the buffer content size is zero.
* If allocated size of sync buffer would be lesser than memory-only
* buffer, then the special memory flag would be required because
* currently presence of memory part is indicated by non-NULL pointer
* to a content in memory.
*
* All types of buffers can have the flush flag that means the buffer
* should be sent as much as possible.
*/
/*
 * Memory part of a buffer: [start, end) is the allocation,
 * [pos, free) is the data not yet consumed.
 */
typedef struct {
    u_char  *pos;     /* first unconsumed byte */
    u_char  *free;    /* first free byte, i.e. end of data */
    u_char  *start;
    u_char  *end;
} nxt_buf_mem_t;


struct nxt_buf_s {
    void                    *data;
    nxt_work_handler_t      completion_handler;
    nxt_buf_t               *parent;

    /*
     * The next link, flags, and nxt_buf_mem_t should
     * reside together to improve cache locality.
     */
    nxt_buf_t               *next;
    uint32_t                retain;   /* child buffers still outstanding */

    /*
     * Used by nxt_mem_cache_free() to return buffer
     * in appropriate memory pool cache.
     */
    uint8_t                 size;

    uint8_t                 is_file;  /* 1 bit */

    uint16_t                is_mmap:1;
    uint16_t                is_sync:1;
    uint16_t                is_nobuf:1;
    uint16_t                is_flush:1;
    uint16_t                is_last:1;

    nxt_buf_mem_t           mem;

    /* The file and mmap parts are not allocated by nxt_buf_mem_alloc(). */
    nxt_file_t              *file;
    nxt_off_t               file_pos;
    nxt_off_t               file_end;

    /*
     * The mmap part is not allocated by nxt_buf_file_alloc().
     * nxt_mem_map_file_ctx_t() expands to the mmap context field where
     * one is needed and to nothing on Unix (see the comment above),
     * hence no trailing semicolon here.
     */
    nxt_mem_map_file_ctx_t  (mmap)
};
#define NXT_BUF_MEM_SIZE offsetof(nxt_buf_t, file)
#define NXT_BUF_SYNC_SIZE NXT_BUF_MEM_SIZE
#define NXT_BUF_MMAP_SIZE sizeof(nxt_buf_t)
#define NXT_BUF_FILE_SIZE sizeof(nxt_buf_t)
#define NXT_BUF_SYNC_NOBUF 1
#define NXT_BUF_SYNC_FLUSH 2
#define NXT_BUF_SYNC_LAST 4
#define \
nxt_buf_is_mem(b) \
((b)->mem.pos != NULL)
#define \
nxt_buf_is_file(b) \
((b)->is_file)
#define \
nxt_buf_set_file(b) \
(b)->is_file = 1
#define \
nxt_buf_clear_file(b) \
(b)->is_file = 0
#define \
nxt_buf_is_mmap(b) \
((b)->is_mmap)
#define \
nxt_buf_set_mmap(b) \
(b)->is_mmap = 1
#define \
nxt_buf_clear_mmap(b) \
(b)->is_mmap = 0
#define \
nxt_buf_is_sync(b) \
((b)->is_sync)
#define \
nxt_buf_set_sync(b) \
(b)->is_sync = 1
#define \
nxt_buf_clear_sync(b) \
(b)->is_sync = 0
#define \
nxt_buf_is_nobuf(b) \
((b)->is_nobuf)
#define \
nxt_buf_set_nobuf(b) \
(b)->is_nobuf = 1
#define \
nxt_buf_clear_nobuf(b) \
(b)->is_nobuf = 0
#define \
nxt_buf_is_flush(b) \
((b)->is_flush)
#define \
nxt_buf_set_flush(b) \
(b)->is_flush = 1
#define \
nxt_buf_clear_flush(b) \
(b)->is_flush = 0
#define \
nxt_buf_is_last(b) \
((b)->is_last)
#define \
nxt_buf_set_last(b) \
(b)->is_last = 1
#define \
nxt_buf_clear_last(b) \
(b)->is_last = 0
#define \
nxt_buf_mem_set_size(bm, size) \
do { \
(bm)->start = 0; \
(bm)->end = (void *) size; \
} while (0)
#define \
nxt_buf_mem_size(bm) \
((bm)->end - (bm)->start)
#define \
nxt_buf_mem_used_size(bm) \
((bm)->free - (bm)->pos)
#define \
nxt_buf_mem_free_size(bm) \
((bm)->end - (bm)->free)
#define \
nxt_buf_used_size(b) \
(nxt_buf_is_file(b) ? (b)->file_end - (b)->file_pos: \
nxt_buf_mem_used_size(&(b)->mem))
NXT_EXPORT nxt_buf_t *nxt_buf_mem_alloc(nxt_mem_pool_t *mp, size_t size,
nxt_uint_t flags);
NXT_EXPORT nxt_buf_t *nxt_buf_file_alloc(nxt_mem_pool_t *mp, size_t size,
nxt_uint_t flags);
NXT_EXPORT nxt_buf_t *nxt_buf_mmap_alloc(nxt_mem_pool_t *mp, size_t size);
NXT_EXPORT nxt_buf_t *nxt_buf_sync_alloc(nxt_mem_pool_t *mp, nxt_uint_t flags);
#define \
nxt_buf_free(mp, b) \
nxt_mem_cache_free0((mp), (b), (b)->size)
NXT_EXPORT void nxt_buf_chain_add(nxt_buf_t **head, nxt_buf_t *in);
NXT_EXPORT size_t nxt_buf_chain_length(nxt_buf_t *b);
#endif /* _NXT_BUF_H_INCLUDED_ */

448
src/nxt_buf_filter.c Normal file
View File

@@ -0,0 +1,448 @@
/*
* Copyright (C) Igor Sysoev
* Copyright (C) NGINX, Inc.
*/
#include <nxt_main.h>
static nxt_int_t nxt_buf_filter_nobuf(nxt_buf_filter_t *f);
nxt_inline void nxt_buf_filter_next(nxt_buf_filter_t *f);
static void nxt_buf_filter_file_read_start(nxt_thread_t *thr,
nxt_buf_filter_t *f);
static void nxt_buf_filter_file_read(nxt_thread_t *thr, nxt_buf_filter_t *f);
static void nxt_buf_filter_file_job_completion(nxt_thread_t *thr,
void *obj, void *data);
static void nxt_buf_filter_buf_completion(nxt_thread_t *thr, void *obj,
void *data);
static void nxt_buf_filter_file_read_error(nxt_thread_t *thr, void *obj,
void *data);
/*
 * Entry point of the buf filter framework: appends the incoming chain
 * to the filter's input and runs the filter immediately.
 */
void
nxt_buf_filter_add(nxt_thread_t *thr, nxt_buf_filter_t *f, nxt_buf_t *b)
{
    nxt_buf_chain_add(&f->input, b);

    nxt_buf_filter(thr, f, NULL);
}
/*
 * Core of the buf filter framework: drains f->current and f->input
 * through the filter callbacks, accumulating results in f->output.
 * See nxt_buf_filter.h for the overall processing model.
 */
void
nxt_buf_filter(nxt_thread_t *thr, void *obj, void *data)
{
    nxt_int_t         rc;
    nxt_buf_t         *buf;
    nxt_buf_filter_t  *f;

    f = obj;

    nxt_log_debug(thr->log, "buf filter");

    if (f->done) {
        return;
    }

    f->queued = 0;

    for ( ;; ) {
        /*
         * f->input is a chain of original incoming buffers: memory,
         * mapped, file, and sync buffers;
         * f->current is a currently processed memory buffer or a chain
         * of memory/file or mapped/file buffers which are read from
         * or populated from a file;
         * f->output is a chain of output buffers;
         * f->last is the last output buffer in the chain.
         */
        buf = f->current;

        nxt_log_debug(thr->log, "buf filter current: %p", buf);

        if (buf == NULL) {

            if (f->reading) {
                /* A file read is in flight; resumed on its completion. */
                return;
            }

            buf = f->input;

            nxt_log_debug(thr->log, "buf filter input: %p", buf);

            if (buf == NULL) {
                /*
                 * The end of the input chain, pass
                 * the output chain to the next filter.
                 */
                nxt_buf_filter_next(f);
                return;
            }

            if (nxt_buf_is_mem(buf)) {
                f->current = buf;
                f->input = buf->next;
                buf->next = NULL;

            } else if (nxt_buf_is_file(buf)) {
                /* Flush pending output before starting a file read. */
                if (f->run->filter_ready(f) != NXT_OK) {
                    nxt_buf_filter_next(f);
                }

                nxt_buf_filter_file_read_start(thr, f);
                return;
            }
        }

        if (nxt_buf_is_sync(buf)) {
            rc = NXT_OK;
            f->current = buf;
            f->input = buf->next;
            buf->next = NULL;

            if (nxt_buf_is_nobuf(buf)) {
                rc = f->run->filter_sync_nobuf(f);

            } else if (nxt_buf_is_flush(buf)) {
                rc = f->run->filter_sync_flush(f);

            } else if (nxt_buf_is_last(buf)) {
                rc = f->run->filter_sync_last(f);
                f->done = (rc == NXT_OK);
            }

            if (nxt_fast_path(rc == NXT_OK)) {
                continue;
            }

            if (nxt_slow_path(rc == NXT_ERROR)) {
                goto fail;
            }

            /* rc == NXT_AGAIN: No filter internal buffers available. */
            goto nobuf;
        }

        rc = f->run->filter_process(f);

        if (nxt_fast_path(rc == NXT_OK)) {
            buf = f->current;

            /*
             * A filter may just move f->current to f->output
             * and then set f->current to NULL.
             */
            if (buf != NULL && buf->mem.pos == buf->mem.free) {
                f->current = buf->next;

                nxt_thread_work_queue_add(thr, f->work_queue,
                                          buf->completion_handler,
                                          buf, buf->parent, thr->log);
            }

            continue;
        }

        if (nxt_slow_path(rc == NXT_ERROR)) {
            goto fail;
        }

        /* rc == NXT_AGAIN: No filter internal buffers available. */
        goto nobuf;
    }

nobuf:

    if (nxt_buf_filter_nobuf(f) == NXT_OK) {
        return;
    }

fail:

    nxt_thread_work_queue_add(thr, f->work_queue, f->run->filter_error,
                              f, f->data, thr->log);
}
/*
 * Out-of-buffers path: emits a "nobuf" sync buffer so downstream
 * completion eventually re-runs the filter.  Returns NXT_ERROR if even
 * the sync buffer cannot be allocated.
 */
static nxt_int_t
nxt_buf_filter_nobuf(nxt_buf_filter_t *f)
{
    nxt_buf_t  *b;

    nxt_thread_log_debug("buf filter nobuf");

    b = nxt_buf_sync_alloc(f->mem_pool, NXT_BUF_SYNC_NOBUF);

    if (nxt_slow_path(b == NULL)) {
        return NXT_ERROR;
    }

    nxt_buf_chain_add(&f->output, b);

    f->last = NULL;

    f->run->filter_next(f);
    f->output = NULL;

    return NXT_OK;
}
/* Passes the accumulated output chain, if any, to the next filter. */
nxt_inline void
nxt_buf_filter_next(nxt_buf_filter_t *f)
{
    if (f->output == NULL) {
        return;
    }

    f->last = NULL;

    f->run->filter_next(f);
    f->output = NULL;
}
/*
 * Schedules one run of nxt_buf_filter() unless it is already queued
 * or the filter has finished.
 */
void
nxt_buf_filter_enqueue(nxt_thread_t *thr, nxt_buf_filter_t *f)
{
    nxt_log_debug(thr->log, "buf filter enqueue: %d", f->queued);

    if (f->queued || f->done) {
        return;
    }

    f->queued = 1;

    nxt_thread_work_queue_add(thr, f->work_queue, nxt_buf_filter,
                              f, NULL, thr->log);
}
/*
 * Creates the file job used to read the current input file buffer into
 * memory and kicks off the first read.  On job creation failure the
 * filter_error handler is scheduled instead.
 */
static void
nxt_buf_filter_file_read_start(nxt_thread_t *thr, nxt_buf_filter_t *f)
{
    nxt_job_file_t         *job;
    nxt_buf_filter_file_t  *file;

    file = f->run->job_file_create(f);

    if (nxt_slow_path(file == NULL)) {
        nxt_thread_work_queue_add(thr, f->work_queue, f->run->filter_error,
                                  f, f->data, thr->log);
        return;
    }

    f->filter_file = file;

    job = &file->job_file;
    job->file = *f->input->file;
    job->ready_handler = nxt_buf_filter_file_job_completion;
    job->error_handler = nxt_buf_filter_file_read_error;

    nxt_job_set_name(&job->job, "buf filter job file");

    f->reading = 1;

    nxt_buf_filter_file_read(thr, f);
}
/*
 * Allocates a destination buffer from the filter's buffer pool and
 * starts reading the next part of the current input file buffer.
 * When the pool is exhausted, already buffered data is flushed first;
 * unrecoverable failures schedule the filter_error handler.
 */
static void
nxt_buf_filter_file_read(nxt_thread_t *thr, nxt_buf_filter_t *f)
{
    nxt_int_t              rc;
    nxt_off_t              size;
    nxt_buf_t              *buf;
    nxt_buf_filter_file_t  *ff;

    ff = f->filter_file;

    if (ff->job_file.buffer != NULL) {
        /* File is now being read. */
        return;
    }

    size = f->input->file_end - f->input->file_pos;

    if (size > (nxt_off_t) NXT_SIZE_T_MAX) {
        /*
         * Small size value is a hint for buffer pool allocation
         * size, but if size of the size_t type is lesser than size
         * of the nxt_off_t type, the large size value may be truncated,
         * so use a default buffer pool allocation size.
         */
        size = 0;
    }

    if (f->mmap) {
        rc = nxt_buf_pool_mmap_alloc(&ff->buffers, (size_t) size);
    } else {
        rc = nxt_buf_pool_file_alloc(&ff->buffers, (size_t) size);
    }

    if (nxt_fast_path(rc == NXT_OK)) {
        buf = ff->buffers.current;

        /* The buffer starts out empty: file_end == file_pos. */
        buf->file_pos = f->input->file_pos;
        buf->file_end = f->input->file_pos;
        buf->file = f->input->file;

        ff->job_file.buffer = buf;
        ff->job_file.offset = f->input->file_pos;

        f->run->job_file_retain(f);

        nxt_job_file_read(thr, &ff->job_file.job);
        return;
    }

    if (nxt_fast_path(rc != NXT_ERROR)) {

        /* rc == NXT_AGAIN: No buffers available. */

        if (f->buffering) {
            f->buffering = 0;

            if (nxt_fast_path(f->run->filter_flush(f) != NXT_ERROR)) {
                return;
            }

        } else if (nxt_fast_path(nxt_buf_filter_nobuf(f) == NXT_OK)) {
            return;
        }
    }

    nxt_thread_work_queue_add(thr, f->work_queue, f->run->filter_error,
                              f, f->data, thr->log);
}
/*
 * Links a file-read destination buffer back to its filter and to the
 * original input file buffer it was read from.
 */
typedef struct {
    nxt_buf_filter_t  *filter;
    nxt_buf_t         *buf;
} nxt_buf_filter_ctx_t;
/*
 * File job completion handler: wraps the freshly read buffer in a
 * completion context, chains it to f->current, and resumes filtering.
 * Advances the input file buffer position; once the input is fully
 * read, moves on to the next input buffer and clears the reading flag.
 */
static void
nxt_buf_filter_file_job_completion(nxt_thread_t *thr, void *obj, void *data)
{
    nxt_buf_t             *buf;
    nxt_bool_t            done;
    nxt_job_file_t        *job;
    nxt_buf_filter_t      *f;
    nxt_buf_filter_ctx_t  *ctx;

    job = obj;
    f = data;
    buf = job->buffer;
    job->buffer = NULL;

    nxt_log_debug(thr->log, "buf filter file completion: \"%FN\" %O-%O",
                  job->file.name, buf->file_pos, buf->file_end);

    f->run->job_file_release(f);

    ctx = nxt_mem_cache_alloc0(f->mem_pool, sizeof(nxt_buf_filter_ctx_t));
    if (nxt_slow_path(ctx == NULL)) {
        goto fail;
    }

    ctx->filter = f;
    ctx->buf = f->input;

    f->input->file_pos = buf->file_end;

    done = (f->input->file_pos == f->input->file_end);

    if (done) {
        f->input = f->input->next;
        f->reading = 0;
    }

    buf->data = f->data;
    buf->completion_handler = nxt_buf_filter_buf_completion;
    buf->parent = (nxt_buf_t *) ctx;
    buf->next = NULL;

    nxt_buf_chain_add(&f->current, buf);

    nxt_buf_filter(thr, f, NULL);

    if (buf->mem.pos == buf->mem.free) {
        /*
         * The buffer has been completely processed by nxt_buf_filter(),
         * its completion handler has been placed in workqueue and
         * nxt_buf_filter_buf_completion() should be eventually called.
         */
        return;
    }

    if (!done) {
        /* Try to allocate another buffer and read the next file part. */
        nxt_buf_filter_file_read(thr, f);
    }

    return;

fail:

    nxt_thread_work_queue_add(thr, f->work_queue, f->run->filter_error,
                              f, f->data, thr->log);
}
/*
 * Completion handler of a buffer read from a file: recycles the buffer
 * into the pool, keeps reading while the input file buffer is not
 * exhausted, and at end of file tears down the read state and
 * completes the original input buffer.
 */
static void
nxt_buf_filter_buf_completion(nxt_thread_t *thr, void *obj, void *data)
{
    nxt_buf_t             *in, *buf;
    nxt_buf_filter_t      *f;
    nxt_buf_filter_ctx_t  *ctx;

    buf = obj;
    ctx = data;
    f = ctx->filter;

    nxt_log_debug(thr->log, "buf filter completion: %p \"%FN\" %O-%O",
                  buf, f->filter_file->job_file.file.name,
                  buf->file_pos, buf->file_end);

    /* nxt_http_send_filter() might clear a buffer's file status. */
    buf->is_file = 1;

    in = ctx->buf;

    nxt_mem_cache_free0(f->mem_pool, ctx, sizeof(nxt_buf_filter_ctx_t));
    nxt_buf_pool_free(&f->filter_file->buffers, buf);

    if (in->file_pos < in->file_end) {
        nxt_buf_filter_file_read(thr, f);
        return;
    }

    if (buf->file_end == in->file_end) {
        /* The last part of the input buffer has completed: finish it. */
        nxt_buf_pool_destroy(&f->filter_file->buffers);

        nxt_job_destroy(&f->filter_file->job_file.job);

        nxt_thread_work_queue_add(thr, f->work_queue, in->completion_handler,
                                  in, in->parent, thr->log);
    }

    nxt_buf_filter(thr, f, NULL);
}
/* File read error handler: schedule the filter's error callback. */
static void
nxt_buf_filter_file_read_error(nxt_thread_t *thr, void *obj, void *data)
{
    nxt_buf_filter_t  *f;

    f = data;

    nxt_thread_work_queue_add(thr, f->work_queue, f->run->filter_error,
                              f, f->data, thr->log);
}

116
src/nxt_buf_filter.h Normal file
View File

@@ -0,0 +1,116 @@
/*
* Copyright (C) Igor Sysoev
* Copyright (C) NGINX, Inc.
*/
#ifndef _NXT_BUF_FILTER_H_INCLUDED_
#define _NXT_BUF_FILTER_H_INCLUDED_
/*
* nxt_buf_filter is a framework intended to simplify processing file
* buffers content by a filter. The filter should set callbacks and
* call nxt_buf_filter_add() to start processing.
*
* At first buf_filter calls filter_ready() and the filter ensures
* it may allocate or reuse its internal buffer. No real allocation
* is performed at this step.
*
 * TODO prevent unneeded allocation if no input data.
*
*
* TODO: The filter can flush data buffered
* previously, if all internal buffers are full.
*
* Then buf_filter looks buffer chains. There are two buffer chains:
* the input chain is a chain of original incoming memory, file, and sync
* buffers; and the current chain is a chain of memory/file buffers read
* from a file-only buffer. The current chain is processed first. Since
 * buffers in this chain always contain a memory part, they can be passed
* one by one to the filter using filter_process(). If there is an output
* buffer after the buffer processing, it is added to output chain. The
* output buffers are not filter internal buffers. They just point to these
* internal buffers and one internal buffer can correspond to several output
* buffers which point to adjoining parts of the internal buffer. Further
* processing depends on filter_process() result code: if it returns NXT_OK,
* then the filter internal buffer is not full and buf_filter looks the next
* current or input buffer. If result code is NXT_AGAIN, then the filter
* internal buffer is full and buf_filter calls filter_flush() and then
* schedules to run nxt_buf_filter_repeat(). nxt_buf_filter_repeat() will
* run after all ready output buffer completion handlers and will call
* buf_filter again if no one completion handler will do it already using
* nxt_buf_filter_enqueue(). So in any case buf_filter will run again only
* once.
*
* TODO:
* in ideal just one the filter internal buffer.
* This allows to minimize number of the filter internal buffers if they
* flush fast.
*
* If the current chain is empty, the buf_filter processes the input chain.
* Memory buffers are passed to the filter using filter_process(). If an
* input buffer is a file buffer, then buf_filter calls filter_flush()
* and starts a file job to read the buffer in memory. The file job reads
* file parts into memory/file buffers and adds them to the current chain.
*
* Sync buffers are passed to the filter using filter_sync(). Its
* post-processing is similar to the filter_process() post-processing,
* except sync buffers are always added unmodified to the output chain.
*/
/* State of an asynchronous file read: the file job and its buffer pool. */
typedef struct {
    nxt_job_file_t  job_file;
    nxt_buf_pool_t  buffers;
} nxt_buf_filter_file_t;


typedef struct nxt_buf_filter_s  nxt_buf_filter_t;


/* Filter callbacks; see the description above for when each is called. */
typedef struct {
    nxt_int_t              (*filter_ready)(nxt_buf_filter_t *f);
    nxt_int_t              (*filter_process)(nxt_buf_filter_t *f);
    nxt_int_t              (*filter_flush)(nxt_buf_filter_t *f);

    nxt_int_t              (*filter_sync_nobuf)(nxt_buf_filter_t *f);
    nxt_int_t              (*filter_sync_flush)(nxt_buf_filter_t *f);
    nxt_int_t              (*filter_sync_last)(nxt_buf_filter_t *f);

    void                   (*filter_next)(nxt_buf_filter_t *f);
    void                   (*filter_error)(nxt_thread_t *thr, void *obj,
                               void *data);

    nxt_buf_filter_file_t  *(*job_file_create)(nxt_buf_filter_t *f);
    void                   (*job_file_retain)(nxt_buf_filter_t *f);
    void                   (*job_file_release)(nxt_buf_filter_t *f);
} nxt_buf_filter_ops_t;


struct nxt_buf_filter_s {
    nxt_buf_t                   *current;   /* chain being processed */
    nxt_buf_t                   *input;     /* original incoming chain */
    nxt_buf_t                   *output;    /* accumulated output chain */
    nxt_buf_t                   *last;      /* tail of the output chain */

    nxt_work_queue_t            *work_queue;
    nxt_buf_filter_file_t       *filter_file;
    void                        *data;
    nxt_mem_pool_t              *mem_pool;

    const nxt_buf_filter_ops_t  *run;

    uint8_t                     mmap;       /* 1 bit */
    uint8_t                     done;       /* 1 bit */
    uint8_t                     queued;     /* 1 bit */
    uint8_t                     reading;    /* 1 bit */
    uint8_t                     buffering;  /* 1 bit */
};
NXT_EXPORT void nxt_buf_filter_add(nxt_thread_t *thr, nxt_buf_filter_t *f,
nxt_buf_t *b);
NXT_EXPORT void nxt_buf_filter(nxt_thread_t *thr, void *obj, void *data);
NXT_EXPORT void nxt_buf_filter_enqueue(nxt_thread_t *thr, nxt_buf_filter_t *f);
#endif /* _NXT_BUF_FILTER_H_INCLUDED_ */

191
src/nxt_buf_pool.c Normal file
View File

@@ -0,0 +1,191 @@
/*
* Copyright (C) Igor Sysoev
* Copyright (C) NGINX, Inc.
*/
#include <nxt_main.h>
/*
 * Ensures that bp->current is a memory buffer with free space.
 * Returns NXT_OK on success, NXT_AGAIN when the pool limit is reached,
 * and NXT_ERROR on allocation failure.
 */
nxt_int_t
nxt_buf_pool_mem_alloc(nxt_buf_pool_t *bp, size_t size)
{
    nxt_buf_t  *buf;

    /* The current buffer still has room to write into. */
    buf = bp->current;

    if (buf != NULL && buf->mem.free < buf->mem.end) {
        return NXT_OK;
    }

    /* Reuse a previously released buffer if one is cached. */
    buf = bp->free;

    if (buf != NULL) {
        bp->free = buf->next;
        buf->next = NULL;
        bp->current = buf;
        return NXT_OK;
    }

    /* The pool limit is reached: the caller has to wait. */
    if (bp->num >= bp->max) {
        return NXT_AGAIN;
    }

    /* Sizes up to bp->size * 1.25 are rounded down to the default size. */
    if (size == 0 || size >= bp->size + bp->size / 4) {
        size = bp->size;
    }

    buf = nxt_buf_mem_alloc(bp->mem_pool, size, bp->flags);

    if (nxt_slow_path(buf == NULL)) {
        return NXT_ERROR;
    }

    bp->current = buf;
    bp->num++;

    return NXT_OK;
}
/*
 * Ensures that bp->current is a memory/file buffer with free space.
 * Same protocol as nxt_buf_pool_mem_alloc(), but buffers are created
 * with nxt_buf_file_alloc().
 */
nxt_int_t
nxt_buf_pool_file_alloc(nxt_buf_pool_t *bp, size_t size)
{
    nxt_buf_t  *buf;

    /* The current buffer still has room to write into. */
    buf = bp->current;

    if (buf != NULL && buf->mem.free < buf->mem.end) {
        return NXT_OK;
    }

    /* Reuse a previously released buffer if one is cached. */
    buf = bp->free;

    if (buf != NULL) {
        bp->free = buf->next;
        buf->next = NULL;
        bp->current = buf;
        return NXT_OK;
    }

    /* The pool limit is reached: the caller has to wait. */
    if (bp->num >= bp->max) {
        return NXT_AGAIN;
    }

    /* Sizes up to bp->size * 1.25 are rounded down to the default size. */
    if (size == 0 || size >= bp->size + bp->size / 4) {
        size = bp->size;
    }

    buf = nxt_buf_file_alloc(bp->mem_pool, size, bp->flags);

    if (nxt_slow_path(buf == NULL)) {
        return NXT_ERROR;
    }

    bp->current = buf;
    bp->num++;

    return NXT_OK;
}
/*
 * Ensures that bp->current is an mmap/file buffer.  Unlike the memory
 * variants, any existing current buffer is accepted as-is (no free
 * space test).  Sets bp->mmap when the first mmap buffer is created.
 */
nxt_int_t
nxt_buf_pool_mmap_alloc(nxt_buf_pool_t *bp, size_t size)
{
    nxt_buf_t  *buf;

    if (bp->current != NULL) {
        return NXT_OK;
    }

    /* Reuse a previously released buffer if one is cached. */
    buf = bp->free;

    if (buf != NULL) {
        bp->free = buf->next;
        buf->next = NULL;
        bp->current = buf;
        return NXT_OK;
    }

    /* The pool limit is reached: the caller has to wait. */
    if (bp->num >= bp->max) {
        return NXT_AGAIN;
    }

    /* Sizes up to bp->size * 1.25 are rounded down to the default size. */
    if (size == 0 || size >= bp->size + bp->size / 4) {
        size = bp->size;
    }

    buf = nxt_buf_mmap_alloc(bp->mem_pool, size);

    if (nxt_slow_path(buf == NULL)) {
        return NXT_ERROR;
    }

    bp->mmap = 1;
    bp->current = buf;
    bp->num++;

    return NXT_OK;
}
/*
 * Returns a buffer to the pool.  After nxt_buf_pool_destroy() has been
 * called (bp->destroy), buffers are freed for real; otherwise they are
 * reset and cached on the pool's free list for reuse.
 */
void
nxt_buf_pool_free(nxt_buf_pool_t *bp, nxt_buf_t *b)
{
    size_t size;

    nxt_thread_log_debug("buf pool free: %p %p", b, b->mem.start);

    size = nxt_buf_mem_size(&b->mem);

    if (bp->mmap) {
        /*
         * NOTE(review): "&b->mmap" — confirm nxt_mem_unmap() expects a
         * pointer to the buffer's mmap descriptor as its second argument.
         */
        nxt_mem_unmap(b->mem.start, &b->mmap, size);
    }

    if (bp->destroy) {
        if (b == bp->current) {
            bp->current = NULL;
        }
        if (!bp->mmap) {
            /* mmap memory was already unmapped above. */
            nxt_mem_free(bp->mem_pool, b->mem.start);
        }
        nxt_buf_free(bp->mem_pool, b);
        return;
    }

    if (bp->mmap) {
        /* The mapping is gone: keep only the size for the next mmap. */
        b->mem.pos = NULL;
        b->mem.free = NULL;
        nxt_buf_mem_set_size(&b->mem, size);
    } else {
        /* Reset the buffer to an empty state for reuse. */
        b->mem.pos = b->mem.start;
        b->mem.free = b->mem.start;
    }

    /* The current buffer is still in use and is not put on the free list. */
    if (b != bp->current) {
        b->next = bp->free;
        bp->free = b;
    }
}
/*
 * Destroys the pool: marks it so that subsequently freed buffers are
 * released for real, and frees all buffers cached on the free list.
 */
void
nxt_buf_pool_destroy(nxt_buf_pool_t *bp)
{
    u_char     *p;
    nxt_buf_t  *b, *next;

    bp->destroy = 1;

    for (b = bp->free; b != NULL; b = next) {
        /*
         * BUG FIX: save the next pointer and the memory start before
         * nxt_buf_free(); the original read b->next after the buffer
         * structure had been returned to the memory pool (use after
         * free).
         */
        next = b->next;
        p = b->mem.start;

        nxt_buf_free(bp->mem_pool, b);
        nxt_mem_free(bp->mem_pool, p);
    }

    bp->free = NULL;
}

80
src/nxt_buf_pool.h Normal file
View File

@@ -0,0 +1,80 @@
/*
* Copyright (C) Igor Sysoev
* Copyright (C) NGINX, Inc.
*/
#ifndef _NXT_BUF_POOL_H_INCLUDED_
#define _NXT_BUF_POOL_H_INCLUDED_
/*
* nxt_buf_pool_t is intended to allocate up to the "max" number
* memory, memory/file, or mmap/file buffers. A size of the buffers
* is set in the "size" field. The size however can be overridden in
* nxt_buf_pool_XXX_alloc() by the "size" argument if the argument is
* not zero and lesser than or equal to the "size" field multiplied
* by 1.25. The "flags" field is passed as the nxt_mem_buf() flags.
*/
/* See the description above for the allocation and size rules. */
typedef struct {
    nxt_buf_t *current;        /* buffer currently being filled */
    nxt_buf_t *free;           /* list of buffers returned to the pool */
    nxt_mem_pool_t *mem_pool;
    uint16_t num;              /* number of buffers allocated so far */
    uint16_t max;              /* upper limit on allocated buffers */
    uint32_t size;             /* default buffer size */
    uint8_t flags; /* 2 bits */
    uint8_t destroy; /* 1 bit */
    uint8_t mmap; /* 1 bit */
} nxt_buf_pool_t;
NXT_EXPORT nxt_int_t nxt_buf_pool_mem_alloc(nxt_buf_pool_t *bp, size_t size);
NXT_EXPORT nxt_int_t nxt_buf_pool_file_alloc(nxt_buf_pool_t *bp, size_t size);
NXT_EXPORT nxt_int_t nxt_buf_pool_mmap_alloc(nxt_buf_pool_t *bp, size_t size);
NXT_EXPORT void nxt_buf_pool_free(nxt_buf_pool_t *bp, nxt_buf_t *b);
NXT_EXPORT void nxt_buf_pool_destroy(nxt_buf_pool_t *bp);
/* There is ready free buffer. */
#define \
nxt_buf_pool_ready(bp) \
((bp)->free != NULL \
|| ((bp)->current != NULL \
&& (bp)->current->mem.free < (bp)->current->mem.end))
/* A free buffer is allowed to be allocated. */
#define \
nxt_buf_pool_obtainable(bp) \
((bp)->num < (bp)->max)
/* There is ready free buffer or it is allowed to be allocated. */
#define \
nxt_buf_pool_available(bp) \
(nxt_buf_pool_obtainable(bp) || nxt_buf_pool_ready(bp))
/* Reserve allocation of "n" free buffers as they were allocated. */
#define \
nxt_buf_pool_reserve(bp, n) \
(bp)->num += (n)
/* Release a reservation. */
#define \
nxt_buf_pool_release(bp, n) \
(bp)->num -= (n)
#endif /* _NXT_BUF_POOL_H_INCLUDED_ */

643
src/nxt_cache.c Normal file
View File

@@ -0,0 +1,643 @@
/*
* Copyright (C) Igor Sysoev
* Copyright (C) NGINX, Inc.
*/
#include <nxt_main.h>
/* A cache time resolution is 10ms. */
#define \
nxt_cache_time(thr) \
(uint64_t) (nxt_thread_time(thr) * 100)
static nxt_int_t nxt_cache_lvlhsh_test(nxt_lvlhsh_query_t *lhq, void *data);
static nxt_work_handler_t nxt_cache_query_locked(nxt_cache_t *cache,
nxt_cache_query_t *q, nxt_lvlhsh_query_t *lhq);
static nxt_work_handler_t nxt_cache_node_hold(nxt_cache_t *cache,
nxt_cache_query_t *q, nxt_lvlhsh_query_t *lhq);
static nxt_work_handler_t nxt_cache_node_test(nxt_cache_t *cache,
nxt_cache_query_t *q);
static void nxt_cache_wait_handler(nxt_thread_t *thr, void *obj, void *data);
static void nxt_cache_timeout_handler(nxt_thread_t *thr, void *obj, void *data);
static void nxt_cache_wake_handler(nxt_thread_t *thr, void *obj, void *data);
static ssize_t nxt_cache_release_locked(nxt_cache_t *cache,
nxt_cache_query_t *q, u_char *buf, size_t size);
static nxt_cache_node_t *nxt_cache_node_alloc(nxt_cache_t *cache);
static void nxt_cache_node_free(nxt_cache_t *cache, nxt_cache_node_t *node,
nxt_bool_t fast);
static nxt_cache_query_wait_t *nxt_cache_query_wait_alloc(nxt_cache_t *cache,
nxt_bool_t *slow);
static void nxt_cache_query_wait_free(nxt_cache_t *cache,
nxt_cache_query_wait_t *qw);
/* STUB */
nxt_int_t nxt_cache_shm_create(nxt_mem_zone_t *pool);
static void *nxt_cache_shm_alloc(void *data, size_t size, nxt_uint_t nalloc);
/**/
/*
 * STUB: creates a cache in a shared memory zone; level-hash bucket
 * memory is allocated from the zone itself.
 * NOTE(review): "cache->shared" is never set here and the allocated
 * cache is not returned to the caller — confirm this is intentional
 * stub behavior.
 */
nxt_int_t
nxt_cache_shm_create(nxt_mem_zone_t *mz)
{
    nxt_cache_t *cache;

    static const nxt_lvlhsh_proto_t proto nxt_aligned(64) = {
        NXT_LVLHSH_LARGE_SLAB,
        0,
        nxt_cache_lvlhsh_test,
        (nxt_lvlhsh_alloc_t) nxt_cache_shm_alloc,
        (nxt_lvlhsh_free_t) nxt_mem_zone_free,
    };

    cache = nxt_mem_zone_zalloc(mz, sizeof(nxt_cache_t));
    if (cache == NULL) {
        return NXT_ERROR;
    }

    cache->proto = &proto;
    cache->pool = mz;

    /* All node times are kept relative to this start time. */
    cache->start_time = nxt_cache_time(nxt_thread());

    return NXT_OK;
}
/*
 * lvlhsh allocator for the shared-zone cache; "data" is the
 * nxt_mem_zone_t pool and "nalloc" is unused here.
 * NOTE(review): the alignment is taken equal to the allocation size —
 * confirm this is intended for lvlhsh bucket allocations.
 */
static void *
nxt_cache_shm_alloc(void *data, size_t size, nxt_uint_t nalloc)
{
    return nxt_mem_zone_align(data, size, size);
}
/*
 * Initializes a process-private cache: the level hash allocates with
 * the regular allocators and no spinlock is taken (cache->shared is 0).
 */
void
nxt_cache_init(nxt_cache_t *cache)
{
    static const nxt_lvlhsh_proto_t proto nxt_aligned(64) = {
        NXT_LVLHSH_LARGE_MEMALIGN,
        0,
        nxt_cache_lvlhsh_test,
        nxt_lvlhsh_alloc,
        nxt_lvlhsh_free,
    };

    cache->proto = &proto;

    /* All node times are kept relative to this start time. */
    cache->start_time = nxt_cache_time(nxt_thread());
}
/* Level-hash key comparison: NXT_OK when the node matches the query key. */
static nxt_int_t
nxt_cache_lvlhsh_test(nxt_lvlhsh_query_t *lhq, void *data)
{
    nxt_cache_node_t  *node;

    node = data;

    if (!nxt_str_eq(&lhq->key, node->key_data, node->key_len)) {
        return NXT_DECLINED;
    }

    return NXT_OK;
}
/* Only a cache shared between processes needs the spinlock. */
nxt_inline void
nxt_cache_lock(nxt_cache_t *cache)
{
    if (!cache->shared) {
        return;
    }

    nxt_thread_spin_lock(&cache->lock);
}


nxt_inline void
nxt_cache_unlock(nxt_cache_t *cache)
{
    if (!cache->shared) {
        return;
    }

    nxt_thread_spin_unlock(&cache->lock);
}
/*
 * Looks up the query key in the cache and invokes the state handler
 * chosen by nxt_cache_query_locked().  A NULL cache short-circuits to
 * the nocache handler.
 */
void
nxt_cache_query(nxt_cache_t *cache, nxt_cache_query_t *q)
{
    nxt_thread_t        *thr;
    nxt_lvlhsh_query_t  lhq;
    nxt_work_handler_t  handler;

    thr = nxt_thread();

    if (cache == NULL) {
        q->state->nocache_handler(thr, q, NULL);
        return;
    }

    lhq.key_hash = nxt_murmur_hash2(q->key_data, q->key_len);
    lhq.replace = 0;
    lhq.key.len = q->key_len;
    lhq.key.data = q->key_data;
    lhq.proto = cache->proto;
    lhq.pool = cache->pool;

    q->now = nxt_cache_time(thr);

    nxt_cache_lock(cache);

    handler = nxt_cache_query_locked(cache, q, &lhq);

    nxt_cache_unlock(cache);

    /* The handler runs outside the cache lock. */
    handler(thr, q, NULL);
}
/*
 * Looks up a cache node under the cache lock and returns the state
 * handler to run after unlocking.  A found node's reference count is
 * incremented; the caller balances it with nxt_cache_release().
 */
static nxt_work_handler_t
nxt_cache_query_locked(nxt_cache_t *cache, nxt_cache_query_t *q,
    nxt_lvlhsh_query_t *lhq)
{
    nxt_int_t ret;
    nxt_time_t expiry;
    nxt_cache_node_t *node;
    nxt_cache_query_state_t *state;

    if (q->hold) {
        /* The query wants to hold the node for a possible update. */
        return nxt_cache_node_hold(cache, q, lhq);
    }

    ret = nxt_lvlhsh_find(&cache->lvlhsh, lhq);

    state = q->state;

    if (ret != NXT_OK) {
        /* NXT_DECLINED */
        return state->nocache_handler;
    }

    node = lhq->value;
    node->count++;
    q->node = node;

    /* Node times are stored relative to the cache start time. */
    expiry = cache->start_time + node->expiry;

    if (q->now < expiry) {
        return state->ready_handler;
    }

    q->stale = 1;

    return state->stale_handler;
}
/*
 * Holds a cache node for the query.  If the key is absent, an empty
 * sentinel node is inserted so that concurrent queries for the same key
 * wait until this query finishes updating.  Called under the cache
 * lock; note that the *_alloc helpers may drop the lock temporarily.
 */
static nxt_work_handler_t
nxt_cache_node_hold(nxt_cache_t *cache, nxt_cache_query_t *q,
    nxt_lvlhsh_query_t *lhq)
{
    nxt_int_t ret;
    nxt_bool_t slow;
    nxt_cache_node_t *node, *sentinel;
    nxt_work_handler_t handler;
    nxt_cache_query_wait_t *qw;
    nxt_cache_query_state_t *state;

    state = q->state;

    sentinel = nxt_cache_node_alloc(cache);
    if (nxt_slow_path(sentinel == NULL)) {
        return state->error_handler;
    }

    sentinel->key_data = q->key_data;
    sentinel->key_len = q->key_len;
    lhq->value = sentinel;

    /*
     * Try to insert an empty sentinel node to hold updating
     * process if there is no existent cache node in cache.
     */
    ret = nxt_lvlhsh_insert(&cache->lvlhsh, lhq);

    if (ret == NXT_OK) {
        /* The sentinel node was successfully added. */
        q->node = sentinel;
        sentinel->updating = 1;
        return state->update_handler;
    }

    /* The sentinel was not inserted: release it (fast path, under lock). */
    nxt_cache_node_free(cache, sentinel, 1);

    if (ret == NXT_ERROR) {
        return state->error_handler;
    }

    /* NXT_DECLINED: a cache node exists. */
    node = lhq->value;
    node->count++;
    q->node = node;

    handler = nxt_cache_node_test(cache, q);
    if (handler != NULL) {
        return handler;
    }

    /* Add the node to a wait queue. */
    qw = nxt_cache_query_wait_alloc(cache, &slow);
    if (nxt_slow_path(qw == NULL)) {
        return state->error_handler;
    }

    if (slow) {
        /* The node state may have been changed during slow allocation. */
        handler = nxt_cache_node_test(cache, q);
        if (handler != NULL) {
            nxt_cache_query_wait_free(cache, qw);
            return handler;
        }
    }

    qw->query = q;
    qw->next = node->waiting;
    qw->busy = 0;
    qw->deleted = 0;
    qw->pid = nxt_pid;
    qw->engine = nxt_thread_event_engine();
    qw->handler = nxt_cache_wake_handler;
    qw->cache = cache;

    node->waiting = qw;

    return nxt_cache_wait_handler;
}
/*
 * Tests the query's node state and returns the handler to run, or NULL
 * when the query must wait for an in-progress update.  Called under
 * the cache lock.
 */
static nxt_work_handler_t
nxt_cache_node_test(nxt_cache_t *cache, nxt_cache_query_t *q)
{
    nxt_time_t expiry;
    nxt_cache_node_t *node;
    nxt_cache_query_state_t *state;

    q->stale = 0;
    state = q->state;
    node = q->node;

    expiry = cache->start_time + node->expiry;

    if (q->now < expiry) {
        /* A fresh cache node. */
        return state->ready_handler;
    }

    /*
     * A valid stale or empty sentinel cache node.
     * The sentinel node can be only in updating state.
     */
    if (node->updating) {
        if (node->expiry != 0) {
            /* A valid stale cache node. */
            q->stale = 1;
            if (q->use_stale) {
                return state->stale_handler;
            }
        }
        /* A sentinel node. */
        return NULL;
    }

    /* A valid stale cache node is not being updated now. */
    q->stale = 1;

    if (q->use_stale) {
        if (q->update_stale) {
            /* This query takes over the update. */
            node->updating = 1;
            return state->update_stale_handler;
        }
        return state->stale_handler;
    }

    node->updating = 1;
    return state->update_handler;
}
/*
 * Runs when a query starts waiting for another query's update.  Arms
 * the query timeout timer once; with a zero timeout the query waits
 * indefinitely.
 */
static void
nxt_cache_wait_handler(nxt_thread_t *thr, void *obj, void *data)
{
    nxt_cache_query_t  *cq;
    nxt_event_timer_t  *ev;

    cq = obj;

    if (cq->timeout == 0) {
        return;
    }

    ev = &cq->timer;

    /* A repeated wait keeps the already armed timer. */
    if (ev->state == NXT_EVENT_TIMER_DISABLED) {
        ev->handler = nxt_cache_timeout_handler;
        nxt_event_timer_ident(ev, -1);

        nxt_event_timer_add(thr->engine, ev, cq->timeout);
    }
}
/* Fires when a waiting query's timer expires: runs the timeout handler. */
static void
nxt_cache_timeout_handler(nxt_thread_t *thr, void *obj, void *data)
{
    nxt_event_timer_t  *ev;
    nxt_cache_query_t  *cq;

    ev = obj;

    /* Recover the embedding query from its timer field. */
    cq = nxt_event_timer_data(ev, nxt_cache_query_t, timer);

    cq->state->timeout_handler(thr, cq, NULL);
}
/*
 * Runs in the waiting query's engine when the updating query finishes.
 * The node is re-tested under the lock; if it is still not ready the
 * wait entry is put back on the node's waiting list and no handler is
 * invoked.
 */
static void
nxt_cache_wake_handler(nxt_thread_t *thr, void *obj, void *data)
{
    nxt_cache_t             *cache;
    nxt_work_handler_t      handler;
    nxt_cache_query_t       *q;
    nxt_cache_query_wait_t  *qw;

    qw = obj;
    q = qw->query;
    cache = qw->cache;

    nxt_cache_lock(cache);

    handler = nxt_cache_node_test(cache, q);

    if (handler != NULL) {
        nxt_cache_query_wait_free(cache, qw);

    } else {
        /* Wait again. */
        qw->next = q->node->waiting;
        q->node->waiting = qw;
    }

    nxt_cache_unlock(cache);

    /*
     * BUG FIX: the original called handler(thr, q, NULL) unconditionally,
     * which is a NULL function pointer call on the "wait again" path
     * where nxt_cache_node_test() returns NULL.
     */
    if (handler != NULL) {
        handler(thr, q, NULL);
    }
}
/*
 * Publishes an updated node: replaces any existing node with the same
 * key in the level hash and puts the new node at the head of the
 * expiry queue.  Returns the nxt_lvlhsh_insert() result.
 */
nxt_int_t
nxt_cache_update(nxt_cache_t *cache, nxt_cache_query_t *q)
{
    nxt_int_t           ret;
    nxt_cache_node_t    *node;
    nxt_lvlhsh_query_t  lhq;

    node = q->node;

    node->accessed = nxt_cache_time(nxt_thread()) - cache->start_time;

    node->updating = 0;
    node->count = 1;

    lhq.key_hash = nxt_murmur_hash2(node->key_data, node->key_len);
    lhq.replace = 1;
    lhq.key.len = node->key_len;
    lhq.key.data = node->key_data;
    lhq.value = node;
    lhq.proto = cache->proto;
    lhq.pool = cache->pool;

    nxt_cache_lock(cache);

    ret = nxt_lvlhsh_insert(&cache->lvlhsh, &lhq);

    /*
     * BUG FIX: the expiry queue must be updated only on successful
     * insertion.  The original tested "ret != NXT_OK", so the node was
     * queued exactly when the insertion had failed and lhq.value did
     * not hold a replaced node.
     */
    if (nxt_fast_path(ret == NXT_OK)) {
        nxt_queue_insert_head(&cache->expiry_queue, &node->link);

        /* With lhq.replace set, lhq.value returns the replaced node. */
        node = lhq.value;

        if (node != NULL) {
            /* A replaced node. */
            nxt_queue_remove(&node->link);

            if (node->count != 0) {
                /* Still referenced: freed on the last release. */
                node->deleted = 1;

            } else {
                /* TODO: delete cache node. */
            }
        }
    }

    nxt_cache_unlock(cache);

    return ret;
}
/*
 * Releases a node reference taken by nxt_cache_query().  If the node
 * must be deleted, its deletion data is copied out under the lock (into
 * the stack buffer, or into a larger heap buffer when the locked helper
 * returns a negative required size) and the deletion itself is deferred
 * to cache->delete_handler off the lock.
 */
void
nxt_cache_release(nxt_cache_t *cache, nxt_cache_query_t *q)
{
    u_char *p, *data;
    size_t size;
    ssize_t ret;
    nxt_thread_t *thr;
    u_char buf[1024];

    thr = nxt_thread();
    q->now = nxt_cache_time(thr);

    p = buf;
    size = sizeof(buf);

    for ( ;; ) {
        nxt_cache_lock(cache);

        ret = nxt_cache_release_locked(cache, q, p, size);

        nxt_cache_unlock(cache);

        if (ret == 0) {
            return;
        }

        /* A negative value is the required buffer size. */
        size = nxt_abs(ret);

        data = nxt_malloc(size);
        if (data == NULL) {
            /* TODO: retry */
            return;
        }

        if (ret < 0) {
            /* The buffer was too small: retry with the larger one. */
            p = data;
            continue;
        }

        if (p != data) {
            nxt_memcpy(data, p, size);
        }

        /*
         * NOTE(review): after scheduling the delete handler the loop
         * iterates again and decrements node->count once more — confirm
         * this path is unreachable while delete_copy is stubbed out
         * (nxt_cache_release_locked() currently always returns 0 here).
         */
        nxt_thread_work_queue_add(thr, &thr->work_queue.main,
                                  cache->delete_handler, data, NULL, thr->log);
    }
}
/*
 * Drops one reference from the query's node under the cache lock.
 * Returns 0 when nothing more is needed; a negative value asks the
 * caller to retry with a buffer of at least -ret bytes; a positive
 * value is the size of deletion data copied into "buf".  The copy
 * step is currently stubbed out, so only 0 is returned in practice.
 */
static ssize_t
nxt_cache_release_locked(nxt_cache_t *cache, nxt_cache_query_t *q,
    u_char *buf, size_t size)
{
    ssize_t ret;
    nxt_cache_node_t *node;

    node = q->node;
    node->count--;

    if (node->count != 0) {
        return 0;
    }

    if (!node->deleted) {
        /*
         * A cache node is locked whilst its count is non zero.
         * To minimize number of operations the node's place in expiry
         * queue can be updated only if the node is not currently used.
         */
        node->accessed = q->now - cache->start_time;
        nxt_queue_remove(&node->link);
        nxt_queue_insert_head(&cache->expiry_queue, &node->link);
        return 0;
    }

    /* The last reference to a deleted node is gone: free it. */
    ret = 0;

#if 0
    ret = cache->delete_copy(cache, node, buf, size);
    if (ret < 0) {
        return ret;
    }
#endif

    nxt_cache_node_free(cache, node, 0);

    return ret;
}
/*
 * Allocates a cache node, preferring the cached free_nodes list.
 * Called under the cache lock; the lock is released around the
 * potentially slow cache->alloc() call, so the caller must re-validate
 * any state observed before calling.
 */
static nxt_cache_node_t *
nxt_cache_node_alloc(nxt_cache_t *cache)
{
    nxt_queue_link_t *link;
    nxt_cache_node_t *node;

    link = nxt_queue_first(&cache->free_nodes);

    if (nxt_fast_path(link != nxt_queue_tail(&cache->free_nodes))) {
        cache->nfree_nodes--;
        nxt_queue_remove(link);

        node = nxt_queue_link_data(link, nxt_cache_node_t, link);
        nxt_memzero(node, sizeof(nxt_cache_node_t));

        return node;
    }

    /* The free list is empty: allocate outside the lock. */
    nxt_cache_unlock(cache);

    /* NOTE(review): confirm cache->alloc() zeroes the node like the
     * free-list path above does. */
    node = cache->alloc(cache->data, sizeof(nxt_cache_node_t));

    nxt_cache_lock(cache);

    return node;
}
/*
 * Frees a cache node.  Up to 32 nodes are cached on the free list;
 * "fast" forces caching so the lock is never dropped.  Otherwise the
 * node is released via cache->free() outside the lock.
 */
static void
nxt_cache_node_free(nxt_cache_t *cache, nxt_cache_node_t *node, nxt_bool_t fast)
{
    if (!fast && cache->nfree_nodes >= 32) {
        /* The free list is full: release the node outside the lock. */
        nxt_cache_unlock(cache);

        cache->free(cache->data, node);

        nxt_cache_lock(cache);
        return;
    }

    nxt_queue_insert_head(&cache->free_nodes, &node->link);
    cache->nfree_nodes++;
}
/*
 * Allocates a query wait entry, preferring the cached free list.
 * "*slow" is set when the cache lock had to be released for the
 * allocation, so the caller must re-validate any state observed
 * before the call.
 */
static nxt_cache_query_wait_t *
nxt_cache_query_wait_alloc(nxt_cache_t *cache, nxt_bool_t *slow)
{
    nxt_cache_query_wait_t *qw;

    qw = cache->free_query_wait;

    if (nxt_fast_path(qw != NULL)) {
        cache->free_query_wait = qw->next;
        cache->nfree_query_wait--;

        *slow = 0;
        return qw;
    }

    /* Slow path: allocate outside the lock. */
    nxt_cache_unlock(cache);

    qw = cache->alloc(cache->data, sizeof(nxt_cache_query_wait_t));
    *slow = 1;

    nxt_cache_lock(cache);

    return qw;
}
/*
 * Frees a query wait entry.  Up to 32 entries are cached on the free
 * list; beyond that the entry is released via cache->free() outside
 * the lock.
 */
static void
nxt_cache_query_wait_free(nxt_cache_t *cache, nxt_cache_query_wait_t *qw)
{
    if (cache->nfree_query_wait >= 32) {
        nxt_cache_unlock(cache);

        cache->free(cache->data, qw);

        nxt_cache_lock(cache);
        return;
    }

    qw->next = cache->free_query_wait;
    cache->free_query_wait = qw;
    cache->nfree_query_wait++;
}

122
src/nxt_cache.h Normal file
View File

@@ -0,0 +1,122 @@
/*
* Copyright (C) Igor Sysoev
* Copyright (C) NGINX, Inc.
*/
#ifndef _NXT_CACHE_INCLUDED_
#define _NXT_CACHE_INCLUDED_
typedef struct nxt_cache_query_s nxt_cache_query_t;
typedef struct nxt_cache_query_wait_s nxt_cache_query_wait_t;
/*
 * A cache instance.  The same structure serves a process-private cache
 * (nxt_cache_init()) and a shared-zone cache (nxt_cache_shm_create());
 * "shared" selects whether the spinlock is taken by nxt_cache_lock().
 */
typedef struct {
    uint32_t shared; /* 1 bit */
    nxt_thread_spinlock_t lock;
    nxt_lvlhsh_t lvlhsh;
    const nxt_lvlhsh_proto_t *proto;
    void *pool;
    /* Nodes are reinserted at the head on access/update. */
    nxt_queue_t expiry_queue;
    nxt_queue_t free_nodes;       /* cached nxt_cache_node_t */
    uint32_t nfree_nodes;
    uint32_t nfree_query_wait;
    nxt_cache_query_wait_t *free_query_wait;
    /* All node times are stored relative to this start time. */
    uint64_t start_time;
    /* STUB: use nxt_lvlhsh_proto_t */
    void *(*alloc)(void *data, size_t size);
    void (*free)(void *data, void *p);
    void *data;
    nxt_work_handler_t delete_handler;
} nxt_cache_t;

/* A cached entry, or an empty sentinel while an update is in progress. */
typedef struct {
    u_char *key_data;
    uint16_t key_len; /* 16 bits */
    uint8_t uses; /* 8 bits */
    uint8_t updating:1;       /* an update is in progress */
    uint8_t deleted:1;        /* free the node on the last release */
    uint32_t count;           /* references held by queries */
    /* Times relative to the cache->start_time. */
    uint32_t expiry;
    uint32_t accessed;
    nxt_off_t size;
    nxt_queue_link_t link;            /* expiry_queue / free_nodes link */
    nxt_cache_query_wait_t *waiting;  /* queries waiting for the update */
} nxt_cache_node_t;

/* A query waiting for another query's in-progress update. */
struct nxt_cache_query_wait_s {
    nxt_cache_query_t *query;
    nxt_cache_query_wait_t *next;
    uint8_t busy; /* 1 bit */
    uint8_t deleted; /* 1 bit */
    nxt_pid_t pid;
    nxt_event_engine_t *engine;      /* engine to wake the waiter in */
    nxt_work_handler_t handler;
    nxt_cache_t *cache;
};

/* Continuation handlers for every possible cache query outcome. */
typedef struct {
    nxt_work_handler_t nocache_handler;
    nxt_work_handler_t ready_handler;
    nxt_work_handler_t stale_handler;
    nxt_work_handler_t update_stale_handler;
    nxt_work_handler_t update_handler;
    nxt_work_handler_t timeout_handler;
    nxt_work_handler_t error_handler;
} nxt_cache_query_state_t;

/* A single cache lookup/update operation. */
struct nxt_cache_query_s {
    u_char *key_data;
    uint16_t key_len; /* 16 bits */
#if (NXT_64_BIT)
    uint8_t hold; /* 1 bit */
    uint8_t use_stale; /* 1 bit */
    uint8_t update_stale; /* 1 bit */
    uint8_t stale; /* 1 bit */
#else
    uint8_t hold:1;
    uint8_t use_stale:1;
    uint8_t update_stale:1;
    uint8_t stale:1;
#endif
    nxt_cache_node_t *node;
    nxt_cache_query_t *next;
    nxt_cache_query_state_t *state;
    nxt_time_t now;          /* query time, nxt_cache_time() units */
    nxt_msec_t timeout;      /* 0 means wait indefinitely */
    nxt_event_timer_t timer;
};
NXT_EXPORT void nxt_cache_init(nxt_cache_t *cache);
NXT_EXPORT void nxt_cache_query(nxt_cache_t *cache, nxt_cache_query_t *q);
NXT_EXPORT void nxt_cache_release(nxt_cache_t *cache, nxt_cache_query_t *q);
NXT_EXPORT nxt_int_t nxt_cache_update(nxt_cache_t *cache, nxt_cache_query_t *q);
#endif /* _NXT_CACHE_INCLUDED_ */

456
src/nxt_chan.c Normal file
View File

@@ -0,0 +1,456 @@
/*
* Copyright (C) Igor Sysoev
* Copyright (C) NGINX, Inc.
*/
#include <nxt_main.h>
static void nxt_chan_write_handler(nxt_thread_t *thr, void *obj, void *data);
static void nxt_chan_read_handler(nxt_thread_t *thr, void *obj, void *data);
static void nxt_chan_read_msg_process(nxt_thread_t *thr, nxt_chan_t *chan,
nxt_chan_msg_t *msg, nxt_fd_t fd, nxt_buf_t *b, size_t size);
static nxt_buf_t *nxt_chan_buf_alloc(nxt_chan_t *chan);
static void nxt_chan_buf_free(nxt_chan_t *chan, nxt_buf_t *b);
static void nxt_chan_error_handler(nxt_thread_t *thr, void *obj, void *data);
/*
 * Allocates a channel structure together with its own memory pool.
 * The socket pair is not created here; both fds are initialized to -1.
 */
nxt_chan_t *
nxt_chan_alloc(void)
{
    nxt_mem_pool_t  *mp;
    nxt_chan_t      *chan;

    mp = nxt_mem_pool_create(1024);

    if (nxt_slow_path(mp == NULL)) {
        return NULL;
    }

    /* This allocation cannot fail. */
    chan = nxt_mem_zalloc(mp, sizeof(nxt_chan_t));

    chan->mem_pool = mp;
    chan->pair[0] = -1;
    chan->pair[1] = -1;
    nxt_queue_init(&chan->messages);

    return chan;
}
/*
 * Creates a channel over a socket pair and tunes the socket buffers so
 * that a whole message part of max_size bytes can be sent at once.
 * A zero max_size defaults to 16K.  Returns NULL on failure.
 */
nxt_chan_t *
nxt_chan_create(size_t max_size)
{
    nxt_int_t sndbuf, rcvbuf, size;
    nxt_chan_t *chan;
    nxt_socket_t snd, rcv;

    chan = nxt_chan_alloc();
    if (nxt_slow_path(chan == NULL)) {
        return NULL;
    }

    if (nxt_slow_path(nxt_socketpair_create(chan->pair) != NXT_OK)) {
        goto socketpair_fail;
    }

    /* pair[1] is the sending side, pair[0] the receiving side. */
    snd = chan->pair[1];

    sndbuf = nxt_socket_getsockopt(snd, SOL_SOCKET, SO_SNDBUF);
    if (nxt_slow_path(sndbuf < 0)) {
        goto getsockopt_fail;
    }

    rcv = chan->pair[0];

    rcvbuf = nxt_socket_getsockopt(rcv, SOL_SOCKET, SO_RCVBUF);
    if (nxt_slow_path(rcvbuf < 0)) {
        goto getsockopt_fail;
    }

    if (max_size == 0) {
        max_size = 16 * 1024;
    }

    if ((size_t) sndbuf < max_size) {
        /*
         * On Unix domain sockets
         * Linux uses 224K on both send and receive directions;
         * FreeBSD, MacOSX, NetBSD, and OpenBSD use 2K buffer size
         * on send direction and 4K buffer size on receive direction;
         * Solaris uses 16K on send direction and 5K on receive direction.
         */
        (void) nxt_socket_setsockopt(snd, SOL_SOCKET, SO_SNDBUF, max_size);

        /* Re-read: the kernel may clamp or round the requested size. */
        sndbuf = nxt_socket_getsockopt(snd, SOL_SOCKET, SO_SNDBUF);
        if (nxt_slow_path(sndbuf < 0)) {
            goto getsockopt_fail;
        }

        /* Keep the receive buffer several times larger than the send one. */
        size = sndbuf * 4;

        if (rcvbuf < size) {
            (void) nxt_socket_setsockopt(rcv, SOL_SOCKET, SO_RCVBUF, size);

            rcvbuf = nxt_socket_getsockopt(rcv, SOL_SOCKET, SO_RCVBUF);
            if (nxt_slow_path(rcvbuf < 0)) {
                goto getsockopt_fail;
            }
        }
    }

    /* A message part may never exceed what the send buffer can hold. */
    chan->max_size = nxt_min(max_size, (size_t) sndbuf);
    chan->max_share = (64 * 1024);

    return chan;

getsockopt_fail:

    nxt_socket_close(chan->pair[0]);
    nxt_socket_close(chan->pair[1]);

socketpair_fail:

    nxt_mem_pool_destroy(chan->mem_pool);

    return NULL;
}
/*
 * Destroys the channel: closes the active socket and releases the pool.
 * NOTE(review): only socket.fd (the side selected by the last read/write
 * enable call) is closed here — confirm the other pair socket is closed
 * via nxt_chan_read_close()/nxt_chan_write_close().
 */
void
nxt_chan_destroy(nxt_chan_t *chan)
{
    nxt_socket_close(chan->socket.fd);
    nxt_mem_pool_destroy(chan->mem_pool);
}
/*
 * Makes the sending side (pair[1]) the channel's active socket and
 * registers the write and error handlers.  The socket is assumed
 * writable until a send returns NXT_AGAIN.
 */
void
nxt_chan_write_enable(nxt_thread_t *thr, nxt_chan_t *chan)
{
    chan->socket.fd = chan->pair[1];
    chan->socket.log = &nxt_main_log;
    chan->socket.write_ready = 1;
    chan->socket.write_work_queue = &thr->work_queue.main;
    chan->socket.write_handler = nxt_chan_write_handler;
    chan->socket.error_handler = nxt_chan_error_handler;
}
/* Closes the sending side of the channel pair. */
void
nxt_chan_write_close(nxt_chan_t *chan)
{
    nxt_socket_close(chan->pair[1]);
    chan->pair[1] = -1;
}
/*
 * Queues a message for sending to the channel peer.  If a message with
 * the same stream number is already queued, the buffer chain is simply
 * appended to it; otherwise a new send message is allocated and sending
 * starts immediately when the socket is writable.
 */
nxt_int_t
nxt_chan_write(nxt_chan_t *chan, nxt_uint_t type, nxt_fd_t fd, uint32_t stream,
    nxt_buf_t *b)
{
    nxt_thread_t *thr;
    nxt_queue_link_t *link;
    nxt_chan_send_msg_t *msg;

    for (link = nxt_queue_first(&chan->messages);
         link != nxt_queue_tail(&chan->messages);
         link = nxt_queue_next(link))
    {
        /* The link is the first field of nxt_chan_send_msg_t. */
        msg = (nxt_chan_send_msg_t *) link;

        if (msg->chan_msg.stream == stream) {
            /*
             * An fd is ignored since a file descriptor
             * must be sent only in the first message of a stream.
             */
            nxt_buf_chain_add(&msg->buf, b);
            return NXT_OK;
        }
    }

    msg = nxt_mem_cache_zalloc0(chan->mem_pool, sizeof(nxt_chan_send_msg_t));
    if (nxt_slow_path(msg == NULL)) {
        return NXT_ERROR;
    }

    msg->buf = b;
    msg->fd = fd;
    msg->share = 0;
    msg->chan_msg.stream = stream;
    msg->chan_msg.type = type;
    msg->chan_msg.last = 0;

    nxt_queue_insert_tail(&chan->messages, &msg->link);

    /* Start sending right away if the socket is known to be writable. */
    if (chan->socket.write_ready) {
        thr = nxt_thread();
        nxt_chan_write_handler(thr, chan, NULL);
    }

    return NXT_OK;
}
/*
 * Sends queued messages to the channel peer.  Each message part is a
 * nxt_chan_msg_t header followed by up to chan->max_size bytes of the
 * buffer chain.  A long stream is requeued to the tail after max_share
 * bytes so that other queued messages are not starved.
 */
static void
nxt_chan_write_handler(nxt_thread_t *thr, void *obj, void *data)
{
    ssize_t n;
    nxt_uint_t niob;
    nxt_chan_t *chan;
    struct iovec iob[NXT_IOBUF_MAX];
    nxt_queue_link_t *link;
    nxt_chan_send_msg_t *msg;
    nxt_sendbuf_coalesce_t sb;

    chan = obj;

    do {
        link = nxt_queue_first(&chan->messages);

        if (link == nxt_queue_tail(&chan->messages)) {
            /* The queue is drained: stop write events. */
            nxt_event_fd_block_write(thr->engine, &chan->socket);
            return;
        }

        /* The link is the first field of nxt_chan_send_msg_t. */
        msg = (nxt_chan_send_msg_t *) link;

        /* iob[0] is the message header, iob[1..] the payload chain. */
        nxt_iobuf_set(&iob[0], &msg->chan_msg, sizeof(nxt_chan_msg_t));

        sb.buf = msg->buf;
        sb.iobuf = &iob[1];
        sb.nmax = NXT_IOBUF_MAX - 1;
        sb.sync = 0;
        sb.last = 0;
        sb.size = sizeof(nxt_chan_msg_t);
        sb.limit = chan->max_size;

        niob = nxt_sendbuf_mem_coalesce(&sb);

        /* The header tells the peer whether this part ends the stream. */
        msg->chan_msg.last = sb.last;

        n = nxt_socketpair_send(&chan->socket, msg->fd, iob, niob + 1);

        if (n > 0) {
            if (nxt_slow_path((size_t) n != sb.size)) {
                nxt_log_alert(thr->log,
                              "chan %d: short write: %z instead of %uz",
                              chan->socket.fd, n, sb.size);
                goto fail;
            }

            /* Release the fully sent part of the buffer chain. */
            msg->buf = nxt_sendbuf_completion(thr,
                                              chan->socket.write_work_queue,
                                              msg->buf,
                                              n - sizeof(nxt_chan_msg_t));

            if (msg->buf != NULL) {
                /*
                 * A file descriptor is sent only
                 * in the first message of a stream.
                 */
                msg->fd = -1;
                msg->share += n;

                if (msg->share >= chan->max_share) {
                    /* Requeue so that other streams get a turn. */
                    msg->share = 0;
                    nxt_queue_remove(link);
                    nxt_queue_insert_tail(&chan->messages, link);
                }

            } else {
                /* The whole message has been sent. */
                nxt_queue_remove(link);
                nxt_mem_cache_free0(chan->mem_pool, msg,
                                    sizeof(nxt_chan_send_msg_t));
            }

        } else if (nxt_slow_path(n == NXT_ERROR)) {
            goto fail;
        }

        /* n == NXT_AGAIN */

    } while (chan->socket.write_ready);

    if (nxt_event_fd_is_disabled(chan->socket.write)) {
        nxt_event_fd_enable_write(thr->engine, &chan->socket);
    }

    return;

fail:

    nxt_thread_work_queue_add(thr, &thr->work_queue.main,
                              nxt_chan_error_handler,
                              &chan->socket, NULL, chan->socket.log);
}
/*
 * Makes the receiving side (pair[0]) the channel's active socket,
 * registers the read and error handlers, and enables read events.
 */
void
nxt_chan_read_enable(nxt_thread_t *thr, nxt_chan_t *chan)
{
    chan->socket.fd = chan->pair[0];
    chan->socket.log = &nxt_main_log;
    chan->socket.read_work_queue = &thr->work_queue.main;
    chan->socket.read_handler = nxt_chan_read_handler;
    chan->socket.error_handler = nxt_chan_error_handler;
    nxt_event_fd_enable_read(thr->engine, &chan->socket);
}
/* Closes the receiving side of the channel pair. */
void
nxt_chan_read_close(nxt_chan_t *chan)
{
    nxt_socket_close(chan->pair[0]);
    chan->pair[0] = -1;
}
/*
 * Reads messages from the channel socket.  Each read gets a fixed
 * nxt_chan_msg_t header plus up to chan->max_size payload bytes into a
 * buffer from the channel's free buffer list.  Fully consumed buffers
 * are returned to the list; on EOF or error the error handler is
 * scheduled.
 */
static void
nxt_chan_read_handler(nxt_thread_t *thr, void *obj, void *data)
{
    ssize_t         n;
    nxt_fd_t        fd;
    nxt_buf_t       *b;
    nxt_chan_t      *chan;
    nxt_iobuf_t     iob[2];
    nxt_chan_msg_t  msg;

    chan = obj;

    for ( ;; ) {
        b = nxt_chan_buf_alloc(chan);

        if (nxt_slow_path(b == NULL)) {
            /*
             * BUG FIX: the original fell through and dereferenced the
             * NULL buffer.  Stop reading for now and leave the read
             * event state untouched.
             * TODO: disable the event for some time.
             */
            return;
        }

        nxt_iobuf_set(&iob[0], &msg, sizeof(nxt_chan_msg_t));
        nxt_iobuf_set(&iob[1], b->mem.pos, chan->max_size);

        n = nxt_socketpair_recv(&chan->socket, &fd, iob, 2);

        if (n > 0) {
            nxt_chan_read_msg_process(thr, chan, &msg, fd, b, n);

            /* A fully consumed buffer goes back to the free list. */
            if (b->mem.pos == b->mem.free) {

                if (b->next != NULL) {
                    /* A sync buffer */
                    nxt_buf_free(chan->mem_pool, b->next);
                }

                nxt_chan_buf_free(chan, b);
            }

            if (chan->socket.read_ready) {
                continue;
            }

            return;
        }

        if (n == NXT_AGAIN) {
            nxt_chan_buf_free(chan, b);

            nxt_event_fd_enable_read(thr->engine, &chan->socket);
            return;
        }

        /* n == 0 || n == NXT_ERROR */

        nxt_thread_work_queue_add(thr, &thr->work_queue.main,
                                  nxt_chan_error_handler,
                                  &chan->socket, NULL, chan->socket.log);
        return;
    }
}
/*
 * Validates a received message, fills a nxt_chan_recv_msg_t and passes
 * it to the channel handler.  "size" is the total number of bytes read,
 * including the nxt_chan_msg_t header.  On a malformed message the
 * passed fd, if any, is closed.
 */
static void
nxt_chan_read_msg_process(nxt_thread_t *thr, nxt_chan_t *chan,
    nxt_chan_msg_t *msg, nxt_fd_t fd, nxt_buf_t *b, size_t size)
{
    nxt_buf_t *sync;
    nxt_chan_recv_msg_t recv_msg;

    if (nxt_slow_path(size < sizeof(nxt_chan_msg_t))) {
        nxt_log_alert(chan->socket.log, "chan %d: too small message:%uz",
                      chan->socket.fd, size);
        goto fail;
    }

    recv_msg.stream = msg->stream;
    recv_msg.type = msg->type;
    recv_msg.fd = fd;
    recv_msg.buf = b;
    recv_msg.chan = chan;

    /* Account for the payload that follows the message header. */
    b->mem.free += size - sizeof(nxt_chan_msg_t);

    if (msg->last) {
        /* Mark the end of the stream with a trailing sync buffer. */
        sync = nxt_buf_sync_alloc(chan->mem_pool, NXT_BUF_SYNC_LAST);
        if (nxt_slow_path(sync == NULL)) {
            goto fail;
        }
        b->next = sync;
    }

    chan->handler(thr, &recv_msg);

    return;

fail:

    if (fd != -1) {
        nxt_fd_close(fd);
    }
}
/*
 * Gets a receive buffer: pops one from the channel's free buffer list
 * or allocates a new one of chan->max_size bytes.  Returns NULL on
 * allocation failure.
 */
static nxt_buf_t *
nxt_chan_buf_alloc(nxt_chan_t *chan)
{
    nxt_buf_t  *b;

    if (chan->free_bufs != NULL) {
        b = chan->free_bufs;
        chan->free_bufs = b->next;

        /*
         * BUG FIX: unlink the buffer from the free list.  The stale
         * b->next left by the original was later mistaken for a sync
         * buffer in nxt_chan_read_handler() and freed while it was
         * still the head of the free list.
         */
        b->next = NULL;

        /* Reset the buffer to an empty state before reuse. */
        b->mem.pos = b->mem.start;
        b->mem.free = b->mem.start;

    } else {
        b = nxt_buf_mem_alloc(chan->mem_pool, chan->max_size, 0);
        if (nxt_slow_path(b == NULL)) {
            return NULL;
        }
    }

    return b;
}
/* Pushes the buffer onto the channel's free buffer list for reuse. */
static void
nxt_chan_buf_free(nxt_chan_t *chan, nxt_buf_t *b)
{
    b->next = chan->free_bufs;
    chan->free_bufs = b;
}
/* Socket error handler: not implemented yet. */
static void
nxt_chan_error_handler(nxt_thread_t *thr, void *obj, void *data)
{
    /* TODO */
}

73
src/nxt_chan.h Normal file
View File

@@ -0,0 +1,73 @@
/*
* Copyright (C) Igor Sysoev
* Copyright (C) NGINX, Inc.
*/
#ifndef _NXT_UNIX_CHAN_H_INCLUDED_
#define _NXT_UNIX_CHAN_H_INCLUDED_
/* On-wire header sent ahead of every message part. */
typedef struct {
    uint32_t stream;
    uint16_t type;
    uint8_t last; /* 1 bit */
} nxt_chan_msg_t;

/* A queued outgoing message. */
typedef struct {
    nxt_queue_link_t link;     /* must be first: queue links are cast back */
    nxt_buf_t *buf;            /* remaining payload chain */
    size_t share;              /* bytes sent since the last requeue */
    nxt_fd_t fd;               /* fd to pass; -1 after the first part */
    nxt_chan_msg_t chan_msg;
} nxt_chan_send_msg_t;

typedef struct nxt_chan_recv_msg_s nxt_chan_recv_msg_t;
typedef void (*nxt_chan_handler_t)(nxt_thread_t *thr, nxt_chan_recv_msg_t *msg);

typedef struct {
    /* Must be the first field. */
    nxt_event_fd_t socket;
    nxt_queue_t messages; /* of nxt_chan_send_msg_t */
    /* Maximum size of message part. */
    uint32_t max_size;
    /* Maximum interleave of message parts. */
    uint32_t max_share;
    nxt_chan_handler_t handler;
    void *data;
    nxt_mem_pool_t *mem_pool;
    nxt_buf_t *free_bufs;          /* cached receive buffers */
    nxt_socket_t pair[2];          /* [0] read side, [1] write side */
} nxt_chan_t;

/* A received message passed to the channel handler. */
struct nxt_chan_recv_msg_s {
    uint32_t stream;
    uint16_t type;
    nxt_fd_t fd;
    nxt_buf_t *buf;
    nxt_chan_t *chan;
};
NXT_EXPORT nxt_chan_t *nxt_chan_alloc(void);
NXT_EXPORT nxt_chan_t *nxt_chan_create(size_t bufsize);
NXT_EXPORT void nxt_chan_destroy(nxt_chan_t *chan);
NXT_EXPORT void nxt_chan_write_enable(nxt_thread_t *thr, nxt_chan_t *chan);
NXT_EXPORT void nxt_chan_write_close(nxt_chan_t *chan);
NXT_EXPORT void nxt_chan_read_enable(nxt_thread_t *thr, nxt_chan_t *chan);
NXT_EXPORT void nxt_chan_read_close(nxt_chan_t *chan);
NXT_EXPORT nxt_int_t nxt_chan_write(nxt_chan_t *chan, nxt_uint_t type,
nxt_fd_t fd, uint32_t stream, nxt_buf_t *b);
#endif /* _NXT_UNIX_CHAN_H_INCLUDED_ */

214
src/nxt_clang.h Normal file
View File

@@ -0,0 +1,214 @@
/*
* Copyright (C) Igor Sysoev
* Copyright (C) NGINX, Inc.
*/
#ifndef _NXT_CLANG_H_INCLUDED_
#define _NXT_CLANG_H_INCLUDED_
#define nxt_inline static inline __attribute__((always_inline))
#define nxt_noinline __attribute__((noinline))
#define nxt_cdecl
#if (NXT_CLANG)
/* Any __asm__ directive disables loop vectorization in GCC and Clang. */
#define \
nxt_pragma_loop_disable_vectorization \
__asm__("")
#else
#define \
nxt_pragma_loop_disable_vectorization
#endif
#if (NXT_HAVE_BUILTIN_EXPECT)
#define \
nxt_fast_path(x) \
__builtin_expect((long) (x), 1)
#define \
nxt_slow_path(x) \
__builtin_expect((long) (x), 0)
#else
#define \
nxt_fast_path(x) \
(x)
#define \
nxt_slow_path(x) \
(x)
#endif
#if (NXT_HAVE_BUILTIN_UNREACHABLE)
#define \
nxt_unreachable() \
__builtin_unreachable()
#else
#define \
nxt_unreachable()
#endif
#if (NXT_HAVE_BUILTIN_PREFETCH)
#define \
nxt_prefetch(a) \
__builtin_prefetch(a)
#else
#define \
nxt_prefetch(a)
#endif
#if (NXT_HAVE_GCC_ATTRIBUTE_VISIBILITY)
#define NXT_EXPORT __attribute__((visibility("default")))
#else
#define NXT_EXPORT
#endif
#if (NXT_HAVE_GCC_ATTRIBUTE_MALLOC)
#define NXT_MALLOC_LIKE __attribute__((__malloc__))
#else
#define NXT_MALLOC_LIKE
#endif
#if (NXT_HAVE_GCC_ATTRIBUTE_ALIGNED)
#define nxt_aligned(x) __attribute__((aligned(x)))
#else
#define nxt_aligned(x)
#endif
#ifndef NXT_ALIGNMENT
#if (NXT_SOLARIS)
#define NXT_ALIGNMENT _POINTER_ALIGNMENT /* x86_64: 8, i386: 4 */
/* sparcv9: 8, sparcv8: 4 */
#elif (__i386__ || __i386)
#define NXT_ALIGNMENT 4
#elif (__arm__)
#define NXT_ALIGNMENT 8 /* 32-bit ARM may use 64-bit load/store */
#elif (__ia64__)
#define NXT_ALIGNMENT 8 /* long long */
#else
#define NXT_ALIGNMENT NXT_PTR_SIZE
#endif
#endif
#ifndef NXT_MAX_ALIGNMENT
#if (NXT_SOLARIS)
#define NXT_MAX_ALIGNMENT _MAX_ALIGNMENT /* x86_64: 16, i386: 4 */
/* sparcv9: 16, sparcv8: 8 */
#elif (__i386__ || __i386)
#define NXT_MAX_ALIGNMENT 4
#elif (__arm__)
#define NXT_MAX_ALIGNMENT 16
#elif (__ia64__)
#define NXT_MAX_ALIGNMENT 16
#else
#define NXT_MAX_ALIGNMENT 16
#endif
#endif
#define \
nxt_alloca(size) \
    alloca(size)

/* Cast a member pointer back to its enclosing structure. */
#define \
nxt_container_of(p, type, field) \
    (type *) ((u_char *) (p) - offsetof(type, field))

/* Number of elements of an actual array (not a pointer). */
#define \
nxt_nitems(x) \
    (sizeof(x) / sizeof((x)[0]))

/*
 * GCC and Clang use __builtin_abs() instead of libc abs().
 * NOTE(review): abs() takes an int — values wider than int (e.g. a
 * ssize_t on LP64) are truncated; confirm all call sites pass ints.
 */
#define \
nxt_abs(val) \
    abs(val)
/*
 * BUG FIX: the comparison operands are parenthesized.  The original
 * "val1 < val2" mis-bound lower-precedence arguments such as bitwise
 * or shift expressions (e.g. nxt_max(1 | 2, 0) expanded to
 * 1 | (2 < 0)).  Arguments are still evaluated twice: do not pass
 * expressions with side effects.
 */
#define \
nxt_max(val1, val2) \
    (((val1) < (val2)) ? (val2) : (val1))

#define \
nxt_min(val1, val2) \
    (((val1) > (val2)) ? (val2) : (val1))
#define \
nxt_bswap32(val) \
( ((val) >> 24) \
| (((val) & 0x00ff0000) >> 8) \
| (((val) & 0x0000ff00) << 8) \
| ((val) << 24))
#define \
nxt_align_size(d, a) \
(((d) + ((size_t) (a) - 1)) & ~((size_t) (a) - 1))
#define \
nxt_align_ptr(p, a) \
(u_char *) (((uintptr_t) (p) + ((uintptr_t) (a) - 1)) \
& ~((uintptr_t) (a) - 1))
#define \
nxt_trunc_ptr(p, a) \
(u_char *) ((uintptr_t) (p) & ~((uintptr_t) (a) - 1))
#endif /* _NXT_CLANG_H_INCLUDED_ */

621
src/nxt_cyassl.c Normal file
View File

@@ -0,0 +1,621 @@
/*
* Copyright (C) NGINX, Inc.
* Copyright (C) Igor Sysoev
*/
#include <nxt_main.h>
#include <cyassl/ssl.h>
#include <cyassl/error-ssl.h>
/* Per-connection CyaSSL state. */
typedef struct {
    CYASSL *session;            /* CyaSSL session handle */
    int ssl_error;              /* last CyaSSL error code */
    uint8_t times; /* 2 bits */
    nxt_buf_mem_t buffer;
} nxt_cyassl_conn_t;
static nxt_int_t nxt_cyassl_server_init(nxt_ssltls_conf_t *conf);
static void nxt_cyassl_conn_init(nxt_thread_t *thr, nxt_ssltls_conf_t *conf,
nxt_event_conn_t *c);
static void nxt_cyassl_session_cleanup(void *data);
static int nxt_cyassl_io_recv(CYASSL *ssl, char *buf, int size, void *data);
static int nxt_cyassl_io_send(CYASSL *ssl, char *buf, int size, void *data);
static void nxt_cyassl_conn_handshake(nxt_thread_t *thr, void *obj, void *data);
static void nxt_cyassl_conn_io_read(nxt_thread_t *thr, void *obj, void *data);
static void nxt_cyassl_conn_io_shutdown(nxt_thread_t *thr, void *obj,
void *data);
static ssize_t nxt_cyassl_conn_io_write_chunk(nxt_thread_t *thr,
nxt_event_conn_t *c, nxt_buf_t *b, size_t limit);
static ssize_t nxt_cyassl_conn_io_send(nxt_event_conn_t *c, void *buf,
size_t size);
static nxt_int_t nxt_cyassl_conn_test_error(nxt_thread_t *thr,
nxt_event_conn_t *c, int err, nxt_work_handler_t handler);
static void nxt_cdecl nxt_cyassl_conn_error(nxt_event_conn_t *c, nxt_err_t err,
const char *fmt, ...);
static nxt_uint_t nxt_cyassl_log_error_level(nxt_event_conn_t *c, nxt_err_t err,
int ssl_error);
static void nxt_cdecl nxt_cyassl_log_error(nxt_uint_t level, nxt_log_t *log,
int ret, const char *fmt, ...);
static u_char *nxt_cyassl_copy_error(int err, u_char *p, u_char *end);
/* CyaSSL entry points exported to the generic SSL/TLS layer. */
const nxt_ssltls_lib_t  nxt_cyassl_lib = {
    nxt_cyassl_server_init,
    NULL,
};


/*
 * Connection I/O operations overridden for TLS connections.
 * NOTE(review): slot order follows nxt_event_conn_io_t, declared
 * elsewhere — confirm positions against that definition.
 */
static nxt_event_conn_io_t  nxt_cyassl_event_conn_io = {
    NULL,
    NULL,
    nxt_cyassl_conn_io_read,
    NULL,
    NULL,
    nxt_event_conn_io_write,
    nxt_cyassl_conn_io_write_chunk,
    NULL,
    NULL,
    nxt_cyassl_conn_io_send,
    nxt_cyassl_conn_io_shutdown,
};
static nxt_int_t
nxt_cyassl_start(void)
{
int err;
nxt_thread_t *thr;
static nxt_bool_t started;
if (nxt_fast_path(started)) {
return NXT_OK;
}
started = 1;
thr = nxt_thread();
/* TODO: CyaSSL_Cleanup() */
err = CyaSSL_Init();
if (err != SSL_SUCCESS) {
nxt_cyassl_log_error(NXT_LOG_CRIT, thr->log, err,
"CyaSSL_Init() failed");
return NXT_ERROR;
}
nxt_thread_log_error(NXT_LOG_INFO, "CyaSSL version: %s",
LIBCYASSL_VERSION_STRING);
/* CyaSSL_SetLoggingCb */
/* CyaSSL_SetAllocators */
return NXT_OK;
}
/*
 * Creates a server-side CYASSL_CTX from conf (certificate, private key,
 * optional cipher list) and installs the custom I/O callbacks so that
 * all library reads/writes go through the event engine.
 * Returns NXT_OK or NXT_ERROR; on failure the context is freed.
 */
static nxt_int_t
nxt_cyassl_server_init(nxt_ssltls_conf_t *conf)
{
    int           err;
    char          *certificate, *key;
    CYASSL_CTX    *ctx;
    nxt_thread_t  *thr;

    thr = nxt_thread();

    /* Lazily initialize the library (idempotent). */
    if (nxt_slow_path(nxt_cyassl_start() != NXT_OK)) {
        return NXT_ERROR;
    }

    ctx = CyaSSL_CTX_new(CyaSSLv23_server_method());
    if (ctx == NULL) {
        nxt_cyassl_log_error(NXT_LOG_CRIT, thr->log, 0,
                             "CyaSSL_CTX_new() failed");
        return NXT_ERROR;
    }

    conf->ctx = ctx;
    conf->conn_init = nxt_cyassl_conn_init;

    certificate = conf->certificate;

    err = CyaSSL_CTX_use_certificate_file(ctx, certificate, SSL_FILETYPE_PEM);
    if (err != SSL_SUCCESS) {
        nxt_cyassl_log_error(NXT_LOG_CRIT, thr->log, err,
                             "CyaSSL_CTX_use_certificate_file(\"%s\") failed",
                             certificate);
        goto fail;
    }

    key = conf->certificate_key;

    err = CyaSSL_CTX_use_PrivateKey_file(ctx, key, SSL_FILETYPE_PEM);
    if (err != SSL_SUCCESS) {
        nxt_cyassl_log_error(NXT_LOG_CRIT, thr->log, err,
                             "CyaSSL_CTX_use_PrivateKey_file(\"%s\") failed",
                             key);
        goto fail;
    }

    /* The cipher list is optional. */
    if (conf->ciphers != NULL) {
        err = CyaSSL_CTX_set_cipher_list(ctx, conf->ciphers);
        if (err != SSL_SUCCESS) {
            nxt_cyassl_log_error(NXT_LOG_CRIT, thr->log, err,
                                 "CyaSSL_CTX_set_cipher_list(\"%s\") failed",
                                 conf->ciphers);
            goto fail;
        }
    }

    /* TODO: ca_certificate */

    /* Route library I/O through nxt_cyassl_io_recv()/nxt_cyassl_io_send(). */
    CyaSSL_SetIORecv(ctx, nxt_cyassl_io_recv);
    CyaSSL_SetIOSend(ctx, nxt_cyassl_io_send);

    return NXT_OK;

fail:

    CyaSSL_CTX_free(ctx);

    return NXT_ERROR;
}
/*
 * Per-connection TLS setup: allocates the nxt_cyassl_conn_t state,
 * creates a CYASSL session, registers a mem-pool cleanup for it,
 * switches the connection's I/O vector to the TLS one and starts the
 * handshake.  On any failure the connection's error handler is enqueued.
 */
static void
nxt_cyassl_conn_init(nxt_thread_t *thr, nxt_ssltls_conf_t *conf,
    nxt_event_conn_t *c)
{
    CYASSL                  *s;
    CYASSL_CTX              *ctx;
    nxt_cyassl_conn_t       *ssltls;
    nxt_mem_pool_cleanup_t  *mpcl;

    nxt_log_debug(c->socket.log, "cyassl conn init");

    ssltls = nxt_mem_zalloc(c->mem_pool, sizeof(nxt_cyassl_conn_t));
    if (ssltls == NULL) {
        goto fail;
    }

    c->u.ssltls = ssltls;

    /* Size only is recorded; the buffer memory is allocated lazily. */
    nxt_buf_mem_set_size(&ssltls->buffer, conf->buffer_size);

    /* Reserve the cleanup entry before the session exists. */
    mpcl = nxt_mem_pool_cleanup(c->mem_pool, 0);
    if (mpcl == NULL) {
        goto fail;
    }

    ctx = conf->ctx;

    s = CyaSSL_new(ctx);
    if (s == NULL) {
        nxt_cyassl_log_error(NXT_LOG_CRIT, c->socket.log, 0,
                             "CyaSSL_new() failed");
        goto fail;
    }

    ssltls->session = s;

    /* The cleanup handler is armed only once the session exists. */
    mpcl->handler = nxt_cyassl_session_cleanup;
    mpcl->data = ssltls;

    /* Pass the connection to the I/O callbacks as their context. */
    CyaSSL_SetIOReadCtx(s, c);
    CyaSSL_SetIOWriteCtx(s, c);

    c->io = &nxt_cyassl_event_conn_io;
    c->sendfile = NXT_CONN_SENDFILE_OFF;

    nxt_cyassl_conn_handshake(thr, c, c->socket.data);
    return;

fail:

    nxt_event_conn_io_handle(thr, c->read_work_queue,
                             c->read_state->error_handler, c, c->socket.data);
}
/*
 * Memory-pool cleanup handler: releases the TLS write buffer and then
 * the CyaSSL session itself.
 */
static void
nxt_cyassl_session_cleanup(void *data)
{
    nxt_cyassl_conn_t  *conn_state = data;

    nxt_thread_log_debug("cyassl session cleanup");

    nxt_free(conn_state->buffer.start);
    CyaSSL_free(conn_state->session);
}
/*
 * CyaSSL receive callback: reads from the connection's socket via the
 * event engine and maps the result onto the library's CBIO codes.
 */
static int
nxt_cyassl_io_recv(CYASSL *ssl, char *buf, int size, void *data)
{
    ssize_t           bytes;
    nxt_event_conn_t  *c;

    c = data;

    bytes = nxt_thread()->engine->event->io->recv(c, (u_char *) buf, size, 0);

    if (bytes > 0) {
        return bytes;
    }

    if (bytes == 0) {
        /* Orderly peer close. */
        return CYASSL_CBIO_ERR_CONN_CLOSE;
    }

    if (bytes == NXT_AGAIN) {
        return CYASSL_CBIO_ERR_WANT_READ;
    }

    return CYASSL_CBIO_ERR_GENERAL;
}
/*
 * CyaSSL send callback: writes to the connection's socket via the
 * event engine and maps the result onto the library's CBIO codes.
 */
static int
nxt_cyassl_io_send(CYASSL *ssl, char *buf, int size, void *data)
{
    ssize_t           bytes;
    nxt_event_conn_t  *c;

    c = data;

    bytes = nxt_thread()->engine->event->io->send(c, (u_char *) buf, size);

    if (bytes > 0) {
        return bytes;
    }

    if (bytes == NXT_AGAIN) {
        return CYASSL_CBIO_ERR_WANT_WRITE;
    }

    return CYASSL_CBIO_ERR_GENERAL;
}
/*
 * Drives the TLS handshake.  On completion it falls through to the
 * read path; on NXT_AGAIN the handler is re-armed by
 * nxt_cyassl_conn_test_error(); on hard failure the connection's
 * error handler is enqueued.
 */
static void
nxt_cyassl_conn_handshake(nxt_thread_t *thr, void *obj, void *data)
{
    int                ret;
    nxt_int_t          n;
    nxt_err_t          err;
    nxt_event_conn_t   *c;
    nxt_cyassl_conn_t  *ssltls;

    c = obj;
    ssltls = c->u.ssltls;

    nxt_log_debug(thr->log, "cyassl conn handshake: %d", ssltls->times);

    /* "ssltls->times == 1" is suitable to run CyaSSL_negotiate() in job. */

    ret = CyaSSL_negotiate(ssltls->session);

    /* Capture errno immediately, before any other call can clobber it. */
    err = (ret != 0) ? nxt_socket_errno : 0;

    nxt_thread_time_debug_update(thr);

    nxt_log_debug(thr->log, "CyaSSL_negotiate(%d): %d", c->socket.fd, ret);

    if (ret == 0) {
        /* Handshake complete: proceed with application data. */
        nxt_cyassl_conn_io_read(thr, c, data);
        return;
    }

    n = nxt_cyassl_conn_test_error(thr, c, ret, nxt_cyassl_conn_handshake);

    if (n == NXT_ERROR) {
        nxt_cyassl_conn_error(c, err, "CyaSSL_negotiate(%d) failed",
                              c->socket.fd);

        nxt_event_conn_io_handle(thr, c->read_work_queue,
                                 c->read_state->error_handler, c, data);

    } else if (ssltls->ssl_error == SSL_ERROR_WANT_READ && ssltls->times < 2) {
        /* Count WANT_READ retries; saturates at 2 (the field is 2 bits). */
        ssltls->times++;
    }
}
/*
 * TLS read path: decrypts into c->read and dispatches the appropriate
 * read-state handler.  A NULL c->read is used purely to test descriptor
 * readiness.  NXT_AGAIN from the library re-arms the event and returns
 * without dispatching.
 */
static void
nxt_cyassl_conn_io_read(nxt_thread_t *thr, void *obj, void *data)
{
    int                 ret;
    nxt_buf_t           *b;
    nxt_err_t           err;
    nxt_int_t           n;
    nxt_event_conn_t    *c;
    nxt_cyassl_conn_t   *ssltls;
    nxt_work_handler_t  handler;

    c = obj;

    nxt_log_debug(thr->log, "cyassl conn read");

    handler = c->read_state->ready_handler;

    b = c->read;

    /* b == NULL is used to test descriptor readiness. */

    if (b != NULL) {
        ssltls = c->u.ssltls;

        ret = CyaSSL_read(ssltls->session, b->mem.free,
                          b->mem.end - b->mem.free);

        /* Capture errno before further calls can clobber it. */
        err = (ret <= 0) ? nxt_socket_errno : 0;

        nxt_log_debug(thr->log, "CyaSSL_read(%d, %p, %uz): %d",
                      c->socket.fd, b->mem.free, b->mem.end - b->mem.free, ret);

        if (ret > 0) {
            /* c->socket.read_ready is kept. */
            b->mem.free += ret;
            /* NOTE(review): redundant — handler already holds ready_handler. */
            handler = c->read_state->ready_handler;

        } else {
            n = nxt_cyassl_conn_test_error(thr, c, ret,
                                           nxt_cyassl_conn_io_read);

            /* NXT_AGAIN or close: the event was re-armed or enqueued. */
            if (nxt_fast_path(n != NXT_ERROR)) {
                return;
            }

            nxt_cyassl_conn_error(c, err, "CyaSSL_read(%d, %p, %uz) failed",
                                  c->socket.fd, b->mem.free,
                                  b->mem.end - b->mem.free);

            handler = c->read_state->error_handler;
        }
    }

    nxt_event_conn_io_handle(thr, c->read_work_queue, handler, c, data);
}
/*
 * Coalesces the outgoing buffer chain into the per-connection TLS
 * write buffer, bounded by "limit"; returns the number of bytes
 * consumed (as nxt_sendbuf_copy_coalesce() reports it).
 */
static ssize_t
nxt_cyassl_conn_io_write_chunk(nxt_thread_t *thr, nxt_event_conn_t *c,
    nxt_buf_t *b, size_t limit)
{
    nxt_cyassl_conn_t  *conn_state = c->u.ssltls;

    nxt_log_debug(thr->log, "cyassl conn write chunk");

    return nxt_sendbuf_copy_coalesce(c, &conn_state->buffer, b, limit);
}
/*
 * Encrypts and sends "size" bytes from "buf".  Returns the positive
 * byte count on success, NXT_AGAIN when the library wants more socket
 * readiness, or NXT_ERROR on hard failure.
 */
static ssize_t
nxt_cyassl_conn_io_send(nxt_event_conn_t *c, void *buf, size_t size)
{
    int                ret;
    nxt_err_t          err;
    nxt_int_t          n;
    nxt_cyassl_conn_t  *ssltls;

    nxt_log_debug(c->socket.log, "cyassl send");

    ssltls = c->u.ssltls;

    ret = CyaSSL_write(ssltls->session, buf, size);

    if (ret <= 0) {
        /* Capture errno immediately and record it on the socket. */
        err = nxt_socket_errno;
        c->socket.error = err;

    } else {
        err = 0;
    }

    nxt_log_debug(c->socket.log, "CyaSSL_write(%d, %p, %uz): %d",
                  c->socket.fd, buf, size, ret);

    if (ret > 0) {
        return ret;
    }

    n = nxt_cyassl_conn_test_error(nxt_thread(), c, ret,
                                   nxt_event_conn_io_write);

    if (nxt_slow_path(n == NXT_ERROR)) {
        nxt_cyassl_conn_error(c, err, "CyaSSL_write(%d, %p, %uz) failed",
                              c->socket.fd, buf, size);
    }

    return n;
}
/*
 * Sends the TLS "close notify" alert and then enqueues the
 * connection's close handler regardless of the shutdown outcome.
 */
static void
nxt_cyassl_conn_io_shutdown(nxt_thread_t *thr, void *obj, void *data)
{
    int                ret;
    nxt_event_conn_t   *c;
    nxt_cyassl_conn_t  *conn_state;

    c = obj;

    nxt_log_debug(thr->log, "cyassl conn shutdown");

    conn_state = c->u.ssltls;
    ret = CyaSSL_shutdown(conn_state->session);

    nxt_log_debug(thr->log, "CyaSSL_shutdown(%d): %d", c->socket.fd, ret);

    if (nxt_slow_path(ret != SSL_SUCCESS)) {
        nxt_cyassl_conn_error(c, 0, "CyaSSL_shutdown(%d) failed", c->socket.fd);
    }

    nxt_event_conn_io_handle(thr, c->write_work_queue,
                             c->write_state->close_handler, c, data);
}
/*
 * Classifies a CyaSSL error for connection "c" after a failed call.
 * WANT_READ/WANT_WRITE re-arm the corresponding event with "handler"
 * and return NXT_AGAIN; ZERO_RETURN ("close notify") enqueues the
 * close handler and returns 0; anything else returns NXT_ERROR.
 * The raw error is saved in ssltls->ssl_error for the caller.
 */
static nxt_int_t
nxt_cyassl_conn_test_error(nxt_thread_t *thr, nxt_event_conn_t *c, int ret,
    nxt_work_handler_t handler)
{
    nxt_work_queue_t   *wq;
    nxt_cyassl_conn_t  *ssltls;

    ssltls = c->u.ssltls;
    ssltls->ssl_error = CyaSSL_get_error(ssltls->session, ret);

    nxt_log_debug(thr->log, "CyaSSL_get_error(): %d", ssltls->ssl_error);

    switch (ssltls->ssl_error) {

    case SSL_ERROR_WANT_READ:
        /* Wait for readability only; suppress write events meanwhile. */
        nxt_event_fd_block_write(thr->engine, &c->socket);

        c->socket.read_ready = 0;
        c->socket.read_handler = handler;

        if (nxt_event_fd_is_disabled(c->socket.read)) {
            nxt_event_fd_enable_read(thr->engine, &c->socket);
        }

        return NXT_AGAIN;

    case SSL_ERROR_WANT_WRITE:
        /* Wait for writability only; suppress read events meanwhile. */
        nxt_event_fd_block_read(thr->engine, &c->socket);

        c->socket.write_ready = 0;
        c->socket.write_handler = handler;

        if (nxt_event_fd_is_disabled(c->socket.write)) {
            nxt_event_fd_enable_write(thr->engine, &c->socket);
        }

        return NXT_AGAIN;

    case SSL_ERROR_ZERO_RETURN:
        /* A "close notify" alert. */

        if (c->read_state != NULL) {
            wq = c->read_work_queue;
            handler = c->read_state->close_handler;

        } else {
            wq = c->write_work_queue;
            handler = c->write_state->close_handler;
        }

        nxt_event_conn_io_handle(thr, wq, handler, c, c->socket.data);

        return 0;

    default:
        return NXT_ERROR;
    }
}
/*
 * Logs a connection-level TLS error: formats the caller's message,
 * appends the system errno (if any) and the library's error string.
 * The log level is derived from the saved ssl_error.
 */
static void nxt_cdecl
nxt_cyassl_conn_error(nxt_event_conn_t *c, nxt_err_t err, const char *fmt, ...)
{
    u_char             *p, *end;
    va_list            args;
    nxt_uint_t         level;
    nxt_cyassl_conn_t  *ssltls;
    u_char             msg[NXT_MAX_ERROR_STR];

    ssltls = c->u.ssltls;
    level = nxt_cyassl_log_error_level(c, err, ssltls->ssl_error);

    /* Avoid the formatting work entirely when the level is filtered out. */
    if (nxt_log_level_enough(c->socket.log, level)) {

        end = msg + sizeof(msg);

        va_start(args, fmt);
        p = nxt_vsprintf(msg, end, fmt, args);
        va_end(args);

        if (err != 0) {
            p = nxt_sprintf(p, end, " %E", err);
        }

        p = nxt_cyassl_copy_error(ssltls->ssl_error, p, end);

        nxt_log_error(level, c->socket.log, "%*s", p - msg, msg);
    }
}
/*
 * Chooses a log level for an SSL error: expected peer/network failures
 * (socket errors, cipher-suite mismatches) are logged at INFO, all
 * other errors at CRIT.
 */
static nxt_uint_t
nxt_cyassl_log_error_level(nxt_event_conn_t *c, nxt_err_t err, int ssl_error)
{
    if (ssl_error == SOCKET_ERROR_E             /* -208 */
        || ssl_error == MATCH_SUITE_ERROR)      /* -261 */
    {
        return NXT_LOG_INFO;
    }

    return NXT_LOG_CRIT;
}
/*
 * Logs a library-level (non-connection) error: formats the caller's
 * message and appends the CyaSSL error string for "err".
 */
static void nxt_cdecl
nxt_cyassl_log_error(nxt_uint_t level, nxt_log_t *log, int err,
    const char *fmt, ...)
{
    u_char   *p, *end;
    va_list  args;
    u_char   msg[NXT_MAX_ERROR_STR];

    /* Skip all formatting when the level is filtered out. */
    if (nxt_log_level_enough(log, level)) {

        end = msg + sizeof(msg);

        va_start(args, fmt);
        p = nxt_vsprintf(msg, end, fmt, args);
        va_end(args);

        p = nxt_cyassl_copy_error(err, p, end);

        nxt_log_error(level, log, "%*s", p - msg, msg);
    }
}
/*
 * Appends " (SSL:<err> <library message>)" to the buffer at "p",
 * bounded by "end"; returns the new end of the written text.
 */
static u_char *
nxt_cyassl_copy_error(int err, u_char *p, u_char *end)
{
    p = nxt_sprintf(p, end, " (SSL:%d ", err);

    /* CyaSSL_ERR_error_string_n() NUL-terminates within the given size. */
    CyaSSL_ERR_error_string_n(err, (char *) p, end - p);

    p += nxt_strlen(p);

    /* The closing parenthesis is dropped if the buffer is full. */
    if (p < end) {
        *p++ = ')';
    }

    return p;
}

1743
src/nxt_cycle.c Normal file

File diff suppressed because it is too large Load Diff

159
src/nxt_cycle.h Normal file
View File

@@ -0,0 +1,159 @@
/*
* Copyright (C) Igor Sysoev
* Copyright (C) Valentin V. Bartenev
* Copyright (C) NGINX, Inc.
*/
#ifndef _NXT_CYCLE_H_INCLUDED_
#define _NXT_CYCLE_H_INCLUDED_
/* Role of the current process within the server. */
typedef enum {
    NXT_PROCESS_SINGLE = 0,     /* standalone, no master/worker split */
    NXT_PROCESS_MASTER,
    NXT_PROCESS_WORKER,
} nxt_process_type_e;
typedef struct nxt_cycle_s nxt_cycle_t;
typedef void (*nxt_cycle_cont_t)(nxt_thread_t *thr, nxt_cycle_t *cycle);
/*
 * Global per-reconfiguration state: one cycle object describes a full
 * server configuration (sockets, engines, processes, logs).  A new
 * cycle links to the "previous" one during reconfiguration.
 */
struct nxt_cycle_s {
    nxt_mem_pool_t         *mem_pool;

    nxt_cycle_t            *previous;

    nxt_array_t            *inherited_sockets;  /* of nxt_listen_socket_t */
    nxt_array_t            *listen_sockets;     /* of nxt_listen_socket_t */

    nxt_array_t            *services;           /* of nxt_service_t */
    nxt_array_t            *engines;            /* of nxt_event_engine_t */

    nxt_cycle_cont_t       start;

    nxt_str_t              *config_name;
    nxt_str_t              *conf_prefix;
    nxt_str_t              *prefix;

    nxt_str_t              hostname;

    nxt_file_name_t        *pid_file;
    nxt_file_name_t        *oldbin_file;
    nxt_pid_t              new_binary;          /* pid of re-exec'ed binary */

#if (NXT_THREADS)
    nxt_array_t            *thread_pools;       /* of nxt_thread_pool_t */
    nxt_cycle_cont_t       continuation;
#endif

    nxt_array_t            *processes;          /* of nxt_process_chan_t */

    nxt_list_t             *log_files;          /* of nxt_file_t */

    nxt_array_t            *shm_zones;          /* of nxt_cycle_shm_zone_t */

    uint32_t               process_generation;
    uint32_t               current_process;
    uint32_t               last_engine_id;

    nxt_process_type_e     type;

    uint8_t                test_config;         /* 1 bit */
    uint8_t                reconfiguring;       /* 1 bit */

    void                   **core_ctx;

    nxt_event_timer_t      timer;

    uint8_t                daemon;
    uint8_t                batch;
    uint8_t                master_process;

    const char             *engine;
    uint32_t               engine_connections;
    uint32_t               worker_processes;
    uint32_t               auxiliary_threads;

    nxt_user_cred_t        user_cred;
    const char             *group;
    const char             *pid;
    const char             *error_log;

    nxt_sockaddr_t         *listen;
};
/* Descriptor of a named shared-memory zone registered with the cycle. */
typedef struct {
    void        *addr;        /* mapped base address */
    size_t      size;
    nxt_uint_t  page_size;
    nxt_str_t   name;
} nxt_cycle_shm_zone_t;
typedef nxt_int_t (*nxt_module_init_t)(nxt_thread_t *thr, nxt_cycle_t *cycle);
nxt_thread_extern_data(nxt_cycle_t *, nxt_thread_cycle_data);
/* Stores "cycle" in the calling thread's thread-local slot. */
nxt_inline void
nxt_thread_cycle_set(nxt_cycle_t *cycle)
{
    nxt_cycle_t  **p;

    p = nxt_thread_get_data(nxt_thread_cycle_data);

    *p = cycle;
}
/* Returns the cycle stored in the calling thread's thread-local slot. */
nxt_inline nxt_cycle_t *
nxt_thread_cycle(void)
{
    nxt_cycle_t  **p;

    p = nxt_thread_get_data(nxt_thread_cycle_data);

    return *p;
}
nxt_int_t nxt_cycle_create(nxt_thread_t *thr, nxt_cycle_t *previous,
nxt_cycle_cont_t start, nxt_str_t *config_name, nxt_bool_t test_config);
void nxt_cycle_quit(nxt_thread_t *thr, nxt_cycle_t *cycle);
void nxt_cycle_event_engine_free(nxt_cycle_t *cycle);
#if (NXT_THREADS)
nxt_int_t nxt_cycle_thread_pool_create(nxt_thread_t *thr, nxt_cycle_t *cycle,
nxt_uint_t max_threads, nxt_nsec_t timeout);
#endif
/* STUB */
nxt_str_t *nxt_current_directory(nxt_mem_pool_t *mp);
nxt_int_t nxt_cycle_pid_file_create(nxt_file_name_t *pid_file, nxt_bool_t test);
nxt_listen_socket_t *nxt_cycle_listen_socket_add(nxt_cycle_t *cycle,
nxt_sockaddr_t *sa);
nxt_int_t nxt_cycle_listen_sockets_enable(nxt_thread_t *thr,
nxt_cycle_t *cycle);
nxt_file_t *nxt_cycle_log_file_add(nxt_cycle_t *cycle, nxt_str_t *name);
nxt_int_t nxt_cycle_shm_zone_add(nxt_cycle_t *cycle, nxt_str_t *name,
size_t size, nxt_uint_t page_size);
/* STUB */
void nxt_cdecl nxt_log_time_handler(nxt_uint_t level, nxt_log_t *log,
const char *fmt, ...);
nxt_int_t nxt_app_start(nxt_cycle_t *cycle);
extern nxt_module_init_t nxt_init_modules[];
extern nxt_uint_t nxt_init_modules_n;
#endif /* _NXT_CYCLE_H_INCLUDED_ */

699
src/nxt_devpoll.c Normal file
View File

@@ -0,0 +1,699 @@
/*
* Copyright (C) Igor Sysoev
* Copyright (C) NGINX, Inc.
*/
#include <nxt_main.h>
/*
* "/dev/poll" has been introduced in Solaris 7 (11/99), HP-UX 11.22 (named
* "eventport pseudo driver" internally, not to be confused with Solaris 10
* event ports), IRIX 6.5.15, and Tru64 UNIX 5.1A.
*
* Although "/dev/poll" descriptor is a file descriptor, nevertheless
* it cannot be added to another poll set, Solaris poll(7d):
*
* The /dev/poll driver does not yet support polling. Polling on a
* /dev/poll file descriptor will result in POLLERR being returned
* in the revents field of pollfd structure.
*/
/* Pending-change operations batched before being written to /dev/poll. */
#define NXT_DEVPOLL_ADD     0   /* register fd and add it to the fd hash */
#define NXT_DEVPOLL_UPDATE  1   /* OR-in new events for an fd already polled */
#define NXT_DEVPOLL_CHANGE  2   /* POLLREMOVE then re-add with new events */
#define NXT_DEVPOLL_DELETE  3   /* POLLREMOVE and drop from the fd hash */
static nxt_event_set_t *nxt_devpoll_create(nxt_event_signals_t *signals,
nxt_uint_t mchanges, nxt_uint_t mevents);
static void nxt_devpoll_free(nxt_event_set_t *event_set);
static void nxt_devpoll_enable(nxt_event_set_t *event_set, nxt_event_fd_t *ev);
static void nxt_devpoll_disable(nxt_event_set_t *event_set, nxt_event_fd_t *ev);
#if (NXT_HPUX)
static void nxt_devpoll_close(nxt_event_set_t *event_set, nxt_event_fd_t *ev);
static void nxt_devpoll_drop_changes(nxt_event_set_t *event_set,
nxt_event_fd_t *ev);
#endif
static void nxt_devpoll_enable_read(nxt_event_set_t *event_set,
nxt_event_fd_t *ev);
static void nxt_devpoll_enable_write(nxt_event_set_t *event_set,
nxt_event_fd_t *ev);
static void nxt_devpoll_disable_read(nxt_event_set_t *event_set,
nxt_event_fd_t *ev);
static void nxt_devpoll_disable_write(nxt_event_set_t *event_set,
nxt_event_fd_t *ev);
static void nxt_devpoll_block_read(nxt_event_set_t *event_set,
nxt_event_fd_t *ev);
static void nxt_devpoll_block_write(nxt_event_set_t *event_set,
nxt_event_fd_t *ev);
static void nxt_devpoll_oneshot_read(nxt_event_set_t *event_set,
nxt_event_fd_t *ev);
static void nxt_devpoll_oneshot_write(nxt_event_set_t *event_set,
nxt_event_fd_t *ev);
static void nxt_devpoll_change(nxt_event_set_t *event_set, nxt_event_fd_t *ev,
nxt_uint_t op, nxt_uint_t events);
static nxt_int_t nxt_devpoll_commit_changes(nxt_thread_t *thr,
nxt_devpoll_event_set_t *ds);
static void nxt_devpoll_change_error(nxt_thread_t *thr,
nxt_devpoll_event_set_t *ds, nxt_event_fd_t *ev);
static void nxt_devpoll_remove(nxt_thread_t *thr, nxt_devpoll_event_set_t *ds,
nxt_fd_t fd);
static nxt_int_t nxt_devpoll_write(nxt_thread_t *thr, int devpoll,
struct pollfd *pfd, size_t n);
static void nxt_devpoll_set_poll(nxt_thread_t *thr, nxt_event_set_t *event_set,
nxt_msec_t timeout);
/*
 * /dev/poll event-set operations vector.
 * NOTE(review): slot order follows nxt_event_set_ops_t, declared
 * elsewhere — confirm positions against that definition.
 */
const nxt_event_set_ops_t  nxt_devpoll_event_set = {
    "devpoll",
    nxt_devpoll_create,
    nxt_devpoll_free,
    nxt_devpoll_enable,
    nxt_devpoll_disable,
    nxt_devpoll_disable,
#if (NXT_HPUX)
    /* HP-UX deregisters closed descriptors automatically. */
    nxt_devpoll_close,
#else
    nxt_devpoll_disable,
#endif
    nxt_devpoll_enable_read,
    nxt_devpoll_enable_write,
    nxt_devpoll_disable_read,
    nxt_devpoll_disable_write,
    nxt_devpoll_block_read,
    nxt_devpoll_block_write,
    nxt_devpoll_oneshot_read,
    nxt_devpoll_oneshot_write,
    nxt_devpoll_enable_read,
    NULL,
    NULL,
    NULL,
    NULL,
    nxt_devpoll_set_poll,
    &nxt_unix_event_conn_io,
    NXT_NO_FILE_EVENTS,
    NXT_NO_SIGNAL_EVENTS,
};
/*
 * Allocates a /dev/poll event set with room for "mchanges" batched
 * changes and "mevents" returned events, and opens the /dev/poll
 * device.  Returns NULL on failure, freeing any partial allocation.
 */
static nxt_event_set_t *
nxt_devpoll_create(nxt_event_signals_t *signals, nxt_uint_t mchanges,
    nxt_uint_t mevents)
{
    nxt_event_set_t          *event_set;
    nxt_devpoll_event_set_t  *ds;

    event_set = nxt_zalloc(sizeof(nxt_devpoll_event_set_t));
    if (event_set == NULL) {
        return NULL;
    }

    ds = &event_set->devpoll;

    /* -1 marks "not opened" for the cleanup path in nxt_devpoll_free(). */
    ds->devpoll = -1;
    ds->mchanges = mchanges;
    ds->mevents = mevents;

    ds->devpoll_changes = nxt_malloc(sizeof(nxt_devpoll_change_t) * mchanges);
    if (ds->devpoll_changes == NULL) {
        goto fail;
    }

    /*
     * NXT_DEVPOLL_CHANGE requires two struct pollfd's:
     * for POLLREMOVE and subsequent POLLIN or POLLOUT.
     */
    ds->changes = nxt_malloc(2 * sizeof(struct pollfd) * mchanges);
    if (ds->changes == NULL) {
        goto fail;
    }

    ds->events = nxt_malloc(sizeof(struct pollfd) * mevents);
    if (ds->events == NULL) {
        goto fail;
    }

    ds->devpoll = open("/dev/poll", O_RDWR);

    if (ds->devpoll == -1) {
        nxt_main_log_emerg("open(/dev/poll) failed %E", nxt_errno);
        goto fail;
    }

    nxt_main_log_debug("open(/dev/poll): %d", ds->devpoll);

    return event_set;

fail:

    nxt_devpoll_free(event_set);

    return NULL;
}
/*
 * Releases all resources of a /dev/poll event set: the device
 * descriptor, the change/event arrays, the fd hash, and the set itself.
 */
static void
nxt_devpoll_free(nxt_event_set_t *event_set)
{
    nxt_devpoll_event_set_t  *ds;

    ds = &event_set->devpoll;

    nxt_main_log_debug("devpoll %d free", ds->devpoll);

    if (ds->devpoll != -1) {
        if (close(ds->devpoll) != 0) {
            nxt_main_log_emerg("devpoll close(%d) failed %E",
                               ds->devpoll, nxt_errno);
        }
    }

    /* free(NULL) members are harmless for sets freed mid-construction. */
    nxt_free(ds->events);
    nxt_free(ds->changes);
    nxt_free(ds->devpoll_changes);
    nxt_event_set_fd_hash_destroy(&ds->fd_hash);

    /*
     * NOTE(review): frees &event_set->devpoll rather than event_set —
     * assumes nxt_event_set_t is a union (so the addresses coincide);
     * confirm against the nxt_event_set_t declaration.
     */
    nxt_free(ds);
}
/* Registers the descriptor for both read and write events. */
static void
nxt_devpoll_enable(nxt_event_set_t *event_set, nxt_event_fd_t *ev)
{
    ev->write = NXT_EVENT_DEFAULT;
    ev->read = NXT_EVENT_DEFAULT;

    nxt_devpoll_change(event_set, ev, NXT_DEVPOLL_ADD, POLLIN | POLLOUT);
}
/*
* Solaris does not automatically remove a closed file descriptor from
* a "/dev/poll" set: ioctl(DP_ISPOLLED) for the descriptor returns 1,
* significative of active descriptor. POLLREMOVE can remove already
* closed file descriptor, so the removal can be batched, Solaris poll(7d):
*
* When using the "/dev/poll" driver, you should remove a closed file
* descriptor from a monitored poll set. Failure to do so may result
* in a POLLNVAL revents being returned for the closed file descriptor.
* When a file descriptor is closed but not removed from the monitored
* set, and is reused in subsequent open of a different device, you
* will be polling the device associated with the reused file descriptor.
* In a multithreaded application, careful coordination among threads
* doing close and DP_POLL ioctl is recommended for consistent results.
*
* Besides Solaris and HP-UX allow to add invalid descriptors to an
* "/dev/poll" set, although the descriptors are not marked as polled,
* that is, ioctl(DP_ISPOLLED) returns 0.
*/
/*
 * Removes the descriptor from the poll set unless both directions are
 * already inactive (in which case there is nothing to remove).
 */
static void
nxt_devpoll_disable(nxt_event_set_t *event_set, nxt_event_fd_t *ev)
{
    if (ev->read == NXT_EVENT_INACTIVE && ev->write == NXT_EVENT_INACTIVE) {
        return;
    }

    ev->read = NXT_EVENT_INACTIVE;
    ev->write = NXT_EVENT_INACTIVE;

    nxt_devpoll_change(event_set, ev, NXT_DEVPOLL_DELETE, POLLREMOVE);
}
#if (NXT_HPUX)
/*
* HP-UX poll(7):
*
* When a polled file descriptor is closed, it is automatically
* deregistered.
*/
/*
 * HP-UX closes deregister automatically (see the poll(7) quote above),
 * so only the local state and any pending batched changes are dropped.
 */
static void
nxt_devpoll_close(nxt_event_set_t *event_set, nxt_event_fd_t *ev)
{
    ev->read = NXT_EVENT_INACTIVE;
    ev->write = NXT_EVENT_INACTIVE;

    nxt_devpoll_drop_changes(event_set, ev);
}
/*
 * Compacts the pending-change array in place, removing every entry that
 * refers to "ev" (whose descriptor is being closed) while preserving
 * the order of the remaining entries.
 */
static void
nxt_devpoll_drop_changes(nxt_event_set_t *event_set, nxt_event_fd_t *ev)
{
    nxt_devpoll_change_t     *dst, *src, *end;
    nxt_devpoll_event_set_t  *ds;

    ds = &event_set->devpoll;

    dst = ds->devpoll_changes;
    end = dst + ds->nchanges;

    for (src = dst; src < end; src++) {

        /* Skip entries that belong to the closing descriptor. */
        if (src->event == ev) {
            continue;
        }

        if (dst != src) {
            *dst = *src;
        }

        dst++;
    }

    /* Shrink by the number of dropped entries. */
    ds->nchanges -= end - dst;
}
#endif
/*
* Solaris poll(7d):
*
* The fd field specifies the file descriptor being polled. The events
* field indicates the interested poll events on the file descriptor.
* If a pollfd array contains multiple pollfd entries with the same fd field,
* the "events" field in each pollfd entry is OR'ed. A special POLLREMOVE
* event in the events field of the pollfd structure removes the fd from
* the monitored set. The revents field is not used.
*/
/*
 * Arms read events, picking the /dev/poll operation from the write
 * side's state: ADD if nothing is registered yet, CHANGE if a blocked
 * write registration must be dropped, UPDATE to OR POLLIN into an
 * active registration.  A merely BLOCKED read needs no kernel change.
 */
static void
nxt_devpoll_enable_read(nxt_event_set_t *event_set, nxt_event_fd_t *ev)
{
    nxt_uint_t  op, events;

    if (ev->read != NXT_EVENT_BLOCKED) {

        events = POLLIN;

        if (ev->write == NXT_EVENT_INACTIVE) {
            op = NXT_DEVPOLL_ADD;

        } else if (ev->write == NXT_EVENT_BLOCKED) {
            ev->write = NXT_EVENT_INACTIVE;
            op = NXT_DEVPOLL_CHANGE;

        } else {
            op = NXT_DEVPOLL_UPDATE;
            events = POLLIN | POLLOUT;
        }

        nxt_devpoll_change(event_set, ev, op, events);
    }

    ev->read = NXT_EVENT_DEFAULT;
}
/*
 * Arms write events; mirror image of nxt_devpoll_enable_read() with
 * the roles of the read and write sides exchanged.
 */
static void
nxt_devpoll_enable_write(nxt_event_set_t *event_set, nxt_event_fd_t *ev)
{
    nxt_uint_t  op, events;

    if (ev->write != NXT_EVENT_BLOCKED) {

        events = POLLOUT;

        if (ev->read == NXT_EVENT_INACTIVE) {
            op = NXT_DEVPOLL_ADD;

        } else if (ev->read == NXT_EVENT_BLOCKED) {
            ev->read = NXT_EVENT_INACTIVE;
            op = NXT_DEVPOLL_CHANGE;

        } else {
            op = NXT_DEVPOLL_UPDATE;
            events = POLLIN | POLLOUT;
        }

        nxt_devpoll_change(event_set, ev, op, events);
    }

    ev->write = NXT_EVENT_DEFAULT;
}
/*
 * Disarms read events: removes the descriptor entirely if the write
 * side is inactive or merely blocked, otherwise re-registers it for
 * POLLOUT only.
 */
static void
nxt_devpoll_disable_read(nxt_event_set_t *event_set, nxt_event_fd_t *ev)
{
    nxt_uint_t  op, events;

    ev->read = NXT_EVENT_INACTIVE;

    if (ev->write <= NXT_EVENT_BLOCKED) {
        ev->write = NXT_EVENT_INACTIVE;
        op = NXT_DEVPOLL_DELETE;
        events = POLLREMOVE;

    } else {
        op = NXT_DEVPOLL_CHANGE;
        events = POLLOUT;
    }

    nxt_devpoll_change(event_set, ev, op, events);
}
/*
 * Disarms write events; mirror image of nxt_devpoll_disable_read().
 */
static void
nxt_devpoll_disable_write(nxt_event_set_t *event_set, nxt_event_fd_t *ev)
{
    nxt_uint_t  op, events;

    ev->write = NXT_EVENT_INACTIVE;

    if (ev->read <= NXT_EVENT_BLOCKED) {
        ev->read = NXT_EVENT_INACTIVE;
        op = NXT_DEVPOLL_DELETE;
        events = POLLREMOVE;

    } else {
        op = NXT_DEVPOLL_CHANGE;
        events = POLLIN;
    }

    nxt_devpoll_change(event_set, ev, op, events);
}
/*
 * Marks read events blocked (delivery suppressed in software) without
 * touching the kernel registration; inactive descriptors are left alone.
 */
static void
nxt_devpoll_block_read(nxt_event_set_t *event_set, nxt_event_fd_t *ev)
{
    if (ev->read == NXT_EVENT_INACTIVE) {
        return;
    }

    ev->read = NXT_EVENT_BLOCKED;
}
/*
 * Marks write events blocked (delivery suppressed in software) without
 * touching the kernel registration; inactive descriptors are left alone.
 */
static void
nxt_devpoll_block_write(nxt_event_set_t *event_set, nxt_event_fd_t *ev)
{
    if (ev->write == NXT_EVENT_INACTIVE) {
        return;
    }

    ev->write = NXT_EVENT_BLOCKED;
}
/* Arms read events for a single delivery (auto-disabled on dispatch). */
static void
nxt_devpoll_oneshot_read(nxt_event_set_t *event_set, nxt_event_fd_t *ev)
{
    nxt_devpoll_enable_read(event_set, ev);

    ev->read = NXT_EVENT_ONESHOT;
}


/* Arms write events for a single delivery (auto-disabled on dispatch). */
static void
nxt_devpoll_oneshot_write(nxt_event_set_t *event_set, nxt_event_fd_t *ev)
{
    nxt_devpoll_enable_write(event_set, ev);

    ev->write = NXT_EVENT_ONESHOT;
}
/*
 * Queues a change record; the batch is flushed to the kernel when it
 * fills up (and otherwise at the next nxt_devpoll_set_poll() call).
 */
static void
nxt_devpoll_change(nxt_event_set_t *event_set, nxt_event_fd_t *ev,
    nxt_uint_t op, nxt_uint_t events)
{
    nxt_devpoll_change_t     *ch;
    nxt_devpoll_event_set_t  *ds;

    ds = &event_set->devpoll;

    nxt_log_debug(ev->log, "devpoll %d change fd:%d op:%ui ev:%04Xi",
                  ds->devpoll, ev->fd, op, events);

    if (ds->nchanges >= ds->mchanges) {
        /* Batch full: flush now; errors are handled inside. */
        (void) nxt_devpoll_commit_changes(nxt_thread(), ds);
    }

    ch = &ds->devpoll_changes[ds->nchanges++];
    ch->op = op;
    ch->fd = ev->fd;
    ch->events = events;
    ch->event = ev;
}
/*
 * Flushes the batched changes: expands them into pollfd records
 * (NXT_DEVPOLL_CHANGE becomes a POLLREMOVE plus a re-add), writes them
 * to /dev/poll, then maintains the fd hash for ADD/DELETE operations.
 * On write failure every affected event gets its error handler queued.
 */
static nxt_int_t
nxt_devpoll_commit_changes(nxt_thread_t *thr, nxt_devpoll_event_set_t *ds)
{
    size_t                n;
    nxt_int_t             ret, retval;
    struct pollfd         *pfd;
    nxt_devpoll_change_t  *ch, *end;

    nxt_log_debug(thr->log, "devpoll %d changes:%ui",
                  ds->devpoll, ds->nchanges);

    retval = NXT_OK;
    n = 0;
    ch = ds->devpoll_changes;
    end = ch + ds->nchanges;

    /* Expand change records into pollfd entries. */
    do {
        nxt_log_debug(thr->log, "devpoll fd:%d op:%d ev:%04Xd",
                      ch->fd, ch->op, ch->events);

        if (ch->op == NXT_DEVPOLL_CHANGE) {
            /* Old registration must be removed before re-adding. */
            pfd = &ds->changes[n++];
            pfd->fd = ch->fd;
            pfd->events = POLLREMOVE;
            pfd->revents = 0;
        }

        pfd = &ds->changes[n++];
        pfd->fd = ch->fd;
        pfd->events = ch->events;
        pfd->revents = 0;

        ch++;

    } while (ch < end);

    ch = ds->devpoll_changes;
    end = ch + ds->nchanges;

    ret = nxt_devpoll_write(thr, ds->devpoll, ds->changes, n);

    if (nxt_slow_path(ret != NXT_OK)) {
        /* The whole batch failed: fail every affected event. */
        do {
            nxt_devpoll_change_error(thr, ds, ch->event);
            ch++;
        } while (ch < end);

        ds->nchanges = 0;

        return NXT_ERROR;
    }

    /* Maintain the fd -> event hash. */
    do {
        if (ch->op == NXT_DEVPOLL_ADD) {
            ret = nxt_event_set_fd_hash_add(&ds->fd_hash, ch->fd, ch->event);

            if (nxt_slow_path(ret != NXT_OK)) {
                nxt_devpoll_change_error(thr, ds, ch->event);
                retval = NXT_ERROR;
            }

        } else if (ch->op == NXT_DEVPOLL_DELETE) {
            nxt_event_set_fd_hash_delete(&ds->fd_hash, ch->fd, 0);
        }

        /* Nothing to do for NXT_DEVPOLL_UPDATE and NXT_DEVPOLL_CHANGE. */

        ch++;

    } while (ch < end);

    ds->nchanges = 0;

    return retval;
}
/*
 * Fails one event: deactivates it, queues its error handler, and
 * purges it from both the fd hash and the kernel poll set.
 */
static void
nxt_devpoll_change_error(nxt_thread_t *thr, nxt_devpoll_event_set_t *ds,
    nxt_event_fd_t *ev)
{
    ev->read = NXT_EVENT_INACTIVE;
    ev->write = NXT_EVENT_INACTIVE;

    nxt_thread_work_queue_add(thr, &thr->work_queue.main,
                              ev->error_handler, ev, ev->data, ev->log);

    nxt_event_set_fd_hash_delete(&ds->fd_hash, ev->fd, 1);

    nxt_devpoll_remove(thr, ds, ev->fd);
}
/*
 * Removes "fd" from the kernel poll set if ioctl(DP_ISPOLLED) reports
 * it present (or the query itself fails, in which case removal is
 * attempted anyway).
 */
static void
nxt_devpoll_remove(nxt_thread_t *thr, nxt_devpoll_event_set_t *ds, nxt_fd_t fd)
{
    int            n;
    struct pollfd  pfd;

    pfd.fd = fd;
    pfd.events = 0;
    pfd.revents = 0;

    n = ioctl(ds->devpoll, DP_ISPOLLED, &pfd);

    nxt_log_debug(thr->log, "ioctl(%d, DP_ISPOLLED, %d): %d",
                  ds->devpoll, fd, n);

    if (n == 0) {
        /* The file descriptor is not in the set. */
        return;
    }

    if (n == -1) {
        nxt_log_alert(thr->log, "ioctl(%d, DP_ISPOLLED, %d) failed %E",
                      ds->devpoll, fd, nxt_errno);
        /* Fall through. */
    }

    /* n == 1: the file descriptor is in the set. */

    nxt_log_debug(thr->log, "devpoll %d remove fd:%d", ds->devpoll, fd);

    pfd.fd = fd;
    pfd.events = POLLREMOVE;
    pfd.revents = 0;

    nxt_devpoll_write(thr, ds->devpoll, &pfd, 1);
}
/*
 * Writes an array of "n" pollfd change records to the /dev/poll
 * descriptor.  Returns NXT_OK if the whole array was written,
 * NXT_ERROR otherwise (including a short write).
 */
static nxt_int_t
nxt_devpoll_write(nxt_thread_t *thr, int devpoll, struct pollfd *pfd,
    size_t n)
{
    nxt_log_debug(thr->log, "devpoll write(%d) changes:%uz", devpoll, n);

    n *= sizeof(struct pollfd);

    /*
     * A complete write is the expected case; the original code wrapped
     * the success test in nxt_slow_path(), mispredicting the hot path.
     */
    if (nxt_slow_path(write(devpoll, pfd, n) != (ssize_t) n)) {
        nxt_log_alert(thr->log, "devpoll write(%d) failed %E",
                      devpoll, nxt_errno);
        return NXT_ERROR;
    }

    return NXT_OK;
}
/*
 * Flushes pending changes, waits for events with ioctl(DP_POLL), and
 * dispatches read/write handlers for each ready descriptor.  A change
 * flush failure forces a zero timeout so the queued error handlers run
 * immediately after polling.
 */
static void
nxt_devpoll_set_poll(nxt_thread_t *thr, nxt_event_set_t *event_set,
    nxt_msec_t timeout)
{
    int                      nevents;
    nxt_fd_t                 fd;
    nxt_int_t                i;
    nxt_err_t                err;
    nxt_uint_t               events, level;
    struct dvpoll            dvp;
    struct pollfd            *pfd;
    nxt_event_fd_t           *ev;
    nxt_devpoll_event_set_t  *ds;

    ds = &event_set->devpoll;

    if (ds->nchanges != 0) {
        if (nxt_devpoll_commit_changes(thr, ds) != NXT_OK) {
            /* Error handlers have been enqueued on failure. */
            timeout = 0;
        }
    }

    nxt_log_debug(thr->log, "ioctl(%d, DP_POLL) timeout:%M",
                  ds->devpoll, timeout);

    dvp.dp_fds = ds->events;
    dvp.dp_nfds = ds->mevents;
    dvp.dp_timeout = timeout;

    nevents = ioctl(ds->devpoll, DP_POLL, &dvp);

    /* Capture errno before anything else can clobber it. */
    err = (nevents == -1) ? nxt_errno : 0;

    nxt_thread_time_update(thr);

    nxt_log_debug(thr->log, "ioctl(%d, DP_POLL): %d", ds->devpoll, nevents);

    if (nevents == -1) {
        /* EINTR is routine (signals); everything else is alert-worthy. */
        level = (err == NXT_EINTR) ? NXT_LOG_INFO : NXT_LOG_ALERT;
        nxt_log_error(level, thr->log, "ioctl(%d, DP_POLL) failed %E",
                      ds->devpoll, err);
        return;
    }

    for (i = 0; i < nevents; i++) {

        pfd = &ds->events[i];
        fd = pfd->fd;
        events = pfd->revents;

        ev = nxt_event_set_fd_hash_get(&ds->fd_hash, fd);

        if (nxt_slow_path(ev == NULL)) {
            /* Unknown descriptor: purge it from the kernel set. */
            nxt_log_alert(thr->log, "ioctl(%d, DP_POLL) returned invalid "
                          "fd:%d ev:%04Xd rev:%04uXi",
                          ds->devpoll, fd, pfd->events, events);
            nxt_devpoll_remove(thr, ds, fd);
            continue;
        }

        nxt_log_debug(ev->log, "devpoll: fd:%d ev:%04uXi rd:%d wr:%d",
                      fd, events, ev->read, ev->write);

        /*
         * Fixed misplaced parenthesis: the original tested
         * "nxt_slow_path(events & MASK) != 0", comparing the hint
         * macro's result instead of wrapping the whole predicate.
         */
        if (nxt_slow_path((events & (POLLERR | POLLHUP | POLLNVAL)) != 0)) {
            nxt_log_alert(ev->log,
                          "ioctl(%d, DP_POLL) error fd:%d ev:%04Xd rev:%04uXi",
                          ds->devpoll, fd, pfd->events, events);
            nxt_thread_work_queue_add(thr, &thr->work_queue.main,
                                      ev->error_handler, ev, ev->data, ev->log);
            continue;
        }

        if (events & POLLIN) {
            ev->read_ready = 1;

            if (ev->read != NXT_EVENT_BLOCKED) {

                if (ev->read == NXT_EVENT_ONESHOT) {
                    nxt_devpoll_disable_read(event_set, ev);
                }

                nxt_thread_work_queue_add(thr, ev->read_work_queue,
                                          ev->read_handler,
                                          ev, ev->data, ev->log);
            }
        }

        if (events & POLLOUT) {
            ev->write_ready = 1;

            if (ev->write != NXT_EVENT_BLOCKED) {

                if (ev->write == NXT_EVENT_ONESHOT) {
                    nxt_devpoll_disable_write(event_set, ev);
                }

                nxt_thread_work_queue_add(thr, ev->write_work_queue,
                                          ev->write_handler,
                                          ev, ev->data, ev->log);
            }
        }
    }
}

45
src/nxt_djb_hash.c Normal file
View File

@@ -0,0 +1,45 @@
/*
* Copyright (C) Igor Sysoev
* Copyright (C) NGINX, Inc.
*/
#include <nxt_main.h>
/*
 * DJB (Daniel J. Bernstein) hash over "len" bytes:
 * h = h * 33 ^ byte, starting from 5381 (NXT_DJB_HASH_INIT);
 * the multiply-by-33 is written as (h << 5) + h.
 */
uint32_t
nxt_djb_hash(const void *data, size_t len)
{
    size_t               i;
    uint32_t             hash;
    const unsigned char  *bytes;

    bytes = data;
    hash = 5381;    /* NXT_DJB_HASH_INIT */

    for (i = 0; i < len; i++) {
        hash = (uint32_t) (((hash << 5) + hash) ^ (uint32_t) bytes[i]);
    }

    return hash;
}
/*
 * Case-insensitive DJB hash: identical to nxt_djb_hash() except that
 * each byte is lowercased with nxt_lowcase() before mixing.
 */
uint32_t
nxt_djb_hash_lowcase(const void *data, size_t len)
{
    u_char        ch;
    size_t        i;
    uint32_t      hash;
    const u_char  *bytes;

    bytes = data;
    hash = NXT_DJB_HASH_INIT;

    for (i = 0; i < len; i++) {
        ch = bytes[i];
        hash = nxt_djb_hash_add(hash, nxt_lowcase(ch));
    }

    return hash;
}

26
src/nxt_djb_hash.h Normal file
View File

@@ -0,0 +1,26 @@
/*
* Copyright (C) Igor Sysoev
* Copyright (C) NGINX, Inc.
*/
#ifndef _NXT_DJB_HASH_H_INCLUDED_
#define _NXT_DJB_HASH_H_INCLUDED_
/* A fast and simple hash function by Daniel J. Bernstein. */
NXT_EXPORT uint32_t nxt_djb_hash(const void *data, size_t len);
NXT_EXPORT uint32_t nxt_djb_hash_lowcase(const void *data, size_t len);
#define NXT_DJB_HASH_INIT 5381
#define \
nxt_djb_hash_add(hash, val) \
((uint32_t) ((((hash) << 5) + (hash)) ^ (uint32_t) (val)))
#endif /* _NXT_DJB_HASH_H_INCLUDED_ */

86
src/nxt_dyld.c Normal file
View File

@@ -0,0 +1,86 @@
/*
* Copyright (C) Igor Sysoev
* Copyright (C) NGINX, Inc.
*/
#include <nxt_main.h>
/*
 * Loads a shared object with dlopen(RTLD_NOW | RTLD_GLOBAL) and stores
 * the handle in dyld->handle.  Returns NXT_OK or NXT_ERROR.
 */
nxt_int_t
nxt_dyld_load(nxt_dyld_t *dyld)
{
    const char  *reason;

    dyld->handle = dlopen(dyld->name, RTLD_NOW | RTLD_GLOBAL);

    if (dyld->handle == NULL) {
        reason = dlerror();

        /* dlerror() returns NULL if the error was already consumed. */
        if (reason == NULL) {
            reason = "(null)";
        }

        nxt_thread_log_alert("dlopen(\"%s\") failed: %s", dyld->name, reason);

        return NXT_ERROR;
    }

    nxt_thread_log_debug("dlopen(\"%s\")", dyld->name);

    return NXT_OK;
}
/*
 * Resolves "symbol" either in a specific shared object or, when dyld
 * is NXT_DYLD_ANY, across all loaded objects (RTLD_DEFAULT).
 * Returns the symbol address or NULL (logged as an alert).
 */
void *
nxt_dyld_symbol(nxt_dyld_t *dyld, const char *symbol)
{
    void        *handle, *s;
    const char  *name;
    const char  *err;

    if (dyld == NXT_DYLD_ANY) {
        handle = RTLD_DEFAULT;
        name = "RTLD_DEFAULT";

    } else {
        handle = dyld->handle;
        name = dyld->name;
    }

    s = dlsym(handle, symbol);

    if (s != NULL) {
        nxt_thread_log_debug("dlsym(\"%s\", \"%s\")", name, symbol);
        return s;
    }

    /* dlerror() may return NULL if the error was already consumed. */
    err = dlerror();

    if (err == NULL) {
        err = "(null)";
    }

    nxt_thread_log_alert("dlsym(\"%s\", \"%s\") failed: %s", name, symbol, err);

    return s;
}
/*
 * Unloads the shared object with dlclose().  Returns NXT_OK on
 * success, NXT_ERROR otherwise (logged as an alert).
 */
nxt_int_t
nxt_dyld_unload(nxt_dyld_t *dyld)
{
    const char  *reason;

    if (dlclose(dyld->handle) != 0) {
        reason = dlerror();

        /* dlerror() returns NULL if the error was already consumed. */
        if (reason == NULL) {
            reason = "(null)";
        }

        nxt_thread_log_alert("dlclose(\"%s\") failed: %s", dyld->name, reason);

        return NXT_ERROR;
    }

    nxt_thread_log_debug("dlclose(\"%s\")", dyld->name);

    return NXT_OK;
}

30
src/nxt_dyld.h Normal file
View File

@@ -0,0 +1,30 @@
/*
 * Copyright (C) Igor Sysoev
 * Copyright (C) NGINX, Inc.
 */

#ifndef _NXT_UNIX_DYLD_H_INCLUDED_
#define _NXT_UNIX_DYLD_H_INCLUDED_


/* A dynamically loaded shared object (dlopen() wrapper state). */
typedef struct {
    void  *handle;   /* dlopen() handle; NULL until loaded. */
    char  *name;     /* Path passed to dlopen(); used in log messages. */
} nxt_dyld_t;


/* Pass to nxt_dyld_symbol() to search all objects loaded in the process. */
#define NXT_DYLD_ANY  RTLD_DEFAULT


#define                                                                       \
nxt_dyld_is_valid(dyld)                                                       \
    ((dyld)->handle != NULL)


NXT_EXPORT nxt_int_t nxt_dyld_load(nxt_dyld_t *dyld);
NXT_EXPORT void *nxt_dyld_symbol(nxt_dyld_t *dyld, const char *symbol);
NXT_EXPORT nxt_int_t nxt_dyld_unload(nxt_dyld_t *dyld);


#endif /* _NXT_UNIX_DYLD_H_INCLUDED_ */

1167
src/nxt_epoll.c Normal file

File diff suppressed because it is too large Load Diff

152
src/nxt_errno.c Normal file
View File

@@ -0,0 +1,152 @@
/*
* Copyright (C) Igor Sysoev
* Copyright (C) NGINX, Inc.
*/
#include <nxt_main.h>
/*
* The strerror() messages are copied because:
*
* 1) strerror() and strerror_r() functions are not Async-Signal-Safe,
* therefore, they can not be used in signal handlers;
*
* 2) a direct sys_errlist[] array may be used instead of these functions,
* but Linux linker warns about this usage:
*
* warning: `sys_errlist' is deprecated; use `strerror' or `strerror_r' instead
* warning: `sys_nerr' is deprecated; use `strerror' or `strerror_r' instead
*
* causing false bug reports.
*/
static u_char *nxt_bootstrap_strerror(nxt_err_t err, u_char *errstr,
size_t size);
static u_char *nxt_runtime_strerror(nxt_err_t err, u_char *errstr, size_t size);
nxt_strerror_t nxt_strerror = nxt_bootstrap_strerror;
static nxt_str_t *nxt_sys_errlist;
static nxt_uint_t nxt_sys_nerr;
/*
 * Probes the system's strerror() messages, copies the valid ones into a
 * single private allocation, and switches nxt_strerror to the table-based
 * implementation so error messages become Async-Signal-Safe afterwards.
 */
nxt_int_t
nxt_strerror_start(void)
{
    char        *msg;
    u_char      *p;
    size_t      size, len, n;
    nxt_uint_t  err, invalid;

    /* The last entry. */
    size = sizeof("Unknown error") - 1;

    /*
     * Linux has holes for error codes 41 and 58, so the loop
     * stops only after 100 invalid codes in succession.
     *
     * NOTE(review): "invalid" is never reset when a valid message is
     * seen, so the loop in fact stops after 100 invalid codes in total,
     * not in succession -- confirm this matches the comment's intent.
     */
    for (invalid = 0; invalid < 100 && nxt_sys_nerr < 65536; nxt_sys_nerr++) {

        nxt_set_errno(0);
        msg = strerror((int) nxt_sys_nerr);

        /*
         * strerror() behaviour on passing invalid error code depends
         * on OS and version:
         *     Linux returns "Unknown error NN";
         *     FreeBSD, NetBSD and OpenBSD return "Unknown error: NN"
         *         and set errno to EINVAL;
         *     Solaris 10 returns "Unknown error" and sets errno to EINVAL;
         *     Solaris 9 returns "Unknown error";
         *     Solaris 2 returns NULL;
         *     MacOSX returns "Unknown error: NN";
         *     AIX returns "Error NNN occurred.";
         *     HP-UX returns "Unknown error" for invalid codes lesser than 250
         *         or empty string for larger codes.
         */
        if (msg == NULL) {
            invalid++;
            continue;
        }

        len = nxt_strlen(msg);

        /*
         * NOTE(review): the length is accumulated before the validity
         * checks below, so messages of invalid codes are counted too;
         * this only over-allocates the message storage.
         */
        size += len;

        if (len == 0  /* HP-UX empty strings. */
            || nxt_errno == NXT_EINVAL
            || nxt_memcmp(msg, "Unknown error", 13) == 0)
        {
            invalid++;
            continue;
        }

#if (NXT_AIX)
        /*
         * NOTE(review): " occurred." is 10 bytes but only 9 are compared
         * at offset len - 10, i.e. the trailing '.' is never checked --
         * confirm whether the comparison length should be 10.
         */
        if (nxt_memcmp(msg, "Error ", 6) == 0
            && nxt_memcmp(msg + len - 10, " occurred.", 9) == 0)
        {
            invalid++;
            continue;
        }
#endif

    }

    nxt_sys_nerr -= invalid;

    nxt_main_log_debug("sys_nerr: %d", nxt_sys_nerr);

    /* One nxt_str_t per code plus the trailing "Unknown error" entry. */
    n = (nxt_sys_nerr + 1) * sizeof(nxt_str_t);

    /* Single allocation: the index array followed by the message bytes. */
    nxt_sys_errlist = nxt_malloc(n + size);

    if (nxt_sys_errlist == NULL) {
        return NXT_ERROR;
    }

    p = (u_char *) nxt_sys_errlist + n;

    for (err = 0; err < nxt_sys_nerr; err++) {
        msg = strerror((int) err);
        len = nxt_strlen(msg);

        nxt_sys_errlist[err].len = len;
        nxt_sys_errlist[err].data = p;

        p = nxt_cpymem(p, msg, len);
    }

    /* The catch-all entry used for codes above nxt_sys_nerr. */
    nxt_sys_errlist[err].len = 13;
    nxt_sys_errlist[err].data = p;
    nxt_memcpy(p, "Unknown error", 13);

    /* From now on strerror() is never called at runtime. */
    nxt_strerror = nxt_runtime_strerror;

    return NXT_OK;
}
/* Fallback used until nxt_strerror_start() builds the copied table. */
static u_char *
nxt_bootstrap_strerror(nxt_err_t err, u_char *errstr, size_t size)
{
    u_char  *msg;

    msg = (u_char *) strerror(err);

    return nxt_cpystrn(errstr, msg, size);
}
/* Table-based, Async-Signal-Safe strerror() replacement. */
static u_char *
nxt_runtime_strerror(nxt_err_t err, u_char *errstr, size_t size)
{
    nxt_str_t   *entry;
    nxt_uint_t  idx;

    /* Out-of-range codes map to the trailing "Unknown error" entry. */
    idx = nxt_min((nxt_uint_t) err, nxt_sys_nerr);
    entry = &nxt_sys_errlist[idx];

    return nxt_cpymem(errstr, entry->data, nxt_min(size, entry->len));
}

88
src/nxt_errno.h Normal file
View File

@@ -0,0 +1,88 @@
/*
 * Copyright (C) Igor Sysoev
 * Copyright (C) NGINX, Inc.
 */

#ifndef _NXT_UNIX_ERRNO_H_INCLUDED_
#define _NXT_UNIX_ERRNO_H_INCLUDED_


/* On Unix an error code is simply the native errno value. */
typedef int                        nxt_err_t;


/* Portable NXT_E* aliases for the errno values used by the code base. */
#define NXT_EPERM                  EPERM
#define NXT_ENOENT                 ENOENT
#define NXT_ENOPATH                ENOENT
#define NXT_ESRCH                  ESRCH
#define NXT_EINTR                  EINTR
#define NXT_ECHILD                 ECHILD
#define NXT_ENOMEM                 ENOMEM
#define NXT_EACCES                 EACCES
#define NXT_EBUSY                  EBUSY
#define NXT_EEXIST                 EEXIST
#define NXT_EXDEV                  EXDEV
#define NXT_ENOTDIR                ENOTDIR
#define NXT_EISDIR                 EISDIR
#define NXT_EINVAL                 EINVAL
#define NXT_ENOSPC                 ENOSPC
#define NXT_EPIPE                  EPIPE
#define NXT_EINPROGRESS            EINPROGRESS
#define NXT_EOPNOTSUPP             EOPNOTSUPP
#define NXT_EADDRINUSE             EADDRINUSE
#define NXT_ECONNABORTED           ECONNABORTED
#define NXT_ECONNRESET             ECONNRESET
#define NXT_ENOTCONN               ENOTCONN
#define NXT_ETIMEDOUT              ETIMEDOUT
#define NXT_ECONNREFUSED           ECONNREFUSED
#define NXT_ENAMETOOLONG           ENAMETOOLONG
#define NXT_ENETDOWN               ENETDOWN
#define NXT_ENETUNREACH            ENETUNREACH
#define NXT_EHOSTDOWN              EHOSTDOWN
#define NXT_EHOSTUNREACH           EHOSTUNREACH
#define NXT_ENOSYS                 ENOSYS
#define NXT_ECANCELED              ECANCELED
#define NXT_EILSEQ                 EILSEQ
#define NXT_ETIME                  ETIME
/*
 * NOTE(review): 0 has no errno meaning on Unix; presumably a
 * placeholder for a "no more files" code on other platforms -- confirm.
 */
#define NXT_ENOMOREFILES           0

#if (NXT_HPUX)
/* HP-UX uses EWOULDBLOCK instead of EAGAIN. */
#define NXT_EAGAIN                 EWOULDBLOCK
#else
#define NXT_EAGAIN                 EAGAIN
#endif


/* Generic return codes used throughout the code base. */
#define NXT_OK                     0
#define NXT_ERROR                  (-1)
#define NXT_AGAIN                  (-2)
#define NXT_DECLINED               (-3)
#define NXT_DONE                   (-4)


#define                                                                       \
nxt_errno                                                                     \
    errno

#define                                                                       \
nxt_socket_errno                                                              \
    errno

#define                                                                       \
nxt_set_errno(err)                                                            \
    errno = err

#define                                                                       \
nxt_set_socket_errno(err)                                                     \
    errno = err


/* Builds the copied strerror() table; see nxt_errno.c for the rationale. */
nxt_int_t nxt_strerror_start(void);


/*
 * Starts as a plain strerror() wrapper and is switched by
 * nxt_strerror_start() to the Async-Signal-Safe table implementation.
 */
typedef u_char *(*nxt_strerror_t)(nxt_err_t err, u_char *errstr, size_t size);
extern nxt_strerror_t  nxt_strerror;


#endif /* _NXT_UNIX_ERRNO_H_INCLUDED_ */

234
src/nxt_event_conn.c Normal file
View File

@@ -0,0 +1,234 @@
/*
* Copyright (C) Igor Sysoev
* Copyright (C) NGINX, Inc.
*/
#include <nxt_main.h>
static void nxt_event_conn_shutdown_socket(nxt_thread_t *thr, void *obj,
void *data);
static void nxt_event_conn_close_socket(nxt_thread_t *thr, void *obj,
void *data);
/* Generic Unix connection I/O operations with the platform's sendfile(). */
nxt_event_conn_io_t  nxt_unix_event_conn_io = {
    nxt_event_conn_io_connect,
    nxt_event_conn_io_accept,

    nxt_event_conn_io_read,
    nxt_event_conn_io_recvbuf,
    nxt_event_conn_io_recv,

    nxt_event_conn_io_write,
    nxt_event_conn_io_write_chunk,

#if (NXT_HAVE_LINUX_SENDFILE)
    nxt_linux_event_conn_io_sendfile,
#elif (NXT_HAVE_FREEBSD_SENDFILE)
    nxt_freebsd_event_conn_io_sendfile,
#elif (NXT_HAVE_MACOSX_SENDFILE)
    nxt_macosx_event_conn_io_sendfile,
#elif (NXT_HAVE_SOLARIS_SENDFILEV)
    nxt_solaris_event_conn_io_sendfilev,
#elif (NXT_HAVE_AIX_SEND_FILE)
    nxt_aix_event_conn_io_send_file,
#elif (NXT_HAVE_HPUX_SENDFILE)
    nxt_hpux_event_conn_io_sendfile,
#else
    /* Plain writev()-based fallback when no sendfile variant exists. */
    nxt_event_conn_io_sendbuf,
#endif

    nxt_event_conn_io_writev,
    nxt_event_conn_io_send,

    nxt_event_conn_io_shutdown,
};
/*
 * Allocates a zeroed connection in the given memory pool and initializes
 * its log, timers, work queues, and cached engine I/O operations.
 * Returns NULL only if the pool allocation fails.
 */
nxt_event_conn_t *
nxt_event_conn_create(nxt_mem_pool_t *mp, nxt_log_t *log)
{
    nxt_thread_t         *thr;
    nxt_event_conn_t     *c;
    static nxt_atomic_t  ident = 1;

    c = nxt_mem_zalloc(mp, sizeof(nxt_event_conn_t));
    if (nxt_slow_path(c == NULL)) {
        return NULL;
    }

    c->mem_pool = mp;
    c->socket.fd = -1;

    /*
     * The socket logs through the connection's own embedded copy of
     * *log, which gets a unique ident below.
     */
    c->socket.log = &c->log;
    c->log = *log;

    /* The while loop skips possible uint32_t overflow. */
    while (c->log.ident == 0) {
        c->log.ident = (uint32_t) nxt_atomic_fetch_add(&ident, 1);
    }

    thr = nxt_thread();
    thr->engine->connections++;

    /* Cache engine defaults in the connection. */
    c->io = thr->engine->event->io;
    c->max_chunk = NXT_INT32_T_MAX;
    c->sendfile = NXT_CONN_SENDFILE_UNSET;

    c->socket.read_work_queue = &thr->work_queue.main;
    c->socket.write_work_queue = &thr->work_queue.main;

    nxt_event_conn_timer_init(&c->read_timer, c, c->socket.read_work_queue);
    nxt_event_conn_timer_init(&c->write_timer, c, c->socket.write_work_queue);

    nxt_log_debug(&c->log, "event connections: %uD", thr->engine->connections);

    return c;
}
/*
 * Shuts a connection down; timed out connections are reset via
 * SO_LINGER(0) so the kernel frees the socket memory immediately.
 */
void
nxt_event_conn_io_shutdown(nxt_thread_t *thr, void *obj, void *data)
{
    int               rc;
    socklen_t         optlen;
    struct linger     lng;
    nxt_event_conn_t  *c;

    c = obj;

    nxt_log_debug(thr->log, "event conn shutdown");

    if (c->socket.timedout) {
        /*
         * A reset of timed out connection on close
         * to release kernel memory associated with socket.
         * This also causes sending TCP/IP RST to a peer.
         */
        lng.l_onoff = 1;
        lng.l_linger = 0;
        optlen = sizeof(struct linger);

        rc = setsockopt(c->socket.fd, SOL_SOCKET, SO_LINGER, &lng, optlen);

        if (nxt_slow_path(rc != 0)) {
            nxt_log_error(NXT_LOG_CRIT, thr->log,
                          "setsockopt(%d, SO_LINGER) failed %E",
                          c->socket.fd, nxt_socket_errno);
        }
    }

    c->write_state->close_handler(thr, c, data);
}
/*
 * Tears a connection down: cancels queued work and timers, detaches the
 * fd from the event engine, and closes the socket (immediately, or via
 * the engine's shutdown/close queues when work is batched).
 */
void
nxt_event_conn_close(nxt_thread_t *thr, nxt_event_conn_t *c)
{
    nxt_work_queue_t    *wq;
    nxt_work_handler_t  handler;

    nxt_log_debug(thr->log, "event conn close fd:%d", c->socket.fd);

    /* Drop work already queued for the connection and both its timers. */
    nxt_thread_work_queue_drop(thr, c);
    nxt_thread_work_queue_drop(thr, &c->read_timer);
    nxt_thread_work_queue_drop(thr, &c->write_timer);

    nxt_event_timer_delete(thr->engine, &c->read_timer);
    nxt_event_timer_delete(thr->engine, &c->write_timer);

    nxt_event_fd_close(thr->engine, &c->socket);
    thr->engine->connections--;

    nxt_log_debug(thr->log, "event connections: %uD", thr->engine->connections);

    if (thr->engine->batch != 0) {
        /*
         * Deferred close: sockets already closed by the peer or in error
         * go straight to the close queue; others are shutdown() first and
         * closed on a later pass.
         */
        if (c->socket.closed || c->socket.error != 0) {
            wq = &thr->engine->close_work_queue;
            handler = nxt_event_conn_close_socket;

        } else {
            wq = &thr->engine->shutdown_work_queue;
            handler = nxt_event_conn_shutdown_socket;
        }

        nxt_thread_work_queue_add(thr, wq, handler,
                                  (void *) (uintptr_t) c->socket.fd, NULL,
                                  &nxt_main_log);

    } else {
        nxt_socket_close(c->socket.fd);
    }

    c->socket.fd = -1;
}
/* First half of the batched two-phase close: shutdown now, close later. */
static void
nxt_event_conn_shutdown_socket(nxt_thread_t *thr, void *obj, void *data)
{
    nxt_socket_t  fd;

    fd = (nxt_socket_t) (uintptr_t) obj;

    nxt_socket_shutdown(fd, SHUT_RDWR);

    nxt_thread_work_queue_add(thr, &thr->engine->close_work_queue,
                              nxt_event_conn_close_socket,
                              (void *) (uintptr_t) fd, NULL, &nxt_main_log);
}
/* Second half of the batched close: actually close the descriptor. */
static void
nxt_event_conn_close_socket(nxt_thread_t *thr, void *obj, void *data)
{
    nxt_socket_t  fd;

    fd = (nxt_socket_t) (uintptr_t) obj;

    nxt_socket_close(fd);
}
/*
 * Arms the state's timer on the connection, if the state defines one
 * and its timer_value() callback returns a non-zero timeout.
 */
void
nxt_event_conn_timer(nxt_event_engine_t *engine, nxt_event_conn_t *c,
    const nxt_event_conn_state_t *state, nxt_event_timer_t *tev)
{
    nxt_msec_t  timeout;

    if (state->timer_value == NULL) {
        return;
    }

    timeout = state->timer_value(c, state->timer_data);

    if (timeout != 0) {
        tev->handler = state->timer_handler;
        nxt_event_timer_add(engine, tev, timeout);
    }
}
/*
 * Redirects all future work of the connection and its timers to wq.
 * Work already queued stays where it is (the migration code is disabled).
 */
void
nxt_event_conn_work_queue_set(nxt_event_conn_t *c, nxt_work_queue_t *wq)
{
#if 0
    /* Disabled: would move already queued work items to the new queue. */
    nxt_thread_t      *thr;
    nxt_work_queue_t  *owq;

    thr = nxt_thread();
    owq = c->socket.work_queue;

    nxt_thread_work_queue_move(thr, owq, wq, c);
    nxt_thread_work_queue_move(thr, owq, wq, &c->read_timer);
    nxt_thread_work_queue_move(thr, owq, wq, &c->write_timer);
#endif

    c->read_work_queue = wq;
    c->write_work_queue = wq;
    c->read_timer.work_queue = wq;
    c->write_timer.work_queue = wq;
}

382
src/nxt_event_conn.h Normal file
View File

@@ -0,0 +1,382 @@
/*
* Copyright (C) Igor Sysoev
* Copyright (C) NGINX, Inc.
*/
#ifndef _NXT_EVENT_CONN_H_INCLUDED_
#define _NXT_EVENT_CONN_H_INCLUDED_
/* Returns a state timeout in milliseconds; 0 leaves the timer unarmed. */
typedef nxt_msec_t (*nxt_event_conn_timer_val_t)(nxt_event_conn_t *c,
    uintptr_t data);


/* Values for nxt_event_conn_state_t.process_buffers. */
#define NXT_EVENT_NO_BUF_PROCESS      0
#define NXT_EVENT_BUF_PROCESS         1
#define NXT_EVENT_BUF_COMPLETION      1

/* Values for nxt_event_conn_state_t.autoreset_timer. */
#define NXT_EVENT_TIMER_AUTORESET     1
#define NXT_EVENT_TIMER_NO_AUTORESET  0


/* Callbacks and timer configuration for one I/O state of a connection. */
typedef struct {
    uint8_t                     process_buffers;
    uint8_t                     autoreset_timer;

    nxt_work_handler_t          ready_handler;
    nxt_work_handler_t          close_handler;
    nxt_work_handler_t          error_handler;

    nxt_work_handler_t          timer_handler;
    nxt_event_conn_timer_val_t  timer_value;
    uintptr_t                   timer_data;
} nxt_event_conn_state_t;
/*
 * Output rate limiting state of a connection.
 * NOTE(review): field semantics are defined by the rate limiting code
 * (e.g. nxt_event_conn_write_limit()), which is outside this header --
 * confirm before documenting individual fields.
 */
typedef struct {
    double      average;
    size_t      limit;
    size_t      limit_after;
    size_t      max_limit;
    nxt_msec_t  last;
} nxt_event_write_rate_t;
/* Pluggable connection I/O operations, selected per event facility. */
typedef struct {
    void     (*connect)(nxt_thread_t *thr, void *obj,
                 void *data);
    void     (*accept)(nxt_thread_t *thr, void *obj,
                 void *data);

    /*
     * The read() with NULL c->read buffer waits readiness of a connection
     * to avoid allocation of read buffer if the connection will time out
     * or will be closed with error.  The kqueue-specific read() can also
     * detect case if a client did not sent anything and has just closed the
     * connection without errors.  In the latter case state's close_handler
     * is called.
     */
    void     (*read)(nxt_thread_t *thr, void *obj,
                 void *data);
    ssize_t  (*recvbuf)(nxt_event_conn_t *c, nxt_buf_t *b);
    ssize_t  (*recv)(nxt_event_conn_t *c, void *buf,
                 size_t size, nxt_uint_t flags);

    /*
     * The write() is an interface to write a buffer chain with a given rate
     * limit.  It calls write_chunk() in a cycle and handles write event timer.
     */
    void     (*write)(nxt_thread_t *thr, void *obj,
                 void *data);

    /*
     * The write_chunk() interface writes a buffer chain with a given limit
     * and toggles write event.  SSL/TLS libraries' write_chunk() interface
     * buffers data and calls the library specific send() interface to write
     * the buffered data eventually.
     */
    ssize_t  (*write_chunk)(nxt_thread_t *thr,
                 nxt_event_conn_t *c, nxt_buf_t *b,
                 size_t limit);

    /*
     * The sendbuf() is an interface for OS-specific sendfile
     * implementations or simple writev().
     */
    ssize_t  (*sendbuf)(nxt_event_conn_t *c, nxt_buf_t *b,
                 size_t limit);

    /*
     * The writev() is an interface to write several nxt_iobuf_t buffers.
     */
    ssize_t  (*writev)(nxt_event_conn_t *c,
                 nxt_iobuf_t *iob, nxt_uint_t niob);

    /*
     * The send() is an interface to write a single buffer.  SSL/TLS
     * libraries' send() interface handles also the libraries' errors.
     */
    ssize_t  (*send)(nxt_event_conn_t *c, void *buf,
                 size_t size);

    void     (*shutdown)(nxt_thread_t *thr, void *obj,
                 void *data);
} nxt_event_conn_io_t;
/* An event-driven connection: a socket plus buffers, timers, and state. */
struct nxt_event_conn_s {
    /*
     * Must be the first field, since nxt_event_fd_t
     * and nxt_event_conn_t are used interchangeably.
     */
    nxt_event_fd_t                socket;

    nxt_buf_t                     *read;
    const nxt_event_conn_state_t  *read_state;
    nxt_work_queue_t              *read_work_queue;
    nxt_event_timer_t             read_timer;

    nxt_buf_t                     *write;
    const nxt_event_conn_state_t  *write_state;
    nxt_work_queue_t              *write_work_queue;
    nxt_event_write_rate_t        *rate;
    nxt_event_timer_t             write_timer;

    nxt_off_t                     sent;
    uint32_t                      max_chunk;
    uint32_t                      nbytes;

    /* I/O operations cached from the owning event engine. */
    nxt_event_conn_io_t           *io;

#if (NXT_SSLTLS || NXT_THREADS)
    /* SunC does not support "zero-sized struct/union". */

    union {
#if (NXT_SSLTLS)
        void                      *ssltls;
#endif
#if (NXT_THREADS)
        nxt_thread_pool_t         *thread_pool;
#endif
    } u;
#endif

    nxt_mem_pool_t                *mem_pool;

    /* Private copy of the creator's log with a per-connection ident. */
    nxt_log_t                     log;

    nxt_listen_socket_t           *listen;
    nxt_sockaddr_t                *remote;
    nxt_sockaddr_t                *local;
    const char                    *action;

    uint8_t                       peek;
    uint8_t                       blocked;      /* 1 bit */
    uint8_t                       delayed;      /* 1 bit */

#define NXT_CONN_SENDFILE_OFF    0
#define NXT_CONN_SENDFILE_ON     1
#define NXT_CONN_SENDFILE_UNSET  3

    uint8_t                       sendfile;     /* 2 bits */
    uint8_t                       tcp_nodelay;  /* 1 bit */

    nxt_queue_link_t              link;
};
/*
* The nxt_event_conn_listen_t is separated from nxt_listen_socket_t
* because nxt_listen_socket_t is one per process whilst each worker
* thread uses own nxt_event_conn_listen_t.
*/
typedef struct {
    /* Must be the first field. */
    nxt_event_fd_t       socket;

    /* Number of accept()s still allowed for the current read event. */
    uint32_t             ready;
    uint32_t             batch;

    /* An accept() interface is cached to minimize memory accesses. */
    void                 (*accept)(nxt_thread_t *thr, void *obj,
                             void *data);

    nxt_listen_socket_t  *listen;

    /* 1s retry timer armed when no connections can be allocated. */
    nxt_event_timer_t    timer;

    nxt_queue_link_t     link;
} nxt_event_conn_listen_t;
/* Run a handler directly, or defer it when the engine batches work. */
#define                                                                       \
nxt_event_conn_io_handle(thr, wq, handler, c, data)                           \
    do {                                                                      \
        if (thr->engine->batch != 0) {                                        \
            nxt_thread_work_queue_add(thr, wq, handler, c, data, thr->log);   \
                                                                              \
        } else {                                                              \
            handler(thr, c, data);                                            \
        }                                                                     \
    } while (0)


/* Bind a connection timer to its work queue, log, and socket ident. */
#define                                                                       \
nxt_event_conn_timer_init(ev, c, wq)                                          \
    do {                                                                      \
        (ev)->work_queue = (wq);                                              \
        (ev)->log = &(c)->log;                                                \
        (ev)->precision = NXT_EVENT_TIMER_DEFAULT_PRECISION;                  \
        nxt_event_timer_ident((ev), (c)->socket.fd);                          \
    } while (0)


/* Recover the connection from one of its embedded timers. */
#define                                                                       \
nxt_event_read_timer_conn(ev)                                                 \
    nxt_event_timer_data(ev, nxt_event_conn_t, read_timer)


#define                                                                       \
nxt_event_write_timer_conn(ev)                                                \
    nxt_event_timer_data(ev, nxt_event_conn_t, write_timer)


#if (NXT_HAVE_UNIX_DOMAIN)

/* TCP_NODELAY is not applicable to AF_UNIX sockets, so skip them. */
#define                                                                       \
nxt_event_conn_tcp_nodelay_on(c)                                              \
    do {                                                                      \
        nxt_int_t  ret;                                                       \
                                                                              \
        if ((c)->remote->u.sockaddr.sa_family != AF_UNIX) {                   \
            ret = nxt_socket_setsockopt((c)->socket.fd, IPPROTO_TCP,          \
                                        TCP_NODELAY, 1);                      \
                                                                              \
            (c)->tcp_nodelay = (ret == NXT_OK);                               \
        }                                                                     \
    } while (0)

#else

#define                                                                       \
nxt_event_conn_tcp_nodelay_on(c)                                              \
    do {                                                                      \
        nxt_int_t  ret;                                                       \
                                                                              \
        ret = nxt_socket_setsockopt((c)->socket.fd, IPPROTO_TCP,              \
                                    TCP_NODELAY, 1);                          \
                                                                              \
        (c)->tcp_nodelay = (ret == NXT_OK);                                   \
    } while (0)

#endif
NXT_EXPORT nxt_event_conn_t *nxt_event_conn_create(nxt_mem_pool_t *mp,
nxt_log_t *log);
void nxt_event_conn_io_shutdown(nxt_thread_t *thr, void *obj,
void *data);
NXT_EXPORT void nxt_event_conn_close(nxt_thread_t *thr, nxt_event_conn_t *c);
NXT_EXPORT void nxt_event_conn_timer(nxt_event_engine_t *engine,
nxt_event_conn_t *c, const nxt_event_conn_state_t *state,
nxt_event_timer_t *tev);
NXT_EXPORT void nxt_event_conn_work_queue_set(nxt_event_conn_t *c,
nxt_work_queue_t *wq);
NXT_EXPORT void nxt_event_conn_connect(nxt_thread_t *thr, nxt_event_conn_t *c);
void nxt_event_conn_batch_socket(nxt_thread_t *thr, void *obj,
void *data);
void nxt_event_conn_io_connect(nxt_thread_t *thr, void *obj,
void *data);
nxt_int_t nxt_event_conn_socket(nxt_thread_t *thr,
nxt_event_conn_t *c);
void nxt_event_conn_connect_test(nxt_thread_t *thr, void *obj,
void *data);
void nxt_event_conn_connect_error(nxt_thread_t *thr, void *obj,
void *data);
NXT_EXPORT nxt_int_t nxt_event_conn_listen(nxt_thread_t *thr,
nxt_listen_socket_t *ls);
void nxt_event_conn_io_accept(nxt_thread_t *thr, void *obj,
void *data);
NXT_EXPORT void nxt_event_conn_accept(nxt_thread_t *thr,
nxt_event_conn_listen_t *cls, nxt_event_conn_t *c);
void nxt_event_conn_accept_error(nxt_thread_t *thr,
nxt_event_conn_listen_t *cls, const char *accept_syscall, nxt_err_t err);
NXT_EXPORT void nxt_event_conn_read(nxt_thread_t *thr, nxt_event_conn_t *c);
void nxt_event_conn_io_read(nxt_thread_t *thr, void *obj,
void *data);
ssize_t nxt_event_conn_io_recvbuf(nxt_event_conn_t *c, nxt_buf_t *b);
ssize_t nxt_event_conn_io_recv(nxt_event_conn_t *c, void *buf,
size_t size, nxt_uint_t flags);
NXT_EXPORT void nxt_event_conn_write(nxt_thread_t *thr, nxt_event_conn_t *c);
size_t nxt_event_conn_write_limit(nxt_event_conn_t *c);
nxt_bool_t nxt_event_conn_write_delayed(nxt_event_engine_t *engine,
nxt_event_conn_t *c, size_t sent);
void nxt_event_conn_io_write(nxt_thread_t *thr, void *obj,
void *data);
ssize_t nxt_event_conn_io_write_chunk(nxt_thread_t *thr,
nxt_event_conn_t *c, nxt_buf_t *b, size_t limit);
ssize_t nxt_event_conn_io_writev(nxt_event_conn_t *c,
nxt_iobuf_t *iob, nxt_uint_t niob);
ssize_t nxt_event_conn_io_send(nxt_event_conn_t *c, void *buf,
size_t size);
NXT_EXPORT void nxt_event_conn_job_sendfile(nxt_thread_t *thr,
nxt_event_conn_t *c);
/* Defer socket creation for a connect on the engine's socket queue. */
#define                                                                       \
nxt_event_conn_connect_enqueue(thr, c)                                        \
    nxt_thread_work_queue_add(thr, &thr->engine->socket_work_queue,           \
                              nxt_event_conn_batch_socket,                    \
                              c, c->socket.data, c->socket.log)


/* Queue the connection's read operation on the engine's read queue. */
#define                                                                       \
nxt_event_conn_read_enqueue(thr, c)                                           \
    do {                                                                      \
        c->socket.read_work_queue = &thr->engine->read_work_queue;            \
                                                                              \
        nxt_thread_work_queue_add(thr, &thr->engine->read_work_queue,         \
                                  c->io->read, c, c->socket.data,             \
                                  c->socket.log);                             \
    } while (0)


/* Queue the connection's write operation on the engine's write queue. */
#define                                                                       \
nxt_event_conn_write_enqueue(thr, c)                                          \
    do {                                                                      \
        c->socket.write_work_queue = &thr->engine->write_work_queue;          \
                                                                              \
        nxt_thread_work_queue_add(thr, &thr->engine->write_work_queue,        \
                                  c->io->write, c, c->socket.data,            \
                                  c->socket.log);                             \
    } while (0)
extern nxt_event_conn_io_t nxt_unix_event_conn_io;
/* State of a bidirectional proxy between a client and a peer connection. */
typedef struct {
    /*
     * Client and peer connections are not embedded because already
     * existent connections can be switched to the event connection proxy.
     */
    nxt_event_conn_t    *client;
    nxt_event_conn_t    *peer;
    nxt_buf_t           *client_buffer;
    nxt_buf_t           *peer_buffer;

    size_t              client_buffer_size;
    size_t              peer_buffer_size;

    nxt_msec_t          client_wait_timeout;
    nxt_msec_t          connect_timeout;
    nxt_msec_t          reconnect_timeout;
    nxt_msec_t          peer_wait_timeout;
    nxt_msec_t          client_write_timeout;
    nxt_msec_t          peer_write_timeout;

    uint8_t             connected;  /* 1 bit */
    uint8_t             delayed;    /* 1 bit */
    uint8_t             retries;    /* 8 bits */

    nxt_work_handler_t  completion_handler;
} nxt_event_conn_proxy_t;
NXT_EXPORT nxt_event_conn_proxy_t *nxt_event_conn_proxy_create(
nxt_event_conn_t *c);
NXT_EXPORT void nxt_event_conn_proxy(nxt_event_conn_proxy_t *p);
#endif /* _NXT_EVENT_CONN_H_INCLUDED_ */

367
src/nxt_event_conn_accept.c Normal file
View File

@@ -0,0 +1,367 @@
/*
* Copyright (C) Igor Sysoev
* Copyright (C) NGINX, Inc.
*/
#include <nxt_main.h>
/*
* A listen socket handler calls an event facility specific io_accept()
* method. The method accept()s a new connection and then calls
* nxt_event_conn_accept() to handle the new connection and to prepare
* for a next connection to avoid just dropping next accept()ed socket
* if no more connections allowed. If there are no available connections
* an idle connection would be closed. If there are no idle connections
* then new connections will not be accept()ed for 1 second.
*/
static nxt_event_conn_t *nxt_event_conn_accept_alloc(nxt_thread_t *thr,
nxt_event_conn_listen_t *cls);
static void nxt_event_conn_listen_handler(nxt_thread_t *thr, void *obj,
void *data);
static nxt_event_conn_t *nxt_event_conn_accept_next(nxt_thread_t *thr,
nxt_event_conn_listen_t *cls);
static nxt_int_t nxt_event_conn_accept_close_idle(nxt_thread_t *thr,
nxt_event_conn_listen_t *cls);
static void nxt_event_conn_listen_event_error(nxt_thread_t *thr, void *obj,
void *data);
static void nxt_event_conn_listen_timer_handler(nxt_thread_t *thr, void *obj,
void *data);
/*
 * Creates the per-thread listen state for a listen socket, pre-allocates
 * the first connection, and enables accept events on the engine.
 */
nxt_int_t
nxt_event_conn_listen(nxt_thread_t *thr, nxt_listen_socket_t *ls)
{
    nxt_event_conn_listen_t  *cls;

    cls = nxt_zalloc(sizeof(nxt_event_conn_listen_t));

    if (nxt_fast_path(cls != NULL)) {
        cls->socket.fd = ls->socket;

        cls->batch = thr->engine->batch;

        if (cls->batch != 0) {
            cls->socket.read_work_queue = &thr->engine->accept_work_queue;

        } else {
            cls->socket.read_work_queue = &thr->work_queue.main;
            /* A non-batching engine still accepts one connection per event. */
            cls->batch = 1;
        }

        cls->socket.read_handler = nxt_event_conn_listen_handler;
        cls->socket.error_handler = nxt_event_conn_listen_event_error;
        cls->socket.log = &nxt_main_log;

        cls->accept = thr->engine->event->io->accept;

        cls->listen = ls;

        cls->timer.work_queue = &thr->work_queue.main;
        cls->timer.handler = nxt_event_conn_listen_timer_handler;
        cls->timer.log = &nxt_main_log;

        nxt_event_timer_ident(&cls->timer, cls->socket.fd);

        if (nxt_event_conn_accept_alloc(thr, cls) != NULL) {
            nxt_event_fd_enable_accept(thr->engine, &cls->socket);

            nxt_queue_insert_head(&thr->engine->listen_connections, &cls->link);
        }

        /*
         * NOTE(review): NXT_OK is returned even when the initial
         * connection allocation above fails; in that case accept events
         * are never enabled and no retry timer is armed -- confirm this
         * is intentional.
         */
        return NXT_OK;
    }

    return NXT_ERROR;
}
/*
 * Pre-allocates a connection (with its pool and remote sockaddr) for the
 * next accept() and parks it on cls->socket.data.  Returns NULL when the
 * engine's connection limit is reached or the pool allocation fails.
 */
static nxt_event_conn_t *
nxt_event_conn_accept_alloc(nxt_thread_t *thr, nxt_event_conn_listen_t *cls)
{
    nxt_sockaddr_t       *sa, *remote;
    nxt_mem_pool_t       *mp;
    nxt_event_conn_t     *c;
    nxt_listen_socket_t  *ls;

    if (thr->engine->connections < thr->engine->max_connections) {

        mp = nxt_mem_pool_create(cls->listen->mem_pool_size);

        if (nxt_fast_path(mp != NULL)) {
            /* This allocation cannot fail. */
            c = nxt_event_conn_create(mp, cls->socket.log);

            cls->socket.data = c;
            c->socket.read_work_queue = cls->socket.read_work_queue;
            c->socket.write_ready = 1;

            ls = cls->listen;
            c->listen = ls;

            /* This allocation cannot fail. */
            remote = nxt_sockaddr_alloc(mp, ls->socklen);
            c->remote = remote;

            sa = ls->sockaddr;
            remote->type = sa->type;
            /*
             * Set address family for unspecified Unix domain,
             * because these sockaddr's are not be passed to accept().
             */
            remote->u.sockaddr.sa_family = sa->u.sockaddr.sa_family;

            return c;
        }
    }

    return NULL;
}
/* Read event on the listen socket: allow a fresh batch of accept()s. */
static void
nxt_event_conn_listen_handler(nxt_thread_t *thr, void *obj, void *data)
{
    nxt_event_conn_listen_t  *cls;

    cls = obj;

    cls->ready = cls->batch;

    cls->accept(thr, cls, data);
}
/*
 * accept()s one connection into the pre-allocated c (passed as data) and
 * hands it to nxt_event_conn_accept().  accept() errors are routed to
 * nxt_event_conn_accept_error().
 */
void
nxt_event_conn_io_accept(nxt_thread_t *thr, void *obj, void *data)
{
    socklen_t                len;
    nxt_socket_t             s;
    struct sockaddr          *sa;
    nxt_event_conn_t         *c;
    nxt_event_conn_listen_t  *cls;

    cls = obj;
    c = data;

    cls->ready--;
    cls->socket.read_ready = (cls->ready != 0);

    len = nxt_socklen(c->remote);

    if (len >= sizeof(struct sockaddr)) {
        sa = &c->remote->u.sockaddr;

    } else {
        /* Unspecified Unix domain sockaddr: do not ask for a peer address. */
        sa = NULL;
        len = 0;
    }

    s = accept(cls->socket.fd, sa, &len);

    if (s == -1) {
        nxt_event_conn_accept_error(thr, cls, "accept", nxt_socket_errno);
        return;
    }

    c->socket.fd = s;

#if (NXT_LINUX)
    /*
     * Linux does not inherit non-blocking mode
     * from listen socket for accept()ed socket.
     */
    if (nxt_slow_path(nxt_socket_nonblocking(s) != NXT_OK)) {
        nxt_socket_close(s);
        /*
         * Fix: the descriptor has just been closed, so the connection
         * must not be processed further; previously execution fell
         * through and nxt_event_conn_accept() operated on a closed fd.
         */
        c->socket.fd = -1;
        return;
    }
#endif

    nxt_log_debug(thr->log, "accept(%d): %d", cls->socket.fd, s);

    nxt_event_conn_accept(thr, cls, c);
}
/*
 * Finishes setup of an accept()ed connection, runs (or queues) the listen
 * socket's handler, and pre-allocates the next connection to keep
 * draining a ready accept backlog.
 */
void
nxt_event_conn_accept(nxt_thread_t *thr, nxt_event_conn_listen_t *cls,
    nxt_event_conn_t *c)
{
    nxt_event_conn_t  *next;

    nxt_event_timer_ident(&c->read_timer, c->socket.fd);
    nxt_event_timer_ident(&c->write_timer, c->socket.fd);

    /* This allocation cannot fail. */
    (void) nxt_sockaddr_text(c->mem_pool, c->remote, 0);

    nxt_log_debug(c->socket.log, "client: %*s",
                  c->remote->text_len, c->remote->text);

    /* New connections are idle until their handler takes them over. */
    nxt_queue_insert_head(&thr->engine->idle_connections, &c->link);

    c->read_work_queue = c->listen->work_queue;
    c->write_work_queue = c->listen->work_queue;

    if (c->listen->read_after_accept) {

        //c->socket.read_ready = 1;
        /* Run the handler in place, temporarily borrowing the conn's log. */
        thr->log = c->socket.log;
        c->listen->handler(thr, c, NULL);
        thr->log = cls->socket.log;

    } else {
        nxt_thread_work_queue_add(thr, c->write_work_queue,
                                  c->listen->handler, c, NULL, c->socket.log);
    }

    next = nxt_event_conn_accept_next(thr, cls);

    /* More connections pending: queue another accept pass. */
    if (next != NULL && cls->socket.read_ready) {
        nxt_thread_work_queue_add(thr, cls->socket.read_work_queue,
                                  cls->accept, cls, next, cls->socket.log);
    }
}
/*
 * Pre-allocates the next connection, closing idle connections until one
 * can be allocated.  Returns NULL (and a logged alert) when accepting
 * has been paused because nothing could be freed.
 */
static nxt_event_conn_t *
nxt_event_conn_accept_next(nxt_thread_t *thr, nxt_event_conn_listen_t *cls)
{
    nxt_event_conn_t  *c;

    cls->socket.data = NULL;

    for ( ;; ) {
        c = nxt_event_conn_accept_alloc(thr, cls);

        if (nxt_fast_path(c != NULL)) {
            return c;
        }

        if (nxt_event_conn_accept_close_idle(thr, cls) != NXT_OK) {
            break;
        }
    }

    nxt_log_alert(cls->socket.log, "no available connections, "
                  "new connections are not accepted within 1s");

    return NULL;
}
/*
 * Tries to free resources by closing one idle connection.  Returns
 * NXT_OK if a connection was closed; otherwise pauses accepting for 1s
 * and returns NXT_DECLINED.
 */
static nxt_int_t
nxt_event_conn_accept_close_idle(nxt_thread_t *thr,
    nxt_event_conn_listen_t *cls)
{
    nxt_queue_t       *idle;
    nxt_queue_link_t  *link;
    nxt_event_conn_t  *c;

    static nxt_log_moderation_t  nxt_idle_close_log_moderation = {
        NXT_LOG_INFO, 2, "idle connections closed", NXT_LOG_MODERATION
    };

    idle = &thr->engine->idle_connections;

    /*
     * NOTE(review): the loop starts at the last element but advances
     * with nxt_queue_next(); depending on nxt_queue's sentinel layout
     * this may inspect only the last (oldest) connection before
     * terminating -- confirm whether nxt_queue_prev() was intended.
     */
    for (link = nxt_queue_last(idle);
         link != nxt_queue_head(idle);
         link = nxt_queue_next(link))
    {
        c = nxt_queue_link_data(link, nxt_event_conn_t, link);

        /* A connection with pending input is not safe to discard. */
        if (!c->socket.read_ready) {
            nxt_log_moderate(&nxt_idle_close_log_moderation, NXT_LOG_INFO,
                             thr->log, "no available connections, "
                             "close idle connection");
            nxt_queue_remove(link);
            nxt_event_conn_close(thr, c);
            return NXT_OK;
        }
    }

    /* Nothing to close: pause accepting for one second. */
    nxt_event_timer_add(thr->engine, &cls->timer, 1000);
    nxt_event_fd_disable_read(thr->engine, &cls->socket);

    return NXT_DECLINED;
}
/*
 * Classifies accept() failures: EAGAIN is silent, aborted connections
 * are rate-limited, resource exhaustion triggers closing of an idle
 * connection, anything else is an alert.
 */
void
nxt_event_conn_accept_error(nxt_thread_t *thr, nxt_event_conn_listen_t *cls,
    const char *accept_syscall, nxt_err_t err)
{
    static nxt_log_moderation_t  nxt_accept_log_moderation = {
        NXT_LOG_INFO, 2, "accept() failed", NXT_LOG_MODERATION
    };

    cls->socket.read_ready = 0;

    if (err == NXT_EAGAIN) {
        /* Spurious readiness: just wait for the next event. */
        nxt_log_debug(thr->log, "%s(%d) %E",
                      accept_syscall, cls->socket.fd, err);
        return;
    }

    if (err == ECONNABORTED) {
        /* The client disconnected before accept(); rate-limit the log. */
        nxt_log_moderate(&nxt_accept_log_moderation, NXT_LOG_INFO,
                         thr->log, "%s(%d) failed %E",
                         accept_syscall, cls->socket.fd, err);
        return;
    }

    switch (err) {

    case EMFILE:
    case ENFILE:
    case ENOBUFS:
    case ENOMEM:
        /* Resource exhaustion: try to free an idle connection. */
        if (nxt_event_conn_accept_close_idle(thr, cls) != NXT_OK) {
            nxt_log_alert(thr->log, "%s(%d) failed %E, "
                          "new connections are not accepted within 1s",
                          accept_syscall, cls->socket.fd, err);
        }

        return;

    default:
        nxt_log_alert(thr->log, "%s(%d) failed %E",
                      accept_syscall, cls->socket.fd, err);
        return;
    }
}
/* Fires after the 1s accept pause: re-arm accepting if possible. */
static void
nxt_event_conn_listen_timer_handler(nxt_thread_t *thr, void *obj, void *data)
{
    nxt_event_conn_t         *c;
    nxt_event_timer_t        *ev;
    nxt_event_conn_listen_t  *cls;

    ev = obj;
    cls = nxt_event_timer_data(ev, nxt_event_conn_listen_t, timer);

    c = cls->socket.data;

    if (c == NULL) {
        c = nxt_event_conn_accept_next(thr, cls);
    }

    if (c != NULL) {
        nxt_event_fd_enable_accept(thr->engine, &cls->socket);

        cls->accept(thr, cls, c);
    }
}
/* Error event on the listen socket itself: only log it. */
static void
nxt_event_conn_listen_event_error(nxt_thread_t *thr, void *obj, void *data)
{
    nxt_event_fd_t  *ef;

    ef = obj;

    nxt_log_alert(thr->log, "accept(%d) event error", ef->fd);
}

View File

@@ -0,0 +1,213 @@
/*
* Copyright (C) Igor Sysoev
* Copyright (C) NGINX, Inc.
*/
#include <nxt_main.h>
/*
 * Starts an outgoing connection: in batch mode via the engine's socket
 * work queue, otherwise by creating the socket and connecting directly.
 */
void
nxt_event_conn_connect(nxt_thread_t *thr, nxt_event_conn_t *c)
{
    void  *data;

    data = c->socket.data;

    if (thr->engine->batch != 0) {
        nxt_thread_work_queue_add(thr, &thr->engine->socket_work_queue,
                                  nxt_event_conn_batch_socket, c, data,
                                  c->socket.log);

    } else if (nxt_event_conn_socket(thr, c) == NXT_OK) {
        c->io->connect(thr, c, data);

    } else {
        c->write_state->error_handler(thr, c, data);
    }
}
/* Batched socket creation step; queues connect or error continuation. */
void
nxt_event_conn_batch_socket(nxt_thread_t *thr, void *obj, void *data)
{
    nxt_event_conn_t    *c;
    nxt_work_handler_t  next;

    c = obj;

    if (nxt_event_conn_socket(thr, c) != NXT_OK) {
        next = c->write_state->error_handler;

    } else {
        c->socket.write_work_queue = c->write_work_queue;
        next = c->io->connect;
    }

    nxt_thread_work_queue_add(thr, &thr->engine->connect_work_queue,
                              next, c, data, thr->log);
}
/*
 * Issues the non-blocking connect() and dispatches on its result; an
 * in-progress connect is completed later by nxt_event_conn_connect_test()
 * when the socket becomes writable.
 */
void
nxt_event_conn_io_connect(nxt_thread_t *thr, void *obj, void *data)
{
    nxt_event_conn_t              *c;
    nxt_work_handler_t            handler;
    const nxt_event_conn_state_t  *state;

    c = obj;
    state = c->write_state;

    switch (nxt_socket_connect(c->socket.fd, c->remote)) {

    case NXT_OK:
        c->socket.write_ready = 1;
        handler = state->ready_handler;
        break;

    case NXT_AGAIN:
        /* Connection in progress: test it when the socket is writable. */
        c->socket.write_handler = nxt_event_conn_connect_test;
        c->socket.error_handler = state->error_handler;

        nxt_event_conn_timer(thr->engine, c, state, &c->write_timer);

        nxt_event_fd_enable_write(thr->engine, &c->socket);
        return;

    case NXT_DECLINED:
        handler = state->close_handler;
        break;

    default: /* NXT_ERROR */
        handler = state->error_handler;
        break;
    }

    nxt_event_conn_io_handle(thr, c->write_work_queue, handler, c, data);
}
/*
 * Creates the non-blocking socket for an outgoing connection and binds
 * it to the optional local address.  Returns NXT_OK or NXT_ERROR.
 */
nxt_int_t
nxt_event_conn_socket(nxt_thread_t *thr, nxt_event_conn_t *c)
{
    nxt_socket_t  fd;
    nxt_uint_t    family;

    nxt_log_debug(thr->log, "event conn socket");

    family = c->remote->u.sockaddr.sa_family;

    fd = nxt_socket_create(family, c->remote->type, 0, NXT_NONBLOCK);

    if (nxt_slow_path(fd == -1)) {
        return NXT_ERROR;
    }

    c->sendfile = 1;

#if (NXT_HAVE_UNIX_DOMAIN && NXT_SOLARIS)

    if (family == AF_UNIX) {
        /* Solaris AF_UNIX does not support sendfilev(). */
        c->sendfile = 0;
    }

#endif

    c->socket.fd = fd;

    nxt_event_timer_ident(&c->read_timer, fd);
    nxt_event_timer_ident(&c->write_timer, fd);

    if (c->local != NULL
        && nxt_slow_path(nxt_socket_bind(fd, c->local, 0) != NXT_OK))
    {
        nxt_socket_close(fd);
        return NXT_ERROR;
    }

    return NXT_OK;
}
/*
 * Write-readiness handler for an in-progress connect(): checks
 * SO_ERROR and dispatches to the ready or error continuation.
 */
void
nxt_event_conn_connect_test(nxt_thread_t *thr, void *obj, void *data)
{
    int               ret, err;
    socklen_t         len;
    nxt_event_conn_t  *c;

    c = obj;

    nxt_log_debug(thr->log, "event connect test fd:%d", c->socket.fd);

    /* The connect outcome is known now; stop write event delivery. */
    nxt_event_fd_block_write(thr->engine, &c->socket);

    if (c->write_state->autoreset_timer) {
        nxt_event_timer_disable(&c->write_timer);
    }

    err = 0;
    len = sizeof(int);

    /*
     * Linux and BSDs return 0 and store a pending error in the err argument;
     * Solaris returns -1 and sets the errno.
     */
    ret = getsockopt(c->socket.fd, SOL_SOCKET, SO_ERROR, (void *) &err, &len);

    if (nxt_slow_path(ret == -1)) {
        err = nxt_errno;
    }

    if (err == 0) {
        /* The connection is established. */
        nxt_event_conn_io_handle(thr, c->write_work_queue,
                                 c->write_state->ready_handler, c, data);
        return;
    }

    c->socket.error = err;

    nxt_log_error(nxt_socket_error_level(err, c->socket.log_error), thr->log,
                  "connect(%d, %*s) failed %E",
                  c->socket.fd, c->remote->text_len, c->remote->text, err);

    nxt_event_conn_connect_error(thr, c, data);
}
/*
 * Maps a failed connect to a continuation: refusals are a "close",
 * every other error is an "error".
 */
void
nxt_event_conn_connect_error(nxt_thread_t *thr, void *obj, void *data)
{
    nxt_event_conn_t              *c;
    nxt_work_handler_t            next;
    const nxt_event_conn_state_t  *state;

    c = obj;
    state = c->write_state;

    switch (c->socket.error) {

    case NXT_ECONNREFUSED:
#if (NXT_LINUX)
    case NXT_EAGAIN:
        /*
         * Linux returns EAGAIN instead of ECONNREFUSED
         * for UNIX sockets if a listen queue is full.
         */
#endif
        next = state->close_handler;
        break;

    default:
        next = state->error_handler;
        break;
    }

    nxt_event_conn_io_handle(thr, c->write_work_queue, next, c, data);
}

View File

@@ -0,0 +1,268 @@
/*
* Copyright (C) Igor Sysoev
* Copyright (C) NGINX, Inc.
*/
#include <nxt_main.h>
#if (NXT_THREADS)
typedef struct {
    /* Base job; placed first so the struct can be passed as nxt_job_t. */
    nxt_job_t           job;
    /* Buffer chain detached from the connection for the job to send. */
    nxt_buf_t           *out;
    /* Total bytes sent by this job run. */
    size_t              sent;
    /* Remaining write rate limit for this run, in bytes. */
    size_t              limit;
    /* Called in the engine thread when the job returns. */
    nxt_work_handler_t  ready_handler;
} nxt_job_sendfile_t;
static void nxt_event_conn_job_sendfile_start(nxt_thread_t *thr, void *obj,
void *data);
static void nxt_event_conn_job_sendfile_handler(nxt_thread_t *thr, void *obj,
void *data);
static void nxt_event_conn_job_sendfile_return(nxt_thread_t *thr, void *obj,
void *data);
static nxt_buf_t *nxt_event_conn_job_sendfile_completion(nxt_thread_t *thr,
nxt_event_conn_t *c, nxt_buf_t *b);
/*
 * Offload sending of the connection's output chain to a thread pool job.
 * The write event is disabled first so the engine thread does not handle
 * write readiness while the job owns the socket.
 */
void
nxt_event_conn_job_sendfile(nxt_thread_t *thr, nxt_event_conn_t *c)
{
    nxt_event_fd_disable(thr->engine, &c->socket);

    /* A work item data is not used in nxt_event_conn_job_sendfile_start(). */
    nxt_event_conn_job_sendfile_start(thr, c, NULL);
}
/*
 * Create and start a sendfile job for the connection's pending output.
 * If the output contains nothing but sync buffers, or the write limit is
 * zero, no job is started and the return path runs immediately.
 */
static void
nxt_event_conn_job_sendfile_start(nxt_thread_t *thr, void *obj, void *data)
{
    nxt_iobuf_t             b;
    nxt_event_conn_t        *c;
    nxt_job_sendfile_t      *jbs;
    nxt_sendbuf_coalesce_t  sb;

    c = obj;

    nxt_log_debug(thr->log, "event conn sendfile fd:%d", c->socket.fd);

    jbs = nxt_job_create(c->mem_pool, sizeof(nxt_job_sendfile_t));

    if (nxt_slow_path(jbs == NULL)) {
        c->write_state->error_handler(thr, c, NULL);
        return;
    }

    c->socket.write_handler = nxt_event_conn_job_sendfile_start;
    c->socket.error_handler = c->write_state->error_handler;

    jbs->job.data = c;
    nxt_job_set_name(&jbs->job, "job sendfile");

    jbs->limit = nxt_event_conn_write_limit(c);

    if (jbs->limit != 0) {

        /* Probe the chain: a job is useful only if there is real data. */
        sb.buf = c->write;
        sb.iobuf = &b;
        sb.nmax = 1;
        sb.sync = 0;
        sb.size = 0;
        sb.limit = jbs->limit;

        if (nxt_sendbuf_mem_coalesce(&sb) != 0 || !sb.sync) {

            jbs->job.thread_pool = c->u.thread_pool;
            jbs->job.log = c->socket.log;

            /* The job takes ownership of the output chain. */
            jbs->out = c->write;
            c->write = NULL;
            jbs->ready_handler = nxt_event_conn_job_sendfile_return;

            /*
             * Block the connection and freeze the write timer while the
             * job runs in another thread.
             */
            c->blocked = 1;

            if (c->write_timer.state != NXT_EVENT_TIMER_DISABLED) {
                c->write_timer.state = NXT_EVENT_TIMER_BLOCKED;
            }

            nxt_job_start(thr, &jbs->job, nxt_event_conn_job_sendfile_handler);
            return;
        }
    }

    /* Nothing to offload: run the completion path in this thread. */
    nxt_event_conn_job_sendfile_return(thr, jbs, c);
}
/*
 * Thread pool part of the sendfile job: send the chain until it is
 * exhausted, the socket blocks, or the rate limit is reached, then
 * return the job to the engine thread.  The "fast" path reposts the
 * job to the pool to keep sending without a round trip to the engine.
 */
static void
nxt_event_conn_job_sendfile_handler(nxt_thread_t *thr, void *obj, void *data)
{
    ssize_t             ret;
    nxt_buf_t           *b;
    nxt_bool_t          first;
    nxt_event_conn_t    *c;
    nxt_job_sendfile_t  *jbs;

    jbs = obj;
    c = data;

    nxt_log_debug(thr->log, "event conn job sendfile fd:%d", c->socket.fd);

    /* Whether the socket was write-ready when the job started. */
    first = c->socket.write_ready;
    b = jbs->out;

    do {
        ret = c->io->sendbuf(c, b, jbs->limit);

        if (ret == NXT_AGAIN) {
            break;
        }

        if (nxt_slow_path(ret == NXT_ERROR)) {
            goto done;
        }

        jbs->sent += ret;
        jbs->limit -= ret;

        b = nxt_sendbuf_update(b, ret);

        if (b == NULL) {
            /* The whole chain has been sent. */
            goto done;
        }

        if (jbs->limit == 0) {

            if (c->rate == NULL) {
                /* No rate limiting: refill the quota and keep going. */
                jbs->limit = c->max_chunk;
                goto fast;
            }

            goto done;
        }

    } while (c->socket.write_ready);

    if (first && thr->thread_pool->work_queue.head != NULL) {
        /* Yield to other queued pool work, then continue sending. */
        goto fast;
    }

done:

    nxt_job_return(thr, &jbs->job, jbs->ready_handler);
    return;

fast:

    nxt_thread_pool_post(thr->thread_pool, nxt_event_conn_job_sendfile_handler,
                         jbs, c, thr->log);
}
/*
 * Engine-thread completion of a sendfile job: account sent bytes,
 * complete processed buffers, merge output added while the job ran,
 * restore timers/events, and invoke ready/error handlers.
 */
static void
nxt_event_conn_job_sendfile_return(nxt_thread_t *thr, void *obj, void *data)
{
    size_t              sent;
    nxt_buf_t           *b;
    nxt_bool_t          done;
    nxt_event_conn_t    *c;
    nxt_job_sendfile_t  *jbs;

    jbs = obj;
    c = data;

    c->blocked = 0;

    sent = jbs->sent;
    c->sent += sent;

    nxt_log_debug(thr->log, "event conn sendfile sent:%z", sent);

    b = jbs->out;

    /* The job must be destroyed before connection error handler. */
    nxt_job_destroy(jbs);

    if (c->write_state->process_buffers) {
        b = nxt_event_conn_job_sendfile_completion(thr, c, b);

        done = (b == NULL);

        /* Add data which might be added after sendfile job has started. */
        nxt_buf_chain_add(&b, c->write);
        c->write = b;

        if (done) {
            /* All data has been sent. */

            if (b != NULL) {
                /* But new data has been added. */
                nxt_event_conn_job_sendfile_start(thr, c, NULL);
            }

            return;
        }
    }

    if (sent != 0 && c->write_state->autoreset_timer) {
        nxt_event_timer_disable(&c->write_timer);

    } else if (c->write_timer.state == NXT_EVENT_TIMER_BLOCKED) {
        /* Unfreeze the timer frozen for the job's duration. */
        c->write_timer.state = NXT_EVENT_TIMER_ACTIVE;
    }

    if (c->socket.error == 0
        && !nxt_event_conn_write_delayed(thr->engine, c, sent))
    {
        /* Wait for write readiness to continue sending the rest. */
        nxt_event_conn_timer(thr->engine, c, c->write_state, &c->write_timer);

        nxt_event_fd_oneshot_write(thr->engine, &c->socket);
    }

    if (sent != 0) {
        nxt_event_conn_io_handle(thr, c->write_work_queue,
                                 c->write_state->ready_handler,
                                 c, c->socket.data);
        /*
         * Fall through if first operations were
         * successful but the last one failed.
         */
    }

    if (nxt_slow_path(c->socket.error != 0)) {
        nxt_event_conn_io_handle(thr, c->write_work_queue,
                                 c->write_state->error_handler,
                                 c, c->socket.data);
    }
}
/*
 * Queue completion handlers for fully-sent buffers at the head of the
 * chain and return the first buffer that still holds unsent data
 * (NULL if the entire chain was sent).
 */
static nxt_buf_t *
nxt_event_conn_job_sendfile_completion(nxt_thread_t *thr, nxt_event_conn_t *c,
    nxt_buf_t *b)
{
    nxt_bool_t  unsent;

    for ( /* void */ ; b != NULL; b = b->next) {
        nxt_prefetch(b->next);

        unsent = (nxt_buf_is_mem(b) && b->mem.pos != b->mem.free)
                 || (nxt_buf_is_file(b) && b->file_pos != b->file_end);

        if (unsent) {
            break;
        }

        nxt_thread_work_queue_add(thr, c->write_work_queue,
                                  b->completion_handler,
                                  b, b->parent, thr->log);
    }

    return b;
}
#endif

1034
src/nxt_event_conn_proxy.c Normal file

File diff suppressed because it is too large Load Diff

259
src/nxt_event_conn_read.c Normal file
View File

@@ -0,0 +1,259 @@
/*
* Copyright (C) Igor Sysoev
* Copyright (C) NGINX, Inc.
*/
#include <nxt_main.h>
/*
 * Start a read operation on the connection: run the I/O read handler
 * directly, or enqueue it on the engine read work queue when event
 * batching is enabled.
 */
void
nxt_event_conn_read(nxt_thread_t *thr, nxt_event_conn_t *c)
{
    nxt_work_handler_t  handler;

    handler = c->io->read;

    if (thr->engine->batch == 0) {
        handler(thr, c, c->socket.data);
        return;
    }

    c->socket.read_work_queue = &thr->engine->read_work_queue;

    nxt_thread_work_queue_add(thr, c->socket.read_work_queue, handler,
                              c, c->socket.data, c->socket.log);
}
/*
 * Generic connection read handler: if the socket is read-ready, perform
 * recv()/readv() (or MSG_PEEK when c->peek is set), update readiness
 * flags, and dispatch the read state's ready/close/error handler either
 * directly or via the read work queue when batching is enabled.
 * Otherwise (re)arm the read event and timer.
 */
void
nxt_event_conn_io_read(nxt_thread_t *thr, void *obj, void *data)
{
    ssize_t                       n;
    nxt_buf_t                     *b;
    nxt_bool_t                    batch;
    nxt_event_conn_t              *c;
    nxt_work_handler_t            handler;
    const nxt_event_conn_state_t  *state;

    c = obj;

    nxt_log_debug(thr->log, "event conn read fd:%d rdy:%d cl:%d",
                  c->socket.fd, c->socket.read_ready, c->socket.closed);

    batch = (thr->engine->batch != 0);
    state = c->read_state;

    if (c->socket.read_ready) {

        b = c->read;

        if (b == NULL) {
            /* Just test descriptor readiness. */
            goto ready;
        }

        if (c->peek == 0) {
            n = c->io->recvbuf(c, b);

        } else {
            /* Peek c->peek bytes without consuming them. */
            n = c->io->recv(c, b->mem.free, c->peek, MSG_PEEK);
        }

        if (n > 0) {
            c->nbytes = n;

            if (state->process_buffers) {
                nxt_recvbuf_update(b, n);

            } else {
                /*
                 * A ready_handler must not be queued, instead buffers
                 * must be processed by the ready_handler at once after
                 * recv() operation, otherwise two sequentially queued
                 * recv() operations will read in the same buffers.
                 */
                batch = 0;
            }

            goto ready;
        }

        if (n != NXT_AGAIN) {
            /* EOF or fatal error: stop read events and the timer. */
            nxt_event_fd_block_read(thr->engine, &c->socket);
            nxt_event_timer_disable(&c->read_timer);

            if (n == 0) {
                handler = state->close_handler;
                goto done;
            }

            /* n == NXT_ERROR */
            handler = state->error_handler;
            goto done;
        }
    }

    /*
     * Here c->io->read() is assigned instead of direct
     * nxt_event_conn_io_read() because the function can
     * be called by nxt_kqueue_event_conn_io_read().
     */
    c->socket.read_handler = c->io->read;
    c->socket.error_handler = state->error_handler;

    if (c->read_timer.state == NXT_EVENT_TIMER_DISABLED
        || nxt_event_fd_is_disabled(c->socket.read))
    {
        /* Timer may be set or reset. */
        nxt_event_conn_timer(thr->engine, c, state, &c->read_timer);

        if (nxt_event_fd_is_disabled(c->socket.read)) {
            nxt_event_fd_enable_read(thr->engine, &c->socket);
        }
    }

    return;

ready:

    nxt_event_fd_block_read(thr->engine, &c->socket);

    if (state->autoreset_timer) {
        nxt_event_timer_disable(&c->read_timer);
    }

    handler = state->ready_handler;

done:

    if (batch) {
        nxt_thread_work_queue_add(thr, c->read_work_queue, handler,
                                  c, data, thr->log);

    } else {
        handler(thr, c, data);
    }
}
/*
 * Read into a buffer chain with readv(), retrying on EINTR.
 * Returns the number of bytes read, 0 on EOF (c->socket.closed is set),
 * NXT_AGAIN if the socket would block, or NXT_ERROR on a fatal error
 * (stored in c->socket.error).
 */
ssize_t
nxt_event_conn_io_recvbuf(nxt_event_conn_t *c, nxt_buf_t *b)
{
    ssize_t                 n;
    nxt_err_t               err;
    nxt_uint_t              niov;
    struct iovec            iov[NXT_IOBUF_MAX];
    nxt_recvbuf_coalesce_t  rb;

    rb.buf = b;
    rb.iobuf = iov;
    rb.nmax = NXT_IOBUF_MAX;
    rb.size = 0;

    niov = nxt_recvbuf_mem_coalesce(&rb);

    if (niov == 1) {
        /* Disposal of surplus kernel iovec copy-in operation. */
        return nxt_event_conn_io_recv(c, iov->iov_base, iov->iov_len, 0);
    }

    for ( ;; ) {
        n = readv(c->socket.fd, iov, niov);

        err = (n == -1) ? nxt_socket_errno : 0;

        nxt_log_debug(c->socket.log, "readv(%d, %ui): %z",
                      c->socket.fd, niov, n);

        if (n > 0) {
            /* A short read means no more data is pending right now. */
            if ((size_t) n < rb.size) {
                c->socket.read_ready = 0;
            }

            return n;
        }

        if (n == 0) {
            /* EOF from the peer. */
            c->socket.closed = 1;
            c->socket.read_ready = 0;
            return n;
        }

        /* n == -1 */

        switch (err) {

        case NXT_EAGAIN:
            nxt_log_debug(c->socket.log, "readv() %E", err);
            c->socket.read_ready = 0;
            return NXT_AGAIN;

        case NXT_EINTR:
            nxt_log_debug(c->socket.log, "readv() %E", err);
            continue;

        default:
            c->socket.error = err;
            nxt_log_error(nxt_socket_error_level(err, c->socket.log_error),
                          c->socket.log, "readv(%d, %ui) failed %E",
                          c->socket.fd, niov, err);
            return NXT_ERROR;
        }
    }
}
/*
 * Read from the connection socket with recv(), retrying on EINTR.
 * Returns bytes read, 0 on EOF (c->socket.closed is set), NXT_AGAIN
 * if the socket would block, or NXT_ERROR on a fatal error.
 */
ssize_t
nxt_event_conn_io_recv(nxt_event_conn_t *c, void *buf, size_t size,
    nxt_uint_t flags)
{
    ssize_t    n;
    nxt_err_t  err;

    for ( ;; ) {
        n = recv(c->socket.fd, buf, size, flags);

        err = (n == -1) ? nxt_socket_errno : 0;

        nxt_log_debug(c->socket.log, "recv(%d, %p, %uz, 0x%ui): %z",
                      c->socket.fd, buf, size, flags, n);

        if (n >= 0) {

            if (n == 0) {
                /* EOF from the peer. */
                c->socket.closed = 1;
                c->socket.read_ready = 0;

            } else if ((size_t) n < size) {
                /* Short read: no more data pending right now. */
                c->socket.read_ready = 0;
            }

            return n;
        }

        /* n == -1 */

        if (err == NXT_EAGAIN) {
            nxt_log_debug(c->socket.log, "recv() %E", err);
            c->socket.read_ready = 0;
            return NXT_AGAIN;
        }

        if (err != NXT_EINTR) {
            c->socket.error = err;
            nxt_log_error(nxt_socket_error_level(err, c->socket.log_error),
                          c->socket.log, "recv(%d, %p, %uz, %ui) failed %E",
                          c->socket.fd, buf, size, flags, err);
            return NXT_ERROR;
        }

        /* NXT_EINTR: retry the call. */
        nxt_log_debug(c->socket.log, "recv() %E", err);
    }
}

431
src/nxt_event_conn_write.c Normal file
View File

@@ -0,0 +1,431 @@
/*
* Copyright (C) Igor Sysoev
* Copyright (C) NGINX, Inc.
*/
#include <nxt_main.h>
static void nxt_event_conn_average_rate_update(nxt_event_write_rate_t *rate,
size_t sent, nxt_msec_t now);
NXT_LIB_UNIT_TEST_STATIC double
nxt_event_conn_exponential_approximation(double n);
static void nxt_event_conn_write_timer_handler(nxt_thread_t *thr, void *obj,
void *data);
/*
 * Start a write operation on the connection: run the I/O write handler
 * directly, or enqueue it when event batching is enabled.
 */
void
nxt_event_conn_write(nxt_thread_t *thr, nxt_event_conn_t *c)
{
    if (thr->engine->batch == 0) {
        c->io->write(thr, c, c->socket.data);
        return;
    }

    nxt_event_conn_write_enqueue(thr, c);
}
/*
 * Generic connection write handler: send the pending output chain in
 * rate-limited chunks, update the write timer and events, and dispatch
 * the write state's ready/error handlers.
 */
void
nxt_event_conn_io_write(nxt_thread_t *thr, void *obj, void *data)
{
    size_t              sent, limit;
    ssize_t             ret;
    nxt_buf_t           *b;
    nxt_event_conn_t    *c;

    c = obj;

    nxt_log_debug(thr->log, "event conn write fd:%d", c->socket.fd);

    if (!c->socket.write_ready || c->delayed || c->write == NULL) {
        /* Nothing can or should be written now. */
        return;
    }

    c->socket.write_handler = nxt_event_conn_io_write;
    c->socket.error_handler = c->write_state->error_handler;

    /* NXT_DECLINED means no write_chunk() call was made at all. */
    ret = NXT_DECLINED;
    sent = 0;
    b = c->write;

    limit = nxt_event_conn_write_limit(c);

    while (limit != 0) {

        ret = c->io->write_chunk(thr, c, b, limit);

        if (ret < 0) {
            /* ret == NXT_AGAIN || ret == NXT_ERROR. */
            break;
        }

        sent += ret;
        limit -= ret;

        if (c->write_state->process_buffers) {
            /* Queue completion handlers for fully-sent buffers. */
            b = nxt_sendbuf_completion(thr, c->write_work_queue, b, ret);
            c->write = b;

        } else {
            b = nxt_sendbuf_update(b, ret);
        }

        if (b == NULL) {
            nxt_event_fd_block_write(thr->engine, &c->socket);
            break;
        }

        if (!c->socket.write_ready) {
            ret = NXT_AGAIN;
            break;
        }
    }

    nxt_log_debug(thr->log, "event conn: %i sent:%z", ret, sent);

    if (sent != 0) {
        if (c->write_state->autoreset_timer) {
            nxt_event_timer_disable(&c->write_timer);
        }
    }

    if (ret != NXT_ERROR
        && !nxt_event_conn_write_delayed(thr->engine, c, sent))
    {
        if (limit == 0) {
            /*
             * Postpone writing until next event poll to allow to
             * process other received events and to get new events.
             */
            c->write_timer.handler = nxt_event_conn_write_timer_handler;
            nxt_event_timer_add(thr->engine, &c->write_timer, 0);

        } else if (ret == NXT_AGAIN) {
            /*
             * SSL libraries can require to toggle either write or read
             * event if renegotiation occurs during SSL write operation.
             * This case is handled on the event_io->send() level.  Timer
             * can be set here because it should be set only for write
             * direction.
             */
            nxt_event_conn_timer(thr->engine, c, c->write_state,
                                 &c->write_timer);
        }
    }

    if (ret == 0 || sent != 0) {
        /* "ret == 0" means a sync buffer was processed. */
        c->sent += sent;
        nxt_event_conn_io_handle(thr, c->write_work_queue,
                                 c->write_state->ready_handler, c, data);
        /*
         * Fall through if first operations were
         * successful but the last one failed.
         */
    }

    if (nxt_slow_path(ret == NXT_ERROR)) {
        nxt_event_fd_block_write(thr->engine, &c->socket);

        nxt_event_conn_io_handle(thr, c->write_work_queue,
                                 c->write_state->error_handler, c, data);
    }
}
/*
 * Compute how many bytes the connection may write right now.
 * Without rate limiting this is simply c->max_chunk; with rate limiting
 * the current average rate is used as negative feedback against the
 * configured limit, plus an initial "limit_after" burst allowance.
 */
size_t
nxt_event_conn_write_limit(nxt_event_conn_t *c)
{
    ssize_t                 limit, correction;
    nxt_event_write_rate_t  *rate;

    rate = c->rate;

    if (rate == NULL) {
        return c->max_chunk;
    }

    limit = rate->limit;

    /*
     * Negative feedback: the higher the measured average, the smaller
     * the next quota.  NOTE(review): rate->average (double) is cast to
     * size_t before the signed subtraction — assumes the average fits
     * in size_t; confirm for very large configured rates.
     */
    correction = limit - (size_t) rate->average;

    nxt_log_debug(c->socket.log, "event conn correction:%z average:%0.3f",
                  correction, rate->average);

    limit += correction;

    if (limit <= 0) {
        return 0;
    }

    if (rate->limit_after != 0) {
        /* Allow an initial burst before strict limiting kicks in. */
        limit += rate->limit_after;
        limit = nxt_min((size_t) limit, rate->max_limit);
    }

    return nxt_min((size_t) limit, c->max_chunk);
}
/*
 * Update the average write rate and, if rate limiting requires a pause,
 * delay further writes with a timer.  Returns 1 if the connection was
 * delayed (write events are blocked and a timer is set), 0 otherwise.
 */
nxt_bool_t
nxt_event_conn_write_delayed(nxt_event_engine_t *engine, nxt_event_conn_t *c,
    size_t sent)
{
    nxt_msec_t              timer;
    nxt_event_write_rate_t  *rate;

    rate = c->rate;

    if (rate != NULL) {
        nxt_event_conn_average_rate_update(rate, sent, engine->timers.now);

        if (rate->limit_after == 0) {
            /* Steady state: pause proportionally to bytes sent. */
            timer = sent * 1000 / rate->limit;

        } else if (rate->limit_after >= sent) {
            /* Still inside the initial burst allowance. */
            timer = sent * 1000 / rate->max_limit;
            rate->limit_after -= sent;

        } else {
            /* The send crossed the burst boundary: split the delay. */
            sent -= rate->limit_after;
            timer = rate->limit_after * 1000 / rate->max_limit
                    + sent * 1000 / rate->limit;
            rate->limit_after = 0;
        }

        nxt_log_debug(c->socket.log, "event conn timer: %M", timer);

        if (timer != 0) {
            c->delayed = 1;
            nxt_event_fd_block_write(engine, &c->socket);
            c->write_timer.handler = nxt_event_conn_write_timer_handler;
            nxt_event_timer_add(engine, &c->write_timer, timer);
            return 1;
        }
    }

    return 0;
}
/* Exponentially weighted moving average rate for a given interval. */

static void
nxt_event_conn_average_rate_update(nxt_event_write_rate_t *rate, size_t sent,
    nxt_msec_t now)
{
    double       weight, delta;
    nxt_msec_t   elapsed;
    const nxt_uint_t  interval = 10;  /* 10s */

    elapsed = now - rate->last;

    if (elapsed == 0) {
        /* Called twice within the same millisecond: nothing to average. */
        return;
    }

    rate->last = now;

    /* Elapsed time in seconds. */
    delta = (double) elapsed / 1000;

    /* EWMA: weight decays exponentially with elapsed time. */
    weight = nxt_event_conn_exponential_approximation(-delta / interval);

    rate->average = (1 - weight) * sent / delta + weight * rate->average;

    nxt_thread_log_debug("event conn delta:%0.3f, weight:%0.3f, average:%0.3f",
                         delta, weight, rate->average);
}
/*
 * exp() takes tens or hundreds nanoseconds on modern CPU.
 * This is a faster exp() approximation based on IEEE-754 format
 * layout and described in "A Fast, Compact Approximation of
 * the Exponential Function" by N. N. Schraudolph, 1999.
 */

NXT_LIB_UNIT_TEST_STATIC double
nxt_event_conn_exponential_approximation(double x)
{
    /* Type-pun the constructed bit pattern into a double. */
    union {
        double   d;
        int64_t  n;
    } exp;

    if (x < -100) {
        /*
         * The approximation is correct in -700 to 700 range.
         * The "x" argument is always negative.
         */
        return 0;
    }

    /*
     * x * 2^52 / ln(2) + (1023 * 2^52 - 261140389990637.73)
     *
     * 52 is the number of mantissa bits;
     * 1023 is the exponent bias;
     * 261140389990637.73 is the adjustment parameter to
     * improve the approximation.  The parameter is equal to
     *
     *     2^52 * ln[ 3 / (8 * ln(2)) + 0.5 ] / ln(2)
     *
     * Only significant digits of the double float format
     * are used to present the double float constants.
     */
    exp.n = x * 4503599627370496.0 / 0.69314718055994530
            + (4607182418800017408.0 - 261140389990637.73);

    return exp.d;
}
/*
 * Write timer handler: clears the rate-limit delay flag and resumes
 * the connection's write operation.
 */
static void
nxt_event_conn_write_timer_handler(nxt_thread_t *thr, void *obj, void *data)
{
    nxt_event_timer_t  *timer;
    nxt_event_conn_t   *conn;

    timer = obj;

    nxt_log_debug(thr->log, "event conn conn timer");

    conn = nxt_event_write_timer_conn(timer);
    conn->delayed = 0;

    conn->io->write(thr, conn, conn->socket.data);
}
/*
 * Send one chunk of the output chain and, if the socket blocked or is
 * no longer write-ready, enable the write event to resume later.
 */
ssize_t
nxt_event_conn_io_write_chunk(nxt_thread_t *thr, nxt_event_conn_t *c,
    nxt_buf_t *b, size_t limit)
{
    ssize_t  ret;

    ret = c->io->sendbuf(c, b, limit);

    if (ret != NXT_AGAIN && c->socket.write_ready) {
        return ret;
    }

    if (nxt_event_fd_is_disabled(c->socket.write)) {
        nxt_event_fd_enable_write(thr->engine, &c->socket);
    }

    return ret;
}
/*
 * Coalesce the output chain into an iovec array and write it out.
 * Returns 0 when only sync buffers were found (nothing to write).
 */
ssize_t
nxt_event_conn_io_sendbuf(nxt_event_conn_t *c, nxt_buf_t *b, size_t limit)
{
    nxt_uint_t              niob;
    struct iovec            iob[NXT_IOBUF_MAX];
    nxt_sendbuf_coalesce_t  sb;

    sb.buf = b;
    sb.iobuf = iob;
    sb.nmax = NXT_IOBUF_MAX;
    sb.sync = 0;
    sb.size = 0;
    sb.limit = limit;

    niob = nxt_sendbuf_mem_coalesce(&sb);

    if (niob != 0 || !sb.sync) {
        return nxt_event_conn_io_writev(c, iob, niob);
    }

    /* Only sync buffers: there is nothing to transmit. */
    return 0;
}
/*
 * Write an iovec array to the connection socket, retrying on EINTR.
 * Returns the number of bytes written, NXT_AGAIN if the socket is not
 * write-ready, or NXT_ERROR on a fatal error (stored in c->socket.error).
 */
ssize_t
nxt_event_conn_io_writev(nxt_event_conn_t *c, nxt_iobuf_t *iob, nxt_uint_t niob)
{
    ssize_t    n;
    nxt_err_t  err;

    if (niob == 1) {
        /* Disposal of surplus kernel iovec copy-in operation. */
        return nxt_event_conn_io_send(c, iob->iov_base, iob->iov_len);
    }

    for ( ;; ) {
        n = writev(c->socket.fd, iob, niob);

        err = (n == -1) ? nxt_socket_errno : 0;

        /*
         * "n" is ssize_t, so it is logged with "%z" as in the readv()
         * counterpart nxt_event_conn_io_recvbuf(); "%d" was wrong for
         * ssize_t on LP64 platforms.
         */
        nxt_log_debug(c->socket.log, "writev(%d, %ui): %z",
                      c->socket.fd, niob, n);

        if (n > 0) {
            return n;
        }

        /* n == -1 */

        switch (err) {

        case NXT_EAGAIN:
            nxt_log_debug(c->socket.log, "writev() %E", err);
            c->socket.write_ready = 0;
            return NXT_AGAIN;

        case NXT_EINTR:
            nxt_log_debug(c->socket.log, "writev() %E", err);
            continue;

        default:
            c->socket.error = err;
            nxt_log_error(nxt_socket_error_level(err, c->socket.log_error),
                          c->socket.log, "writev(%d, %ui) failed %E",
                          c->socket.fd, niob, err);
            return NXT_ERROR;
        }
    }
}
/*
 * Write a single buffer to the connection socket with send(),
 * retrying on EINTR.  Returns bytes written, NXT_AGAIN if the
 * socket would block, or NXT_ERROR on a fatal error.
 */
ssize_t
nxt_event_conn_io_send(nxt_event_conn_t *c, void *buf, size_t size)
{
    ssize_t    n;
    nxt_err_t  err;

    for ( ;; ) {
        n = send(c->socket.fd, buf, size, 0);

        err = (n == -1) ? nxt_socket_errno : 0;

        nxt_log_debug(c->socket.log, "send(%d, %p, %uz): %z",
                      c->socket.fd, buf, size, n);

        if (n > 0) {
            return n;
        }

        /* n == -1 */

        if (err == NXT_EAGAIN) {
            nxt_log_debug(c->socket.log, "send() %E", err);
            c->socket.write_ready = 0;
            return NXT_AGAIN;
        }

        if (err != NXT_EINTR) {
            c->socket.error = err;
            nxt_log_error(nxt_socket_error_level(err, c->socket.log_error),
                          c->socket.log, "send(%d, %p, %uz) failed %E",
                          c->socket.fd, buf, size, err);
            return NXT_ERROR;
        }

        /* NXT_EINTR: retry the call. */
        nxt_log_debug(c->socket.log, "send() %E", err);
    }
}

526
src/nxt_event_engine.c Normal file
View File

@@ -0,0 +1,526 @@
/*
* Copyright (C) Igor Sysoev
* Copyright (C) NGINX, Inc.
*/
#include <nxt_main.h>
static nxt_int_t nxt_event_engine_post_init(nxt_thread_t *thr,
nxt_event_engine_t *engine);
static nxt_int_t nxt_event_engine_signal_pipe_create(nxt_thread_t *thr,
nxt_event_engine_t *engine);
static void nxt_event_engine_signal_pipe_close(nxt_thread_t *thr, void *obj,
void *data);
static void nxt_event_engine_signal_pipe(nxt_thread_t *thr, void *obj,
void *data);
static void nxt_event_engine_post_handler(nxt_thread_t *thr, void *obj,
void *data);
static void nxt_event_engine_signal_pipe_error(nxt_thread_t *thr, void *obj,
void *data);
static void nxt_event_engine_signal_handler(nxt_thread_t *thr, void *obj,
void *data);
static const nxt_event_sig_t *nxt_event_engine_signal_find(nxt_thread_t *thr,
nxt_uint_t signo);
/*
 * Allocate and initialize an event engine: work queues, optional fiber
 * support, signal handling, the event set (epoll/kqueue/...), and timers.
 * Returns NULL on failure; cleanup is performed via the goto ladder in
 * reverse order of initialization.
 */
nxt_event_engine_t *
nxt_event_engine_create(nxt_thread_t *thr, const nxt_event_set_ops_t *event_set,
    const nxt_event_sig_t *signals, nxt_uint_t flags, nxt_uint_t batch)
{
    nxt_uint_t          events;
    nxt_event_engine_t  *engine;

    engine = nxt_zalloc(sizeof(nxt_event_engine_t));
    if (engine == NULL) {
        return NULL;
    }

    engine->batch = batch;

    if (flags & NXT_ENGINE_FIBERS) {
        engine->fibers = nxt_fiber_main_create(engine);
        if (engine->fibers == NULL) {
            goto fibers_fail;
        }
    }

    nxt_thread_work_queue_create(thr, 0);

    /* Name the engine's work queues for debug logging. */
    nxt_work_queue_name(&engine->accept_work_queue, "accept");
    nxt_work_queue_name(&engine->read_work_queue, "read");
    nxt_work_queue_name(&engine->socket_work_queue, "socket");
    nxt_work_queue_name(&engine->connect_work_queue, "connect");
    nxt_work_queue_name(&engine->write_work_queue, "write");
    nxt_work_queue_name(&engine->shutdown_work_queue, "shutdown");
    nxt_work_queue_name(&engine->close_work_queue, "close");

#if (NXT_THREADS)
    nxt_locked_work_queue_create(&engine->work_queue, 7);
#endif

    if (signals != NULL) {
        engine->signals = nxt_event_engine_signals(signals);
        if (engine->signals == NULL) {
            goto signals_fail;
        }

        engine->signals->handler = nxt_event_engine_signal_handler;

        if (!event_set->signal_support) {
            /* Fall back to a dedicated signal thread/pipe. */
            if (nxt_event_engine_signals_start(engine) != NXT_OK) {
                goto signals_fail;
            }
        }
    }

    /*
     * Number of event set and timers changes should be at least twice
     * more than number of events to avoid premature flushes of the changes.
     * Fourfold is for sure.
     */
    events = (batch != 0) ? batch : 32;

    engine->event_set = event_set->create(engine->signals, 4 * events, events);
    if (engine->event_set == NULL) {
        goto event_set_fail;
    }

    engine->event = event_set;

    if (nxt_event_engine_post_init(thr, engine) != NXT_OK) {
        goto post_fail;
    }

    if (nxt_event_timers_init(&engine->timers, 4 * events) != NXT_OK) {
        goto timers_fail;
    }

    nxt_thread_time_update(thr);
    engine->timers.now = nxt_thread_monotonic_time(thr) / 1000000;

    engine->max_connections = 0xffffffff;

    nxt_queue_init(&engine->listen_connections);
    nxt_queue_init(&engine->idle_connections);

    thr->engine = engine;
    thr->fiber = &engine->fibers->fiber;

#if !(NXT_THREADS)

    if (engine->event->signal_support) {
        thr->time.signal = -1;
    }

#endif

    return engine;

/* Cleanup in reverse order of initialization. */

timers_fail:
post_fail:

    event_set->free(engine->event_set);

event_set_fail:
signals_fail:

    nxt_free(engine->signals);
    nxt_thread_work_queue_destroy(thr);
    nxt_free(engine->fibers);

fibers_fail:

    nxt_free(engine);
    return NULL;
}
/*
 * Set up the mechanism used to post work to the engine from other
 * threads or signal context: either the event facility's native post
 * support, or a self-pipe as fallback.
 */
static nxt_int_t
nxt_event_engine_post_init(nxt_thread_t *thr, nxt_event_engine_t *engine)
{
    if (engine->event->enable_post != NULL) {
        return engine->event->enable_post(engine->event_set,
                                          nxt_event_engine_post_handler);
    }

#if !(NXT_THREADS)

    /* Only signals may be posted in single-threaded mode. */

    if (engine->event->signal_support) {
        return NXT_OK;
    }

#endif

    if (nxt_event_engine_signal_pipe_create(thr, engine) != NXT_OK) {
        return NXT_ERROR;
    }

    return NXT_OK;
}
/*
 * Create the engine's self-pipe used to deliver posts and signal
 * numbers, and register its read end in the event set.
 */
static nxt_int_t
nxt_event_engine_signal_pipe_create(nxt_thread_t *thr,
    nxt_event_engine_t *engine)
{
    nxt_event_engine_pipe_t  *pipe;

    pipe = nxt_zalloc(sizeof(nxt_event_engine_pipe_t));
    if (pipe == NULL) {
        return NXT_ERROR;
    }

    engine->pipe = pipe;

    /*
     * An event engine pipe is in blocking mode for writer
     * and in non-blocking mode for reader.
     */
    if (nxt_pipe_create(pipe->fds, 1, 0) != NXT_OK) {
        nxt_free(pipe);
        return NXT_ERROR;
    }

    pipe->event.fd = pipe->fds[0];
    pipe->event.read_work_queue = &thr->work_queue.main;
    pipe->event.read_handler = nxt_event_engine_signal_pipe;
    pipe->event.write_work_queue = &thr->work_queue.main;
    pipe->event.error_handler = nxt_event_engine_signal_pipe_error;
    pipe->event.log = &nxt_main_log;

    nxt_event_fd_enable_read(engine, &pipe->event);

    return NXT_OK;
}
/*
 * Release the engine's self-pipe: close its descriptors if it was
 * fully initialized, then free the structure.
 */
static void
nxt_event_engine_signal_pipe_free(nxt_event_engine_t *engine)
{
    nxt_event_engine_pipe_t  *pipe;

    pipe = engine->pipe;

    if (pipe == NULL) {
        return;
    }

    if (pipe->event.read_work_queue != NULL) {
        nxt_event_fd_close(engine, &pipe->event);
        nxt_pipe_close(pipe->fds);
    }

    nxt_free(pipe);
}
/*
 * Deferred work handler that closes and frees a detached engine pipe
 * after all its pending events have been processed.
 */
static void
nxt_event_engine_signal_pipe_close(nxt_thread_t *thr, void *obj, void *data)
{
    nxt_event_engine_pipe_t  *pipe = obj;

    nxt_pipe_close(pipe->fds);
    nxt_free(pipe);
}
/*
 * Post a work item to the engine from another thread: add it to the
 * locked work queue and wake the engine up (signo 0 means "post").
 */
void
nxt_event_engine_post(nxt_event_engine_t *engine, nxt_work_handler_t handler,
    void *obj, void *data, nxt_log_t *log)
{
    nxt_thread_log_debug("event engine post");

    nxt_locked_work_queue_add(&engine->work_queue, handler, obj, data, log);

    nxt_event_engine_signal(engine, 0);
}
/*
 * Wake the engine up with a signal number (0 means a posted work item):
 * use the event facility's native user event if available, otherwise
 * write one byte to the engine's self-pipe.
 */
void
nxt_event_engine_signal(nxt_event_engine_t *engine, nxt_uint_t signo)
{
    u_char  buf;

    nxt_thread_log_debug("event engine signal:%ui", signo);

    /*
     * A signal number may be sent in a signal context, so the signal
     * information cannot be passed via a locked work queue.
     */

    if (engine->event->signal != NULL) {
        engine->event->signal(engine->event_set, signo);
        return;
    }

    buf = (u_char) signo;
    /* The write end is blocking, so the byte cannot be lost. */
    (void) nxt_fd_write(engine->pipe->fds[1], &buf, 1);
}
/*
 * Read handler of the engine's self-pipe: drain pending bytes, run the
 * handler of each received signal number, and process posted work once
 * at the end regardless of how many "post" bytes were received.
 */
static void
nxt_event_engine_signal_pipe(nxt_thread_t *thr, void *obj, void *data)
{
    int                    i, n;
    u_char                 signo;
    nxt_bool_t             post;
    nxt_event_fd_t         *ev;
    const nxt_event_sig_t  *sigev;
    u_char                 buf[128];

    ev = obj;

    nxt_log_debug(thr->log, "engine signal pipe");

    post = 0;

    do {
        n = nxt_fd_read(ev->fd, buf, sizeof(buf));

        for (i = 0; i < n; i++) {
            signo = buf[i];

            nxt_log_debug(thr->log, "engine pipe signo:%d", signo);

            if (signo == 0) {
                /* A post should be processed only once. */
                post = 1;

            } else {
                sigev = nxt_event_engine_signal_find(thr, signo);

                if (nxt_fast_path(sigev != NULL)) {
                    sigev->handler(thr, (void *) (uintptr_t) signo,
                                   (void *) sigev->name);
                }
            }
        }

        /* A full buffer may mean more bytes are still pending. */
    } while (n == sizeof(buf));

    if (post) {
        nxt_event_engine_post_handler(thr, NULL, NULL);
    }
}
/* Move all posted work items from the locked queue to the thread's main queue. */
static void
nxt_event_engine_post_handler(nxt_thread_t *thr, void *obj, void *data)
{
    nxt_locked_work_queue_move(thr, &thr->engine->work_queue,
                               &thr->work_queue.main);
}
/*
 * Error handler for the engine's self-pipe: log and tear the pipe down.
 * Losing the pipe disables cross-thread posts, hence the alert level.
 */
static void
nxt_event_engine_signal_pipe_error(nxt_thread_t *thr, void *obj, void *data)
{
    nxt_event_fd_t  *ev;

    ev = obj;

    nxt_log_alert(ev->log, "engine pipe(%FD:%FD) event error",
                  thr->engine->pipe->fds[0], thr->engine->pipe->fds[1]);

    nxt_event_fd_close(thr->engine, &thr->engine->pipe->event);
    nxt_pipe_close(thr->engine->pipe->fds);
}
/*
 * Dispatch a signal delivered through the event facility's native
 * signal support to the registered handler, if any.
 */
static void
nxt_event_engine_signal_handler(nxt_thread_t *thr, void *obj, void *data)
{
    uintptr_t              signo;
    const nxt_event_sig_t  *sigev;

    signo = (uintptr_t) obj;

    sigev = nxt_event_engine_signal_find(thr, signo);

    if (sigev == NULL) {
        return;
    }

    sigev->handler(thr, (void *) signo, (void *) sigev->name);
}
/*
 * Look up the registered handler entry for a signal number in the
 * engine's zero-terminated signal array; logs an alert and returns
 * NULL if the signal is unknown.
 */
static const nxt_event_sig_t *
nxt_event_engine_signal_find(nxt_thread_t *thr, nxt_uint_t signo)
{
    const nxt_event_sig_t  *sigev;

    sigev = thr->engine->signals->sigev;

    while (sigev->signo != 0) {

        if ((nxt_uint_t) sigev->signo == signo) {
            return sigev;
        }

        sigev++;
    }

    nxt_log_alert(thr->log, "signal %ui handler not found", signo);

    return NULL;
}
/*
 * Switch a running engine to a different event facility (e.g. after
 * configuration change).  Pending signal/post events are carefully
 * migrated so none are lost during the switch.
 */
nxt_int_t
nxt_event_engine_change(nxt_thread_t *thr, const nxt_event_set_ops_t *event_set,
    nxt_uint_t batch)
{
    nxt_uint_t          events;
    nxt_event_engine_t  *engine;

    engine = thr->engine;
    engine->batch = batch;

    if (!engine->event->signal_support && event_set->signal_support) {
        /*
         * Block signal processing if the current event
         * facility does not support signal processing.
         */
        nxt_event_engine_signals_stop(engine);

        /*
         * Add to thread main work queue the signal events possibly
         * received before the blocking signal processing.
         */
        nxt_event_engine_signal_pipe(thr, &engine->pipe->event, NULL);
    }

    if (engine->pipe != NULL && event_set->enable_post != NULL) {
        /*
         * An engine pipe must be closed after all signal events
         * added above to thread main work queue will be processed.
         */
        nxt_thread_work_queue_add(thr, &thr->work_queue.main,
                                  nxt_event_engine_signal_pipe_close,
                                  engine->pipe, NULL, &nxt_main_log);

        engine->pipe = NULL;
    }

    engine->event->free(engine->event_set);

    events = (batch != 0) ? batch : 32;

    engine->event_set = event_set->create(engine->signals, 4 * events, events);
    if (engine->event_set == NULL) {
        return NXT_ERROR;
    }

    engine->event = event_set;

    if (nxt_event_engine_post_init(thr, engine) != NXT_OK) {
        return NXT_ERROR;
    }

    if (engine->signals != NULL) {

        if (!engine->event->signal_support) {
            return nxt_event_engine_signals_start(engine);
        }

#if (NXT_THREADS)

        /*
         * Reset the PID flag to start the signal thread if
         * some future event facility will not support signals.
         */
        engine->signals->process = 0;

#endif

    }

    return NXT_OK;
}
/*
 * Free an event engine and its resources.  The self-pipe goes first
 * so no further posts can arrive while the queues are destroyed.
 */
void
nxt_event_engine_free(nxt_event_engine_t *engine)
{
    nxt_event_engine_signal_pipe_free(engine);
    nxt_free(engine->signals);

    nxt_locked_work_queue_destroy(&engine->work_queue);
    nxt_thread_work_queue_destroy(nxt_thread());

    engine->event->free(engine->event_set);

    /* TODO: free timers */

    nxt_free(engine);
}
/*
 * The engine's main loop: drain the thread work queues, poll the event
 * facility with the nearest timer as timeout, and expire timers.
 * Never returns.
 */
void
nxt_event_engine_start(nxt_event_engine_t *engine)
{
    void                *obj, *data;
    nxt_msec_t          timeout, now;
    nxt_thread_t        *thr;
    nxt_work_handler_t  handler;

    thr = nxt_thread();

    if (engine->fibers) {
        /*
         * _setjmp() cannot be wrapped in a function since return from
         * the function clobbers stack used by future _setjmp() returns.
         */
        _setjmp(engine->fibers->fiber.jmp);

        /* A return point from fibers. */
    }

    for ( ;; ) {

        /* Run all currently queued work items. */
        for ( ;; ) {
            handler = nxt_thread_work_queue_pop(thr, &obj, &data, &thr->log);

            if (handler == NULL) {
                break;
            }

            handler(thr, obj, data);

            thr->log = &nxt_main_log;
        }

        /* Then run work deferred to the end of the cycle. */
        for ( ;; ) {
            handler = nxt_thread_last_work_queue_pop(thr, &obj, &data,
                                                     &thr->log);
            if (handler == NULL) {
                break;
            }

            handler(thr, obj, data);

            thr->log = &nxt_main_log;
        }

        /* Attach some event engine work queues in preferred order. */
        nxt_work_queue_attach(thr, &engine->accept_work_queue);
        nxt_work_queue_attach(thr, &engine->read_work_queue);

        timeout = nxt_event_timer_find(engine);

        engine->event->poll(thr, engine->event_set, timeout);

        /*
         * Look up expired timers only if a new zero timer has been
         * just added before the event poll or if the event poll slept
         * at least 1 millisecond, because all old eligible timers were
         * processed in the previous iterations.
         */
        now = nxt_thread_monotonic_time(thr) / 1000000;

        if (timeout == 0 || now != engine->timers.now) {
            nxt_event_timer_expire(thr, now);
        }
    }
}

94
src/nxt_event_engine.h Normal file
View File

@@ -0,0 +1,94 @@
/*
* Copyright (C) Igor Sysoev
* Copyright (C) NGINX, Inc.
*/
#ifndef _NXT_EVENT_ENGINE_H_INCLUDED_
#define _NXT_EVENT_ENGINE_H_INCLUDED_
#define NXT_ENGINE_FIBERS 1
/* Self-pipe used to wake an engine whose event facility lacks user events. */
typedef struct {
    /* fds[0] is the non-blocking read end, fds[1] the blocking write end. */
    nxt_fd_t        fds[2];
    /* Event descriptor registered for the read end. */
    nxt_event_fd_t  event;
} nxt_event_engine_pipe_t;
struct nxt_event_engine_s {
    /* Operations of the underlying event facility (epoll, kqueue, ...). */
    const nxt_event_set_ops_t  *event;
    /* The facility's opaque event set instance. */
    nxt_event_set_t            *event_set;

    nxt_event_timers_t         timers;

    /* The engine ID, the main engine has ID 0. */
    uint32_t                   id;

    /*
     * A pipe to pass event signals to the engine, if the engine's
     * underlying event facility does not support user events.
     */
    nxt_event_engine_pipe_t    *pipe;

    /* Per-category work queues, drained by nxt_event_engine_start(). */
    nxt_work_queue_t           accept_work_queue;
    nxt_work_queue_t           read_work_queue;
    nxt_work_queue_t           socket_work_queue;
    nxt_work_queue_t           connect_work_queue;
    nxt_work_queue_t           write_work_queue;
    nxt_work_queue_t           shutdown_work_queue;
    nxt_work_queue_t           close_work_queue;

    /* Cross-thread post queue, protected by a lock. */
    nxt_locked_work_queue_t    work_queue;

    nxt_event_signals_t        *signals;

    nxt_fiber_main_t           *fibers;

    uint8_t                    shutdown;  /* 1 bit */

    /* Event batching factor; 0 disables batching. */
    uint32_t                   batch;

    uint32_t                   connections;
    uint32_t                   max_connections;

    nxt_queue_t                listen_connections;
    nxt_queue_t                idle_connections;
};
NXT_EXPORT nxt_event_engine_t *nxt_event_engine_create(nxt_thread_t *thr,
const nxt_event_set_ops_t *event_set, const nxt_event_sig_t *signals,
nxt_uint_t flags, nxt_uint_t batch);
NXT_EXPORT nxt_int_t nxt_event_engine_change(nxt_thread_t *thr,
const nxt_event_set_ops_t *event_set, nxt_uint_t batch);
NXT_EXPORT void nxt_event_engine_free(nxt_event_engine_t *engine);
NXT_EXPORT void nxt_event_engine_start(nxt_event_engine_t *engine);
NXT_EXPORT void nxt_event_engine_post(nxt_event_engine_t *engine,
nxt_work_handler_t handler, void *obj, void *data, nxt_log_t *log);
NXT_EXPORT void nxt_event_engine_signal(nxt_event_engine_t *engine,
nxt_uint_t signo);
/* Return the event engine attached to the calling thread. */
nxt_inline nxt_event_engine_t *
nxt_thread_event_engine(void)
{
    return nxt_thread()->engine;
}
/* Return the calling thread's main work queue. */
nxt_inline nxt_work_queue_t *
nxt_thread_main_work_queue(void)
{
    return &nxt_thread()->work_queue.main;
}
#endif /* _NXT_EVENT_ENGINE_H_INCLUDED_ */

110
src/nxt_event_fd.h Normal file
View File

@@ -0,0 +1,110 @@
/*
* Copyright (C) Igor Sysoev
* Copyright (C) NGINX, Inc.
*/
#ifndef _NXT_EVENT_FD_H_INCLUDED_
#define _NXT_EVENT_FD_H_INCLUDED_
/*
 * Per-direction state of a file descriptor event.  The numeric order
 * matters: states below NXT_EVENT_ONESHOT are "inactive in the kernel"
 * (see nxt_event_fd_is_disabled()/nxt_event_fd_is_active()).
 */
typedef enum {
    /* A completely inactive event. */
    NXT_EVENT_INACTIVE = 0,

    /*
     * An event presents in the kernel but disabled after oneshot.
     * Used by epoll.
     */
    NXT_EVENT_DISABLED,

    /*
     * An event is active in the kernel but blocked by application.
     * Used by kqueue, epoll, eventport, devpoll, and pollset.
     */
    NXT_EVENT_BLOCKED,

    /*
     * An active oneshot event.
     * Used by epoll, devpoll, pollset, poll, and select.
     */
    NXT_EVENT_ONESHOT,

    /* An active level-triggered event.  Used by eventport. */
    NXT_EVENT_LEVEL,

    /* An active event. */
    NXT_EVENT_DEFAULT,
} nxt_event_fd_state_t;
#define \
nxt_event_fd_is_disabled(state) \
((state) < NXT_EVENT_ONESHOT)
#define \
nxt_event_fd_is_active(state) \
((state) >= NXT_EVENT_ONESHOT)
/*
 * Per file descriptor event state shared by all event facilities.
 * The read/write/flag fields are laid out differently on 64-bit and
 * 32-bit platforms: whole bytes where padding is free anyway, packed
 * bitfields where space matters.
 */
struct nxt_event_fd_s {
    void                      *data;

    /* Both are int's. */
    nxt_socket_t              fd;
    nxt_err_t                 error;

    /* The flags should also be prefetched by nxt_work_queue_pop(). */

#if (NXT_64BIT)
    uint8_t                   read;         /* nxt_event_fd_state_t */
    uint8_t                   write;        /* nxt_event_fd_state_t */
    uint8_t                   log_error;    /* nxt_socket_error_level_t */
    uint8_t                   read_ready;
    uint8_t                   write_ready;
    uint8_t                   closed;
    uint8_t                   timedout;

#if (NXT_HAVE_EPOLL)
    uint8_t                   epoll_eof:1;
    uint8_t                   epoll_error:1;
#endif

#if (NXT_HAVE_KQUEUE)
    uint8_t                   kq_eof;
#endif

#else /* NXT_32BIT */
    nxt_event_fd_state_t      read:3;
    nxt_event_fd_state_t      write:3;
    nxt_socket_error_level_t  log_error:3;
    unsigned                  read_ready:1;
    unsigned                  write_ready:1;
    unsigned                  closed:1;
    unsigned                  timedout:1;

#if (NXT_HAVE_EPOLL)
    unsigned                  epoll_eof:1;
    unsigned                  epoll_error:1;
#endif

#if (NXT_HAVE_KQUEUE)
    unsigned                  kq_eof:1;
#endif

#endif /* NXT_64BIT */

#if (NXT_HAVE_KQUEUE)
    /* nxt_err_t is int. */
    nxt_err_t                 kq_errno;
    /* struct kevent.data is intptr_t, however int32_t is enough. */
    int32_t                   kq_available;
#endif

    /* Handlers and the work queues they are posted to. */
    nxt_work_queue_t          *read_work_queue;
    nxt_work_handler_t        read_handler;
    nxt_work_queue_t          *write_work_queue;
    nxt_work_handler_t        write_handler;
    nxt_work_handler_t        error_handler;

    nxt_log_t                 *log;
};
#endif /* _NXT_EVENT_FD_H_INCLUDED_ */

17
src/nxt_event_file.h Normal file
View File

@@ -0,0 +1,17 @@
/*
* Copyright (C) Igor Sysoev
* Copyright (C) NGINX, Inc.
*/
#ifndef _NXT_EVENT_FILE_H_INCLUDED_
#define _NXT_EVENT_FILE_H_INCLUDED_
/*
 * A file change notification event, registered via the enable_file/
 * close_file operations of an event set.
 */
typedef struct {
    void                *data;      /* opaque user data */
    nxt_file_t          *file;      /* the watched file */
    nxt_work_handler_t  handler;    /* posted on a file change */
} nxt_event_file_t;
#endif /* _NXT_EVENT_FILE_H_INCLUDED_ */

107
src/nxt_event_set.c Normal file
View File

@@ -0,0 +1,107 @@
/*
* Copyright (C) Igor Sysoev
* Copyright (C) NGINX, Inc.
*/
#include <nxt_main.h>
static nxt_int_t nxt_event_set_fd_hash_test(nxt_lvlhsh_query_t *lhq,
void *data);
/*
 * Level hash descriptor for fd -> nxt_event_fd_t lookups, shared by the
 * poll-style facilities.  Cache-line aligned read-only data.
 */
static const nxt_lvlhsh_proto_t  nxt_event_set_fd_hash_proto  nxt_aligned(64) =
{
    NXT_LVLHSH_LARGE_MEMALIGN,
    0,
    nxt_event_set_fd_hash_test,
    nxt_lvlhsh_alloc,
    nxt_lvlhsh_free,
};
/* nxt_murmur_hash2() is unique for 4 bytes. */

/*
 * Key comparison callback.  It always reports a match: the 32-bit
 * murmur hash of a 4-byte descriptor is collision-free (see above),
 * so equal hashes imply equal keys.
 */
static nxt_int_t
nxt_event_set_fd_hash_test(nxt_lvlhsh_query_t *lhq, void *data)
{
    return NXT_OK;
}
/*
 * Insert the fd -> event mapping into the hash.  Returns NXT_ERROR and
 * logs an alert if the descriptor is already present.
 */
nxt_int_t
nxt_event_set_fd_hash_add(nxt_lvlhsh_t *lh, nxt_fd_t fd, nxt_event_fd_t *ev)
{
    nxt_lvlhsh_query_t  lhq;

    lhq.key_hash = nxt_murmur_hash2(&fd, sizeof(nxt_fd_t));
    lhq.replace = 0;
    lhq.value = ev;
    lhq.proto = &nxt_event_set_fd_hash_proto;

    if (nxt_lvlhsh_insert(lh, &lhq) != NXT_OK) {
        nxt_log_alert(ev->log, "event fd %d is already in hash", ev->fd);
        return NXT_ERROR;
    }

    return NXT_OK;
}
/*
 * Look up the event registered for fd.  Returns NULL and logs an alert
 * if the descriptor was never added.
 */
void *
nxt_event_set_fd_hash_get(nxt_lvlhsh_t *lh, nxt_fd_t fd)
{
    nxt_lvlhsh_query_t  lhq;

    lhq.key_hash = nxt_murmur_hash2(&fd, sizeof(nxt_fd_t));
    lhq.proto = &nxt_event_set_fd_hash_proto;

    if (nxt_lvlhsh_find(lh, &lhq) != NXT_OK) {
        nxt_thread_log_alert("event fd %d not found in hash", fd);
        return NULL;
    }

    return lhq.value;
}
/*
 * Remove the entry for fd from the hash.  A missing entry is logged as
 * an alert unless "ignore" is set, for callers where the descriptor may
 * legitimately be absent.
 */
void
nxt_event_set_fd_hash_delete(nxt_lvlhsh_t *lh, nxt_fd_t fd, nxt_bool_t ignore)
{
    nxt_lvlhsh_query_t  lhq;

    lhq.key_hash = nxt_murmur_hash2(&fd, sizeof(nxt_fd_t));
    lhq.proto = &nxt_event_set_fd_hash_proto;

    if (nxt_lvlhsh_delete(lh, &lhq) != NXT_OK && !ignore) {
        nxt_thread_log_alert("event fd %d not found in hash", fd);
    }
}
/*
 * Drain the whole fd hash: iterate over every stored event and delete
 * its entry, so the hash releases all of its bucket memory.
 */
void
nxt_event_set_fd_hash_destroy(nxt_lvlhsh_t *lh)
{
    nxt_event_fd_t      *ev;
    nxt_lvlhsh_each_t   lhe;
    nxt_lvlhsh_query_t  lhq;

    nxt_memzero(&lhe, sizeof(nxt_lvlhsh_each_t));
    lhe.proto = &nxt_event_set_fd_hash_proto;
    lhq.proto = &nxt_event_set_fd_hash_proto;

    for ( ;; ) {
        ev = nxt_lvlhsh_each(lh, &lhe);

        if (ev == NULL) {
            return;
        }

        /* Recompute the key hash from the event's own descriptor. */
        lhq.key_hash = nxt_murmur_hash2(&ev->fd, sizeof(nxt_fd_t));

        if (nxt_lvlhsh_delete(lh, &lhq) != NXT_OK) {
            nxt_thread_log_alert("event fd %d not found in hash", ev->fd);
        }
    }
}

473
src/nxt_event_set.h Normal file
View File

@@ -0,0 +1,473 @@
/*
* Copyright (C) Igor Sysoev
* Copyright (C) NGINX, Inc.
*/
#ifndef _NXT_EVENT_SET_H_INCLUDED_
#define _NXT_EVENT_SET_H_INCLUDED_
/*
* An event facility is kernel interface such as kqueue, epoll, etc.
* intended to get event notifications about file descriptor state,
* signals, etc.
*
* An event set provides generic interface to underlying event facility.
* Although event set and event facility are closely coupled with an event
* engine, nevertheless they are separated from an event engine to allow
* to add one event facility to another if underlying event facility allows
* this (Linux epoll, BSD kqueue, Solaris eventport).
*/
typedef union nxt_event_set_u nxt_event_set_t;
#define NXT_FILE_EVENTS 1
#define NXT_NO_FILE_EVENTS 0
#define NXT_SIGNAL_EVENTS 1
#define NXT_NO_SIGNAL_EVENTS 0
/*
 * The virtual interface implemented by every event facility (kqueue,
 * epoll, eventport, /dev/poll, pollset, poll, select).  An event engine
 * dispatches through this table without knowing the concrete facility.
 */
typedef struct {
    /* The canonical event set name. */
    const char       *name;

    /*
     * Create an event set.  The mchanges argument is a maximum number of
     * changes to send to the kernel.  The mevents argument is a maximum
     * number of events to retrieve from the kernel at once, if underlying
     * event facility supports batch operations.
     */
    nxt_event_set_t  *(*create)(nxt_event_signals_t *signals,
                          nxt_uint_t mchanges, nxt_uint_t mevents);

    /* Close and free an event set. */
    void             (*free)(nxt_event_set_t *data);

    /*
     * Add a file descriptor to an event set and enable the most
     * effective read and write event notification method provided
     * by underlying event facility.
     */
    void             (*enable)(nxt_event_set_t *event_set,
                          nxt_event_fd_t *ev);

    /* Disable file descriptor event notifications. */
    void             (*disable)(nxt_event_set_t *event_set,
                          nxt_event_fd_t *ev);

    /*
     * Delete a file descriptor from an event set.  A possible usage
     * is a moving of the file descriptor from one event set to another.
     */
    void             (*delete)(nxt_event_set_t *event_set,
                          nxt_event_fd_t *ev);

    /*
     * Delete a file descriptor from an event set before closing the
     * file descriptor.  The most event facilities such as Linux epoll,
     * BSD kqueue, Solaris event ports, AIX pollset, and HP-UX /dev/poll
     * delete a file descriptor automatically on the file descriptor close.
     * Some facilities such as Solaris /dev/poll require to delete a file
     * descriptor explicitly.
     */
    void             (*close)(nxt_event_set_t *event_set,
                          nxt_event_fd_t *ev);

    /*
     * Add a file descriptor to an event set and enable the most effective
     * read event notification method provided by underlying event facility.
     */
    void             (*enable_read)(nxt_event_set_t *event_set,
                          nxt_event_fd_t *ev);

    /*
     * Add a file descriptor to an event set and enable the most effective
     * write event notification method provided by underlying event facility.
     */
    void             (*enable_write)(nxt_event_set_t *event_set,
                          nxt_event_fd_t *ev);

    /* Disable file descriptor read event notifications. */
    void             (*disable_read)(nxt_event_set_t *event_set,
                          nxt_event_fd_t *ev);

    /* Disable file descriptor write event notifications. */
    void             (*disable_write)(nxt_event_set_t *event_set,
                          nxt_event_fd_t *ev);

    /* Block file descriptor read event notifications. */
    void             (*block_read)(nxt_event_set_t *event_set,
                          nxt_event_fd_t *ev);

    /* Block file descriptor write event notifications. */
    void             (*block_write)(nxt_event_set_t *event_set,
                          nxt_event_fd_t *ev);

    /*
     * Add a file descriptor to an event set and enable an oneshot
     * read event notification method.
     */
    void             (*oneshot_read)(nxt_event_set_t *event_set,
                          nxt_event_fd_t *ev);

    /*
     * Add a file descriptor to an event set and enable an oneshot
     * write event notification method.
     */
    void             (*oneshot_write)(nxt_event_set_t *event_set,
                          nxt_event_fd_t *ev);

    /*
     * Add a listening socket descriptor to an event set and enable
     * a level-triggered read event notification method.
     */
    void             (*enable_accept)(nxt_event_set_t *event_set,
                          nxt_event_fd_t *ev);

    /*
     * Add a file to an event set and enable a file change notification
     * events.
     */
    void             (*enable_file)(nxt_event_set_t *event_set,
                          nxt_event_file_t *fev);

    /*
     * Delete a file from an event set before closing the file descriptor.
     */
    void             (*close_file)(nxt_event_set_t *event_set,
                          nxt_event_file_t *fev);

    /*
     * Enable post event notifications and set a post handler to handle
     * the zero signal.
     */
    nxt_int_t        (*enable_post)(nxt_event_set_t *event_set,
                          nxt_work_handler_t handler);

    /*
     * Signal an event set.  If a signal number is non-zero then
     * a signal handler added to the event set is called.  This is
     * a way to route Unix signals to an event engine if underlying
     * event facility does not support signal events.
     *
     * If a signal number is zero, then the post_handler of the event
     * set is called.  This has no relation to Unix signals but is
     * a way to wake up the event set to process works posted to
     * the event engine locked work queue.
     */
    void             (*signal)(nxt_event_set_t *event_set,
                          nxt_uint_t signo);

    /* Poll an event set for new event notifications. */
    void             (*poll)(nxt_thread_t *thr,
                          nxt_event_set_t *event_set,
                          nxt_msec_t timeout);

    /* I/O operations suitable to underlying event facility. */
    nxt_event_conn_io_t  *io;

    /* True if an event facility supports file change event notifications. */
    uint8_t          file_support;    /* 1 bit */

    /* True if an event facility supports signal event notifications. */
    uint8_t          signal_support;  /* 1 bit */
} nxt_event_set_ops_t;
#if (NXT_HAVE_KQUEUE)

/* BSD kqueue facility state: batched changelist and received events. */
typedef struct {
    int                 kqueue;        /* the kqueue descriptor */
    int                 nchanges;      /* number of queued changes */
    int                 mchanges;      /* changes array capacity */
    int                 mevents;       /* events array capacity */
    nxt_pid_t           pid;
    nxt_work_handler_t  post_handler;
    struct kevent       *changes;
    struct kevent       *events;
} nxt_kqueue_event_set_t;

extern const nxt_event_set_ops_t  nxt_kqueue_event_set;

#endif
#if (NXT_HAVE_EPOLL)

/* One batched epoll_ctl() operation. */
typedef struct {
    int                 op;          /* EPOLL_CTL_* operation code */

    /*
     * Although file descriptor can be obtained using pointer to a
     * nxt_event_fd_t stored in event.data.ptr, nevertheless storing
     * the descriptor right here avoid cache miss.  Besides this costs
     * no space because event.data must be anyway aligned to 64 bits.
     */
    nxt_socket_t        fd;

    struct epoll_event  event;
} nxt_epoll_change_t;


/* Linux epoll facility state. */
typedef struct {
    int                 epoll;       /* the epoll descriptor */
    uint32_t            mode;        /* edge- or level-triggered mode */

    nxt_uint_t          nchanges;    /* number of queued changes */
    nxt_uint_t          mchanges;    /* changes array capacity */
    int                 mevents;     /* events array capacity */

    nxt_epoll_change_t  *changes;
    struct epoll_event  *events;

#if (NXT_HAVE_EVENTFD)
    nxt_work_handler_t  post_handler;
    nxt_event_fd_t      eventfd;     /* wakeup descriptor for posting */
    uint32_t            neventfd;
#endif

#if (NXT_HAVE_SIGNALFD)
    nxt_event_fd_t      signalfd;    /* signal delivery descriptor */
#endif
} nxt_epoll_event_set_t;


extern const nxt_event_set_ops_t  nxt_epoll_edge_event_set;
extern const nxt_event_set_ops_t  nxt_epoll_level_event_set;

#endif
#if (NXT_HAVE_EVENTPORT)

/* One batched port_associate()/port_dissociate() operation. */
typedef struct {
    /*
     * Although file descriptor can be obtained using pointer to a
     * nxt_event_fd_t, nevertheless storing the descriptor right here
     * avoid cache miss.  Besides this costs no space on 64-bit platform.
     */
    nxt_socket_t            fd;

    int                     events;    /* POLL* mask; 0 means dissociate */
    nxt_event_fd_t          *event;
} nxt_eventport_change_t;


/* Solaris event port facility state. */
typedef struct {
    int                     port;      /* the event port descriptor */
    nxt_uint_t              nchanges;  /* number of queued changes */
    nxt_uint_t              mchanges;  /* changes array capacity */
    u_int                   mevents;   /* events array capacity */
    nxt_eventport_change_t  *changes;
    port_event_t            *events;
    nxt_work_handler_t      post_handler;
    nxt_work_handler_t      signal_handler;
} nxt_eventport_event_set_t;

extern const nxt_event_set_ops_t  nxt_eventport_event_set;

#endif
#if (NXT_HAVE_DEVPOLL)

/* One batched /dev/poll change. */
typedef struct {
    uint8_t               op;
    short                 events;

    /* A file descriptor stored because nxt_event_fd_t may be already freed. */
    nxt_socket_t          fd;

    nxt_event_fd_t        *event;
} nxt_devpoll_change_t;


/* Solaris and HP-UX /dev/poll facility state. */
typedef struct {
    int                   devpoll;          /* the /dev/poll descriptor */
    int                   nchanges;         /* number of queued changes */
    int                   mchanges;         /* changes array capacity */
    int                   mevents;          /* events array capacity */
    nxt_devpoll_change_t  *devpoll_changes;
    struct pollfd         *changes;
    struct pollfd         *events;
    nxt_lvlhsh_t          fd_hash;          /* fd -> nxt_event_fd_t */
} nxt_devpoll_event_set_t;

extern const nxt_event_set_ops_t  nxt_devpoll_event_set;

#endif
#if (NXT_HAVE_POLLSET)

/* One batched AIX pollset change. */
typedef struct {
    uint8_t               op;
    uint8_t               cmd;
    short                 events;

    /* A file descriptor stored because nxt_event_fd_t may be already freed. */
    nxt_socket_t          fd;

    nxt_event_fd_t        *event;
} nxt_pollset_change_t;


/* AIX pollset facility state. */
typedef struct {
    pollset_t             pollset;          /* the pollset handle */
    int                   nchanges;         /* number of queued changes */
    int                   mchanges;         /* changes array capacity */
    int                   mevents;          /* events array capacity */
    nxt_pollset_change_t  *pollset_changes;
    struct poll_ctl       *changes;
    struct pollfd         *events;
    nxt_lvlhsh_t          fd_hash;          /* fd -> nxt_event_fd_t */
} nxt_pollset_event_set_t;

extern const nxt_event_set_ops_t  nxt_pollset_event_set;

#endif
/* One batched change to the poll(2) descriptor set. */
typedef struct {
    uint8_t            op;
    short              events;

    /* A file descriptor stored because nxt_event_fd_t may be already freed. */
    nxt_socket_t       fd;

    nxt_event_fd_t     *event;
} nxt_poll_change_t;


/* Portable poll(2) facility state. */
typedef struct {
    nxt_uint_t         max_nfds;    /* poll_set array capacity */
    nxt_uint_t         nfds;        /* descriptors currently in poll_set */
    nxt_uint_t         nchanges;    /* number of queued changes */
    nxt_uint_t         mchanges;    /* changes array capacity */
    nxt_poll_change_t  *changes;
    struct pollfd      *poll_set;
    nxt_lvlhsh_t       fd_hash;     /* fd -> nxt_event_fd_t */
} nxt_poll_event_set_t;

extern const nxt_event_set_ops_t  nxt_poll_event_set;
/*
 * Portable select(2) facility state.  The main_* sets hold the armed
 * descriptors; the work_* sets are the copies passed to select(), which
 * modifies them in place.
 */
typedef struct {
    int             nfds;
    uint32_t        update_nfds;  /* 1 bit */
    nxt_event_fd_t  **events;
    fd_set          main_read_fd_set;
    fd_set          main_write_fd_set;
    fd_set          work_read_fd_set;
    fd_set          work_write_fd_set;
} nxt_select_event_set_t;

extern const nxt_event_set_ops_t  nxt_select_event_set;
/*
 * The facility-independent handle: each create() allocates the member
 * for its own facility, and all ops receive this union.
 */
union nxt_event_set_u {
#if (NXT_HAVE_KQUEUE)
    nxt_kqueue_event_set_t     kqueue;
#endif
#if (NXT_HAVE_EPOLL)
    nxt_epoll_event_set_t      epoll;
#endif
#if (NXT_HAVE_EVENTPORT)
    nxt_eventport_event_set_t  eventport;
#endif
#if (NXT_HAVE_DEVPOLL)
    nxt_devpoll_event_set_t    devpoll;
#endif
#if (NXT_HAVE_POLLSET)
    nxt_pollset_event_set_t    pollset;
#endif
    nxt_poll_event_set_t       poll;
    nxt_select_event_set_t     select;
};
nxt_int_t nxt_event_set_fd_hash_add(nxt_lvlhsh_t *lh, nxt_fd_t fd,
nxt_event_fd_t *ev);
void *nxt_event_set_fd_hash_get(nxt_lvlhsh_t *lh, nxt_fd_t fd);
void nxt_event_set_fd_hash_delete(nxt_lvlhsh_t *lh, nxt_fd_t fd,
nxt_bool_t ignore);
void nxt_event_set_fd_hash_destroy(nxt_lvlhsh_t *lh);
/*
 * Convenience wrappers dispatching a file descriptor operation through
 * the engine's event facility vtable.  The block_* macros avoid the
 * vtable call entirely when the event is not armed in the kernel.
 */

#define                                                                       \
nxt_event_fd_disable(engine, ev)                                              \
    (engine)->event->disable((engine)->event_set, ev)


#define                                                                       \
nxt_event_fd_close(engine, ev)                                                \
    (engine)->event->close((engine)->event_set, ev)


#define                                                                       \
nxt_event_fd_enable_read(engine, ev)                                          \
    (engine)->event->enable_read((engine)->event_set, ev)


#define                                                                       \
nxt_event_fd_enable_write(engine, ev)                                         \
    (engine)->event->enable_write((engine)->event_set, ev)


#define                                                                       \
nxt_event_fd_disable_read(engine, ev)                                         \
    (engine)->event->disable_read((engine)->event_set, ev)


#define                                                                       \
nxt_event_fd_disable_write(engine, ev)                                        \
    (engine)->event->disable_write((engine)->event_set, ev)


#define                                                                       \
nxt_event_fd_block_read(engine, ev)                                           \
    do {                                                                      \
        if (nxt_event_fd_is_active((ev)->read)) {                             \
            (engine)->event->block_read((engine)->event_set, ev);             \
        }                                                                     \
    } while (0)


#define                                                                       \
nxt_event_fd_block_write(engine, ev)                                          \
    do {                                                                      \
        if (nxt_event_fd_is_active((ev)->write)) {                            \
            (engine)->event->block_write((engine)->event_set, ev);            \
        }                                                                     \
    } while (0)


#define                                                                       \
nxt_event_fd_oneshot_read(engine, ev)                                         \
    (engine)->event->oneshot_read((engine)->event_set, ev)


#define                                                                       \
nxt_event_fd_oneshot_write(engine, ev)                                        \
    (engine)->event->oneshot_write((engine)->event_set, ev)


#define                                                                       \
nxt_event_fd_enable_accept(engine, ev)                                        \
    (engine)->event->enable_accept((engine)->event_set, ev)
#endif /* _NXT_EVENT_SET_H_INCLUDED_ */

320
src/nxt_event_timer.c Normal file
View File

@@ -0,0 +1,320 @@
/*
* Copyright (C) Igor Sysoev
* Copyright (C) NGINX, Inc.
*/
#include <nxt_main.h>
/*
* Timer operations are batched to improve instruction and data
* cache locality of rbtree operations.
*
* nxt_event_timer_add() adds a timer to the changes array to add or to
* modify the timer. The changes are processed by nxt_event_timer_find().
*
* nxt_event_timer_disable() disables a timer. The disabled timer may
* however present in rbtree for a long time and may be eventually removed
* by nxt_event_timer_find() or nxt_event_timer_expire().
*
* nxt_event_timer_delete() removes a timer at once from both the rbtree and
* the changes array and should be used only if the timer memory must be freed.
*/
static nxt_int_t nxt_event_timer_rbtree_compare(nxt_rbtree_node_t *node1,
nxt_rbtree_node_t *node2);
static void nxt_event_timer_change(nxt_event_timers_t *timers,
nxt_event_timer_t *ev, nxt_msec_t time);
static void nxt_event_commit_timer_changes(nxt_event_timers_t *timers);
static void nxt_event_timer_drop_changes(nxt_event_timers_t *timers,
nxt_event_timer_t *ev);
/*
 * Initialize a timer set: an empty rbtree ordered by expiry time plus a
 * batched changes array of mchanges entries.  Returns NXT_ERROR if the
 * changes array cannot be allocated.
 */
nxt_int_t
nxt_event_timers_init(nxt_event_timers_t *timers, nxt_uint_t mchanges)
{
    nxt_rbtree_init(&timers->tree, nxt_event_timer_rbtree_compare, NULL);

    timers->mchanges = mchanges;
    timers->changes = nxt_malloc(sizeof(nxt_event_timer_change_t) * mchanges);

    if (timers->changes == NULL) {
        return NXT_ERROR;
    }

    return NXT_OK;
}
/* rbtree comparison callback: orders timers by (overflow-aware) expiry. */
static nxt_int_t
nxt_event_timer_rbtree_compare(nxt_rbtree_node_t *node1,
    nxt_rbtree_node_t *node2)
{
    nxt_event_timer_t  *ev1, *ev2;

    ev1 = (nxt_event_timer_t *) node1;
    ev2 = (nxt_event_timer_t *) node2;

    /*
     * Timer values are distributed in small range, usually several minutes
     * and overflow every 49 days if nxt_msec_t is stored in 32 bits.
     * This signed comparison takes into account that overflow.
     */
                   /* ev1->time < ev2->time */
    return nxt_msec_diff(ev1->time, ev2->time);
}
/*
 * Schedule or reschedule a timer to fire "timer" milliseconds from the
 * engine's current time.  If the timer is already in the rbtree and the
 * new expiry differs from the stored one by less than ev->precision, the
 * existing tree node is reused and only the state is refreshed; otherwise
 * a change record is queued for the next commit.
 */
void
nxt_event_timer_add(nxt_event_engine_t *engine, nxt_event_timer_t *ev,
    nxt_msec_t timer)
{
    int32_t   diff;
    uint32_t  time;

    time = engine->timers.now + timer;

    if (nxt_event_timer_is_in_tree(ev)) {

        diff = nxt_msec_diff(time, ev->time);

        /*
         * Use the previous timer if difference between it and the
         * new timer is less than required precision milliseconds:
         * this decreases rbtree operations for fast connections.
         */
        if (nxt_abs(diff) < ev->precision) {
            nxt_log_debug(ev->log, "event timer previous: %D: %d:%M",
                          ev->ident, ev->state, time);

            if (ev->state == NXT_EVENT_TIMER_DISABLED) {
                ev->state = NXT_EVENT_TIMER_ACTIVE;
            }

            return;
        }

        nxt_log_debug(ev->log, "event timer change: %D: %d:%M",
                      ev->ident, ev->state, ev->time);

    } else {
        /*
         * The timer's time is updated here just to log a correct
         * value by debug logging in nxt_event_timer_disable().
         * It could be updated only in nxt_event_commit_timer_changes()
         * just before nxt_rbtree_insert().
         */
        ev->time = time;

        nxt_log_debug(ev->log, "event timer add: %D: %M:%M",
                      ev->ident, timer, time);
    }

    nxt_event_timer_change(&engine->timers, ev, time);
}
/*
 * Queue a pending add/modify record for the timer, flushing the batch
 * first if the changes array is already full.
 */
static void
nxt_event_timer_change(nxt_event_timers_t *timers, nxt_event_timer_t *ev,
    nxt_msec_t time)
{
    nxt_event_timer_change_t  *change;

    if (timers->nchanges >= timers->mchanges) {
        nxt_event_commit_timer_changes(timers);
    }

    ev->state = NXT_EVENT_TIMER_ACTIVE;

    change = &timers->changes[timers->nchanges++];
    change->time = time;
    change->event = ev;
}
#if (NXT_DEBUG)

/*
 * Debug-build variant of nxt_event_timer_disable(): logs the transition
 * before marking the timer disabled.  Release builds use the macro from
 * nxt_event_timer.h instead.  The disabled timer may stay in the rbtree
 * until pruned by nxt_event_timer_find() or nxt_event_timer_expire().
 */
void
nxt_event_timer_disable(nxt_event_timer_t *ev)
{
    nxt_log_debug(ev->log, "event timer disable: %D: %d:%M",
                  ev->ident, ev->state, ev->time);

    ev->state = NXT_EVENT_TIMER_DISABLED;
}

#endif
/*
 * Remove a timer at once from both the rbtree and the pending changes
 * array.  Required before freeing the timer memory, since both may hold
 * pointers into it.
 */
void
nxt_event_timer_delete(nxt_event_engine_t *engine, nxt_event_timer_t *ev)
{
    if (nxt_event_timer_is_in_tree(ev)) {
        nxt_log_debug(ev->log, "event timer delete: %D: %d:%M",
                      ev->ident, ev->state, ev->time);

        nxt_rbtree_delete(&engine->timers.tree, &ev->node);
        nxt_event_timer_in_tree_clear(ev);

        ev->state = NXT_EVENT_TIMER_DISABLED;
    }

    nxt_event_timer_drop_changes(&engine->timers, ev);
}
/*
 * Compact the pending changes array in place, discarding every record
 * that refers to the given timer.
 */
static void
nxt_event_timer_drop_changes(nxt_event_timers_t *timers, nxt_event_timer_t *ev)
{
    nxt_event_timer_change_t  *keep, *p, *last;

    keep = timers->changes;
    last = keep + timers->nchanges;

    for (p = keep; p < last; p++) {

        if (p->event == ev) {
            continue;
        }

        if (keep != p) {
            *keep = *p;
        }

        keep++;
    }

    timers->nchanges -= last - keep;
}
/*
 * Flush the batched changes into the rbtree.  For each still-active
 * record: if the timer is already in the tree it is deleted first and
 * its expiry updated, then (re)inserted.  Records whose timer has been
 * disabled in the meantime are simply dropped.
 */
static void
nxt_event_commit_timer_changes(nxt_event_timers_t *timers)
{
    nxt_event_timer_t         *ev;
    nxt_event_timer_change_t  *ch, *end;

    nxt_thread_log_debug("event timers changes: %ui", timers->nchanges);

    ch = timers->changes;
    end = ch + timers->nchanges;

    while (ch < end) {
        ev = ch->event;

        if (ev->state != NXT_EVENT_TIMER_DISABLED) {

            if (nxt_event_timer_is_in_tree(ev)) {
                nxt_log_debug(ev->log, "event timer delete: %D: %d:%M",
                              ev->ident, ev->state, ev->time);

                nxt_rbtree_delete(&timers->tree, &ev->node);

                ev->time = ch->time;
            }

            nxt_log_debug(ev->log, "event timer add: %D: %M",
                          ev->ident, ev->time);

            nxt_rbtree_insert(&timers->tree, &ev->node);
            nxt_event_timer_in_tree_set(ev);
        }

        ch++;
    }

    timers->nchanges = 0;
}
/*
 * Commit any pending changes and return the number of milliseconds
 * until the nearest active timer fires (0 if it is already overdue),
 * or NXT_INFINITE_MSEC if there is none.  Disabled timers encountered
 * during the scan are pruned from the tree; blocked ones are skipped
 * but kept.
 */
nxt_msec_t
nxt_event_timer_find(nxt_event_engine_t *engine)
{
    int32_t            time;
    nxt_rbtree_node_t  *node, *next;
    nxt_event_timer_t  *ev;

    if (engine->timers.nchanges != 0) {
        nxt_event_commit_timer_changes(&engine->timers);
    }

    for (node = nxt_rbtree_min(&engine->timers.tree);
         nxt_rbtree_is_there_successor(&engine->timers.tree, node);
         node = next)
    {
        next = nxt_rbtree_node_successor(&engine->timers.tree, node);

        ev = (nxt_event_timer_t *) node;

        if (ev->state != NXT_EVENT_TIMER_DISABLED) {

            if (ev->state == NXT_EVENT_TIMER_BLOCKED) {
                nxt_log_debug(ev->log, "event timer blocked: %D: %M",
                              ev->ident, ev->time);
                continue;
            }

            time = nxt_msec_diff(ev->time, engine->timers.now);

            /* Clamp overdue timers to an immediate zero timeout. */
            return (nxt_msec_t) nxt_max(time, 0);
        }

        /* Delete disabled timer. */
        nxt_log_debug(ev->log, "event timer delete: %D: 0:%M",
                      ev->ident, ev->time);

        nxt_rbtree_delete(&engine->timers.tree, &ev->node);
        nxt_event_timer_in_tree_clear(ev);
    }

    return NXT_INFINITE_MSEC;
}
/*
 * Fire all timers whose expiry is at or before "now": each is removed
 * from the tree and its handler posted to its work queue.  Blocked
 * timers are skipped and kept; disabled ones are removed without
 * firing.  The scan stops at the first timer still in the future.
 */
void
nxt_event_timer_expire(nxt_thread_t *thr, nxt_msec_t now)
{
    nxt_rbtree_t       *tree;
    nxt_rbtree_node_t  *node, *next;
    nxt_event_timer_t  *ev;

    thr->engine->timers.now = now;
    tree = &thr->engine->timers.tree;

    for (node = nxt_rbtree_min(tree);
         nxt_rbtree_is_there_successor(tree, node);
         node = next)
    {
        ev = (nxt_event_timer_t *) node;

        /* ev->time > now */
        if (nxt_msec_diff(ev->time, now) > 0) {
            return;
        }

        next = nxt_rbtree_node_successor(tree, node);

        if (ev->state == NXT_EVENT_TIMER_BLOCKED) {
            nxt_log_debug(ev->log, "event timer blocked: %D: %M",
                          ev->ident, ev->time);
            continue;
        }

        nxt_log_debug(ev->log, "event timer delete: %D: %d:%M",
                      ev->ident, ev->state, ev->time);

        nxt_rbtree_delete(tree, &ev->node);
        nxt_event_timer_in_tree_clear(ev);

        if (ev->state != NXT_EVENT_TIMER_DISABLED) {
            ev->state = NXT_EVENT_TIMER_DISABLED;

            nxt_thread_work_queue_add(thr, ev->work_queue, ev->handler,
                                      ev, NULL, ev->log);
        }
    }
}

146
src/nxt_event_timer.h Normal file
View File

@@ -0,0 +1,146 @@
/*
* Copyright (C) Igor Sysoev
* Copyright (C) NGINX, Inc.
*/
#ifndef _NXT_EVENT_TIMER_H_INCLUDED_
#define _NXT_EVENT_TIMER_H_INCLUDED_
/* Valid values are between 1ms to 255ms. */
#define NXT_EVENT_TIMER_DEFAULT_PRECISION  100
//#define NXT_EVENT_TIMER_DEFAULT_PRECISION  1


/*
 * Static initializer; the value order must match the nxt_event_timer_t
 * field order below.
 */
#if (NXT_DEBUG)
#define NXT_EVENT_TIMER  { NXT_RBTREE_NODE_INIT, 0, 0, 0,                     \
                           NULL, NULL, NULL, -1 }
#else
#define NXT_EVENT_TIMER  { NXT_RBTREE_NODE_INIT, 0, 0, 0,                     \
                           NULL, NULL, NULL }
#endif


typedef struct {
    /* The rbtree node must be the first field. */
    NXT_RBTREE_NODE     (node);

    uint8_t             state;        /* NXT_EVENT_TIMER_* */
    uint8_t             precision;    /* ms slack tolerated on reschedule */
    nxt_msec_t          time;         /* expiry in engine clock ms */

    /* The handler posted to work_queue when the timer fires. */
    nxt_work_queue_t    *work_queue;
    nxt_work_handler_t  handler;

    nxt_log_t           *log;
#if (NXT_DEBUG)
    int32_t             ident;        /* debug-only identifier for logging */
#endif
} nxt_event_timer_t;
/* A pending add/modify record for a timer. */
typedef struct {
    nxt_msec_t                time;     /* the new expiry */
    nxt_event_timer_t         *event;
} nxt_event_timer_change_t;


/* A set of timers: expiry-ordered rbtree plus a batched changes array. */
typedef struct {
    nxt_rbtree_t              tree;

    /* An overflown milliseconds counter. */
    nxt_msec_t                now;

    nxt_uint_t                mchanges;  /* changes array capacity */
    nxt_uint_t                nchanges;  /* queued change records */

    nxt_event_timer_change_t  *changes;
} nxt_event_timers_t;
/* Recover the enclosing object from its embedded timer field. */
#define                                                                       \
nxt_event_timer_data(ev, type, timer)                                         \
    nxt_container_of(ev, type, timer)


/*
 * When timer resides in rbtree all links of its node are not NULL.
 * A parent link is the nearest to other timer flags.
 */
#define                                                                       \
nxt_event_timer_is_in_tree(ev)                                                \
    ((ev)->node.parent != NULL)

#define                                                                       \
nxt_event_timer_in_tree_set(ev)
    /* Noop, because rbtree insertion sets a node's parent link. */

#define                                                                       \
nxt_event_timer_in_tree_clear(ev)                                             \
    (ev)->node.parent = NULL


/* Timer states stored in nxt_event_timer_t.state. */
#define NXT_EVENT_TIMER_DISABLED  0
#define NXT_EVENT_TIMER_BLOCKED   1
#define NXT_EVENT_TIMER_ACTIVE    2


/* The debug identifier is compiled out of release builds. */
#if (NXT_DEBUG)

#define                                                                       \
nxt_event_timer_ident(ev, val)                                                \
    (ev)->ident = (val)

#else

#define                                                                       \
nxt_event_timer_ident(ev, val)

#endif
/*
 * Allocate a zeroed timer with the default precision.  Returns NULL on
 * allocation failure.  The ident argument is used only in debug builds.
 */
nxt_inline nxt_event_timer_t *
nxt_event_timer_create(int32_t ident)
{
    nxt_event_timer_t  *ev;

    ev = nxt_zalloc(sizeof(nxt_event_timer_t));

    if (ev != NULL) {
        ev->precision = NXT_EVENT_TIMER_DEFAULT_PRECISION;
#if (NXT_DEBUG)
        ev->ident = ident;
#endif
    }

    return ev;
}
nxt_int_t nxt_event_timers_init(nxt_event_timers_t *timers,
nxt_uint_t mchanges);
NXT_EXPORT void nxt_event_timer_add(nxt_event_engine_t *engine,
nxt_event_timer_t *ev, nxt_msec_t timer);
NXT_EXPORT void nxt_event_timer_delete(nxt_event_engine_t *engine,
nxt_event_timer_t *ev);
nxt_msec_t nxt_event_timer_find(nxt_event_engine_t *engine);
void nxt_event_timer_expire(nxt_thread_t *thr, nxt_msec_t now);
#if (NXT_DEBUG)
NXT_EXPORT void nxt_event_timer_disable(nxt_event_timer_t *ev);
#else
#define \
nxt_event_timer_disable(ev) \
(ev)->state = NXT_EVENT_TIMER_DISABLED
#endif
#endif /* _NXT_EVENT_TIMER_H_INCLUDED_ */

646
src/nxt_eventport.c Normal file
View File

@@ -0,0 +1,646 @@
/*
* Copyright (C) Igor Sysoev
* Copyright (C) NGINX, Inc.
*/
#include <nxt_main.h>
/*
* The event ports have been introduced in Solaris 10.
* The PORT_SOURCE_MQ and PORT_SOURCE_FILE sources have
* been added in OpenSolaris.
*/
static nxt_event_set_t *nxt_eventport_create(nxt_event_signals_t *signals,
nxt_uint_t mchanges, nxt_uint_t mevents);
static void nxt_eventport_free(nxt_event_set_t *event_set);
static void nxt_eventport_enable(nxt_event_set_t *event_set,
nxt_event_fd_t *ev);
static void nxt_eventport_disable(nxt_event_set_t *event_set,
nxt_event_fd_t *ev);
static void nxt_eventport_close(nxt_event_set_t *event_set, nxt_event_fd_t *ev);
static void nxt_eventport_drop_changes(nxt_event_set_t *event_set,
nxt_event_fd_t *ev);
static void nxt_eventport_enable_read(nxt_event_set_t *event_set,
nxt_event_fd_t *ev);
static void nxt_eventport_enable_write(nxt_event_set_t *event_set,
nxt_event_fd_t *ev);
static void nxt_eventport_enable_event(nxt_event_set_t *event_set,
nxt_event_fd_t *ev, nxt_uint_t events);
static void nxt_eventport_disable_read(nxt_event_set_t *event_set,
nxt_event_fd_t *ev);
static void nxt_eventport_disable_write(nxt_event_set_t *event_set,
nxt_event_fd_t *ev);
static void nxt_eventport_disable_event(nxt_event_set_t *event_set,
nxt_event_fd_t *ev);
static nxt_int_t nxt_eventport_commit_changes(nxt_thread_t *thr,
nxt_eventport_event_set_t *es);
static void nxt_eventport_error_handler(nxt_thread_t *thr, void *obj,
void *data);
static void nxt_eventport_block_read(nxt_event_set_t *event_set,
nxt_event_fd_t *ev);
static void nxt_eventport_block_write(nxt_event_set_t *event_set,
nxt_event_fd_t *ev);
static void nxt_eventport_oneshot_read(nxt_event_set_t *event_set,
nxt_event_fd_t *ev);
static void nxt_eventport_oneshot_write(nxt_event_set_t *event_set,
nxt_event_fd_t *ev);
static void nxt_eventport_enable_accept(nxt_event_set_t *event_set,
nxt_event_fd_t *ev);
static nxt_int_t nxt_eventport_enable_post(nxt_event_set_t *event_set,
nxt_work_handler_t handler);
static void nxt_eventport_signal(nxt_event_set_t *event_set, nxt_uint_t signo);
static void nxt_eventport_poll(nxt_thread_t *thr, nxt_event_set_t *event_set,
nxt_msec_t timeout);
/*
 * The Solaris event port implementation of nxt_event_set_ops_t.
 * Each entry is annotated with the vtable slot it fills.
 */
const nxt_event_set_ops_t  nxt_eventport_event_set = {
    "eventport",
    nxt_eventport_create,
    nxt_eventport_free,
    nxt_eventport_enable,
    nxt_eventport_disable,
    nxt_eventport_disable,           /* delete: same as disable */
    nxt_eventport_close,
    nxt_eventport_enable_read,
    nxt_eventport_enable_write,
    nxt_eventport_disable_read,
    nxt_eventport_disable_write,
    nxt_eventport_block_read,
    nxt_eventport_block_write,
    nxt_eventport_oneshot_read,
    nxt_eventport_oneshot_write,
    nxt_eventport_enable_accept,
    NULL,                            /* enable_file */
    NULL,                            /* close_file */
    nxt_eventport_enable_post,
    nxt_eventport_signal,
    nxt_eventport_poll,

    &nxt_unix_event_conn_io,

    NXT_NO_FILE_EVENTS,
    NXT_NO_SIGNAL_EVENTS,
};
/*
 * Create an event port based event set: allocate the change and event
 * arrays and create the port itself.  On any failure the partially
 * initialized set is released through nxt_eventport_free(); es->port
 * is preset to -1 so that cleanup can tell whether port_create() ran.
 */
static nxt_event_set_t *
nxt_eventport_create(nxt_event_signals_t *signals, nxt_uint_t mchanges,
    nxt_uint_t mevents)
{
    nxt_event_set_t            *event_set;
    nxt_eventport_event_set_t  *es;

    event_set = nxt_zalloc(sizeof(nxt_eventport_event_set_t));
    if (event_set == NULL) {
        return NULL;
    }

    es = &event_set->eventport;

    es->port = -1;
    es->mchanges = mchanges;
    es->mevents = mevents;

    es->changes = nxt_malloc(sizeof(nxt_eventport_change_t) * mchanges);
    if (es->changes == NULL) {
        goto fail;
    }

    es->events = nxt_malloc(sizeof(port_event_t) * mevents);
    if (es->events == NULL) {
        goto fail;
    }

    es->port = port_create();
    if (es->port == -1) {
        nxt_main_log_emerg("port_create() failed %E", nxt_errno);
        goto fail;
    }

    nxt_main_log_debug("port_create(): %d", es->port);

    if (signals != NULL) {
        /* Unix signals are routed through the port; remember the handler. */
        es->signal_handler = signals->handler;
    }

    return event_set;

fail:

    nxt_eventport_free(event_set);

    return NULL;
}
/*
 * Release an event port event set: close the port if it was actually
 * created (es->port is preset to -1 in nxt_eventport_create()) and free
 * the batched arrays.
 *
 * Fix: the original freed only es->events, leaking the es->changes
 * array allocated in nxt_eventport_create().  Passing a possibly NULL
 * pointer to nxt_free() on the partial-initialization failure path
 * matches the existing treatment of es->events.
 */
static void
nxt_eventport_free(nxt_event_set_t *event_set)
{
    nxt_eventport_event_set_t  *es;

    es = &event_set->eventport;

    nxt_main_log_debug("eventport %d free", es->port);

    if (es->port != -1) {
        if (close(es->port) != 0) {
            nxt_main_log_emerg("eventport close(%d) failed %E",
                               es->port, nxt_errno);
        }
    }

    nxt_free(es->changes);
    nxt_free(es->events);
    nxt_free(es);
}
/*
 * Activate both read and write notifications for the descriptor and
 * queue an association for POLLIN | POLLOUT.
 */
static void
nxt_eventport_enable(nxt_event_set_t *event_set, nxt_event_fd_t *ev)
{
    ev->read = NXT_EVENT_DEFAULT;
    ev->write = NXT_EVENT_DEFAULT;

    nxt_eventport_enable_event(event_set, ev, POLLIN | POLLOUT);
}
/*
 * Deactivate both directions; queues a dissociation unless the
 * descriptor is already fully inactive.
 */
static void
nxt_eventport_disable(nxt_event_set_t *event_set, nxt_event_fd_t *ev)
{
    if (ev->read == NXT_EVENT_INACTIVE && ev->write == NXT_EVENT_INACTIVE) {
        return;
    }

    ev->read = NXT_EVENT_INACTIVE;
    ev->write = NXT_EVENT_INACTIVE;

    nxt_eventport_disable_event(event_set, ev);
}
/*
 * Prepare for closing the descriptor: mark it inactive and purge any
 * pending change records that still reference the event.  The kernel
 * dissociates the descriptor automatically on close.
 */
static void
nxt_eventport_close(nxt_event_set_t *event_set, nxt_event_fd_t *ev)
{
    ev->read = NXT_EVENT_INACTIVE;
    ev->write = NXT_EVENT_INACTIVE;

    nxt_eventport_drop_changes(event_set, ev);
}
/*
 * Compact the pending changes array in place, discarding every record
 * that refers to the given event.
 */
static void
nxt_eventport_drop_changes(nxt_event_set_t *event_set, nxt_event_fd_t *ev)
{
    nxt_eventport_change_t     *keep, *p, *last;
    nxt_eventport_event_set_t  *es;

    es = &event_set->eventport;

    keep = es->changes;
    last = keep + es->nchanges;

    for (p = keep; p < last; p++) {

        if (p->event == ev) {
            continue;
        }

        if (keep != p) {
            *keep = *p;
        }

        keep++;
    }

    es->nchanges -= last - keep;
}
/*
 * Arm read notifications.  A blocked event is still associated in the
 * kernel, so only the state flag needs refreshing; otherwise queue an
 * association covering every currently active direction.
 */
static void
nxt_eventport_enable_read(nxt_event_set_t *event_set, nxt_event_fd_t *ev)
{
    nxt_uint_t  interest;

    if (ev->read != NXT_EVENT_BLOCKED) {

        interest = POLLIN;

        if (ev->write != NXT_EVENT_INACTIVE) {
            interest |= POLLOUT;
        }

        nxt_eventport_enable_event(event_set, ev, interest);
    }

    ev->read = NXT_EVENT_DEFAULT;
}
/*
 * Arm write notifications.  A blocked event is still associated in the
 * kernel, so only the state flag needs refreshing; otherwise queue an
 * association covering every currently active direction.
 */
static void
nxt_eventport_enable_write(nxt_event_set_t *event_set, nxt_event_fd_t *ev)
{
    nxt_uint_t  interest;

    if (ev->write != NXT_EVENT_BLOCKED) {

        interest = POLLOUT;

        if (ev->read != NXT_EVENT_INACTIVE) {
            interest |= POLLIN;
        }

        nxt_eventport_enable_event(event_set, ev, interest);
    }

    ev->write = NXT_EVENT_DEFAULT;
}
/*
 * eventport changes are batched to improve instruction and data
 * cache locality of several port_associate() and port_dissociate()
 * calls followed by port_getn() call.
 */

/*
 * Queue a change record for the descriptor with the given POLL* mask,
 * flushing the batch first if the changes array is full.
 */
static void
nxt_eventport_enable_event(nxt_event_set_t *event_set, nxt_event_fd_t *ev,
    nxt_uint_t events)
{
    nxt_eventport_change_t     *ch;
    nxt_eventport_event_set_t  *es;

    es = &event_set->eventport;

    nxt_log_debug(ev->log, "port %d set event: fd:%d ev:%04XD u:%p",
                  es->port, ev->fd, events, ev);

    if (es->nchanges >= es->mchanges) {
        (void) nxt_eventport_commit_changes(nxt_thread(), es);
    }

    ch = &es->changes[es->nchanges++];
    ch->fd = ev->fd;
    ch->events = events;
    ch->event = ev;
}
/*
 * Stop read notifications.  If write interest remains, the association
 * is narrowed to POLLOUT; otherwise the descriptor is dissociated.
 */
static void
nxt_eventport_disable_read(nxt_event_set_t *event_set, nxt_event_fd_t *ev)
{
    ev->read = NXT_EVENT_INACTIVE;

    if (ev->write != NXT_EVENT_INACTIVE) {
        nxt_eventport_enable_event(event_set, ev, POLLOUT);
        return;
    }

    nxt_eventport_disable_event(event_set, ev);
}
/*
 * Stop write notifications.  If read interest remains, the association
 * is narrowed to POLLIN; otherwise the descriptor is dissociated.
 */
static void
nxt_eventport_disable_write(nxt_event_set_t *event_set, nxt_event_fd_t *ev)
{
    ev->write = NXT_EVENT_INACTIVE;

    if (ev->read != NXT_EVENT_INACTIVE) {
        nxt_eventport_enable_event(event_set, ev, POLLIN);
        return;
    }

    nxt_eventport_disable_event(event_set, ev);
}
/*
 * Queue a dissociation record (a zero event mask) for the descriptor,
 * flushing the batch first if the changes array is full.
 */
static void
nxt_eventport_disable_event(nxt_event_set_t *event_set, nxt_event_fd_t *ev)
{
    nxt_eventport_change_t     *ch;
    nxt_eventport_event_set_t  *es;

    es = &event_set->eventport;

    nxt_log_debug(ev->log, "port %d disable event : fd:%d", es->port, ev->fd);

    if (es->nchanges >= es->mchanges) {
        (void) nxt_eventport_commit_changes(nxt_thread(), es);
    }

    ch = &es->changes[es->nchanges++];
    ch->fd = ev->fd;
    ch->events = 0;       /* zero mask means port_dissociate() on commit */
    ch->event = ev;
}
/*
 * Apply all batched changes: port_associate() for records with a
 * non-zero event mask, port_dissociate() for zero masks.  On a failed
 * call the event's error handler is posted to the main work queue and
 * NXT_ERROR is returned after the whole batch has been processed.
 */
static nxt_int_t
nxt_eventport_commit_changes(nxt_thread_t *thr, nxt_eventport_event_set_t *es)
{
    int                     ret;
    nxt_int_t               retval;
    nxt_event_fd_t          *ev;
    nxt_eventport_change_t  *ch, *end;

    nxt_log_debug(thr->log, "eventport %d changes:%ui", es->port, es->nchanges);

    retval = NXT_OK;
    ch = es->changes;
    end = ch + es->nchanges;

    do {
        ev = ch->event;

        if (ch->events != 0) {
            nxt_log_debug(ev->log, "port_associate(%d): fd:%d ev:%04XD u:%p",
                          es->port, ch->fd, ch->events, ev);

            ret = port_associate(es->port, PORT_SOURCE_FD, ch->fd,
                                 ch->events, ev);
            if (ret == 0) {
                goto next;
            }

            nxt_log_alert(ev->log,
                          "port_associate(%d, %d, %d, %04XD) failed %E",
                          es->port, PORT_SOURCE_FD, ch->fd, ch->events,
                          nxt_errno);

        } else {
            nxt_log_debug(ev->log, "port_dissociate(%d): fd:%d",
                          es->port, ch->fd);

            if (port_dissociate(es->port, PORT_SOURCE_FD, ch->fd) == 0) {
                goto next;
            }

            nxt_log_alert(ev->log, "port_dissociate(%d, %d, %d) failed %E",
                          es->port, PORT_SOURCE_FD, ch->fd, nxt_errno);
        }

        /* Defer error delivery so the whole batch is still applied. */
        nxt_thread_work_queue_add(thr, &thr->work_queue.main,
                                  nxt_eventport_error_handler,
                                  ev, ev->data, ev->log);

        retval = NXT_ERROR;

    next:

        ch++;

    } while (ch < end);

    es->nchanges = 0;

    return retval;
}
/* Deactivates both directions, then hands the error to the owner. */
static void
nxt_eventport_error_handler(nxt_thread_t *thr, void *obj, void *data)
{
    nxt_event_fd_t  *ev = obj;

    ev->read = NXT_EVENT_INACTIVE;
    ev->write = NXT_EVENT_INACTIVE;

    ev->error_handler(thr, ev, data);
}
/* Suppresses read handler invocation without dissociating the fd. */
static void
nxt_eventport_block_read(nxt_event_set_t *event_set, nxt_event_fd_t *ev)
{
    if (ev->read == NXT_EVENT_INACTIVE) {
        return;
    }

    ev->read = NXT_EVENT_BLOCKED;
}
/* Suppresses write handler invocation without dissociating the fd. */
static void
nxt_eventport_block_write(nxt_event_set_t *event_set, nxt_event_fd_t *ev)
{
    if (ev->write == NXT_EVENT_INACTIVE) {
        return;
    }

    ev->write = NXT_EVENT_BLOCKED;
}
/* Arms a one-shot read notification unless reading is already active. */
static void
nxt_eventport_oneshot_read(nxt_event_set_t *event_set, nxt_event_fd_t *ev)
{
    if (ev->read != NXT_EVENT_INACTIVE) {
        return;
    }

    ev->read = NXT_EVENT_DEFAULT;
    nxt_eventport_enable_event(event_set, ev, POLLIN);
}
/* Arms a one-shot write notification unless writing is already active. */
static void
nxt_eventport_oneshot_write(nxt_event_set_t *event_set, nxt_event_fd_t *ev)
{
    if (ev->write != NXT_EVENT_INACTIVE) {
        return;
    }

    ev->write = NXT_EVENT_DEFAULT;
    nxt_eventport_enable_event(event_set, ev, POLLOUT);
}
/*
 * Enables accept notifications on a listen socket.  Accept sockets use
 * the level-triggered read state, so nxt_eventport_poll() re-associates
 * them after every wakeup instead of leaving them one-shot.
 */
static void
nxt_eventport_enable_accept(nxt_event_set_t *event_set, nxt_event_fd_t *ev)
{
    ev->read = NXT_EVENT_LEVEL;

    nxt_eventport_enable_event(event_set, ev, POLLIN);
}
/*
 * Registers the handler run for posted user events (port_send() with
 * signo == 0, see nxt_eventport_poll()).  Always succeeds.
 */
static nxt_int_t
nxt_eventport_enable_post(nxt_event_set_t *event_set,
    nxt_work_handler_t handler)
{
    event_set->eventport.post_handler = handler;

    return NXT_OK;
}
/* Delivers a signal number to the port as a PORT_SOURCE_USER event. */
static void
nxt_eventport_signal(nxt_event_set_t *event_set, nxt_uint_t signo)
{
    nxt_eventport_event_set_t  *es = &event_set->eventport;

    nxt_thread_log_debug("port_send(%d, %ui)", es->port, signo);

    if (port_send(es->port, signo, NULL) != 0) {
        nxt_thread_log_alert("port_send(%d) failed %E", es->port, nxt_errno);
    }
}
/*
 * Polls the event port and dispatches file descriptor, posted, and
 * signal events.  Pending association changes are committed first; if
 * that fails, the timeout is forced to zero so the enqueued error
 * handlers run right after the poll.  Event ports are a one-shot
 * facility, so any direction still active after dispatch is
 * re-associated at the end of the PORT_SOURCE_FD case.
 */
static void
nxt_eventport_poll(nxt_thread_t *thr, nxt_event_set_t *event_set,
    nxt_msec_t timeout)
{
    int                         n, events, signo;
    uint_t                      nevents;
    nxt_err_t                   err;
    nxt_uint_t                  i, level;
    timespec_t                  ts, *tp;
    port_event_t                *event;
    nxt_event_fd_t              *ev;
    nxt_work_handler_t          handler;
    nxt_eventport_event_set_t   *es;

    es = &event_set->eventport;

    if (es->nchanges != 0) {
        if (nxt_eventport_commit_changes(thr, es) != NXT_OK) {
            /* Error handlers have been enqueued on failure. */
            timeout = 0;
        }
    }

    if (timeout == NXT_INFINITE_MSEC) {
        tp = NULL;

    } else {
        ts.tv_sec = timeout / 1000;
        ts.tv_nsec = (timeout % 1000) * 1000000;
        tp = &ts;
    }

    nxt_log_debug(thr->log, "port_getn(%d) timeout: %M", es->port, timeout);

    /*
     * A trap for possible error when Solaris does not update nevents
     * if ETIME or EINTR is returned.  This issue will be logged as
     * "unexpected port_getn() event".
     *
     * The details are in OpenSolaris mailing list thread "port_getn()
     * and timeouts - is this a bug or an undocumented feature?"
     */
    event = &es->events[0];
    event->portev_events = -1;   /* invalid port events */
    event->portev_source = -1;   /* invalid port source */
    event->portev_object = -1;
    event->portev_user = (void *) -1;

    nevents = 1;
    n = port_getn(es->port, es->events, es->mevents, &nevents, tp);

    /*
     * 32-bit port_getn() on Solaris 10 x86 returns large negative
     * values instead of 0 when returning immediately.
     */
    err = (n < 0) ? nxt_errno : 0;

    nxt_thread_time_update(thr);

    if (n == -1) {
        if (err == NXT_ETIME || err == NXT_EINTR) {
            if (nevents != 0) {
                nxt_log_alert(thr->log, "port_getn(%d) failed %E, events:%ud",
                              es->port, err, nevents);
            }
        }

        if (err != NXT_ETIME) {
            level = (err == NXT_EINTR) ? NXT_LOG_INFO : NXT_LOG_ALERT;

            nxt_log_error(level, thr->log, "port_getn(%d) failed %E",
                          es->port, err);

            if (err != NXT_EINTR) {
                return;
            }
        }
    }

    nxt_log_debug(thr->log, "port_getn(%d) events: %d", es->port, nevents);

    for (i = 0; i < nevents; i++) {
        event = &es->events[i];

        switch (event->portev_source) {

        case PORT_SOURCE_FD:
            ev = event->portev_user;
            events = event->portev_events;

            nxt_log_debug(ev->log, "eventport: fd:%d ev:%04Xd u:%p rd:%d wr:%d",
                          event->portev_object, events, ev,
                          ev->read, ev->write);

            /*
             * The "!= 0" comparison belongs inside the prediction hint
             * so that nxt_slow_path() marks the whole error condition,
             * not the hint's own result.
             */
            if (nxt_slow_path((events & (POLLERR | POLLHUP | POLLNVAL)) != 0)) {
                nxt_log_alert(ev->log,
                              "port_getn(%d) error fd:%d events:%04Xud",
                              es->port, ev->fd, events);

                nxt_thread_work_queue_add(thr, &thr->work_queue.main,
                                          nxt_eventport_error_handler,
                                          ev, ev->data, ev->log);
                continue;
            }

            if (events & POLLIN) {
                ev->read_ready = 1;

                if (ev->read != NXT_EVENT_BLOCKED) {
                    nxt_thread_work_queue_add(thr, ev->read_work_queue,
                                              ev->read_handler,
                                              ev, ev->data, ev->log);
                }

                /* Level-triggered accept sockets stay active. */
                if (ev->read != NXT_EVENT_LEVEL) {
                    ev->read = NXT_EVENT_INACTIVE;
                }
            }

            if (events & POLLOUT) {
                ev->write_ready = 1;

                if (ev->write != NXT_EVENT_BLOCKED) {
                    nxt_thread_work_queue_add(thr, ev->write_work_queue,
                                              ev->write_handler,
                                              ev, ev->data, ev->log);
                }

                ev->write = NXT_EVENT_INACTIVE;
            }

            /*
             * Reactivate counterpart direction, because the
             * eventport is oneshot notification facility.
             */
            events = (ev->read == NXT_EVENT_INACTIVE) ? 0 : POLLIN;
            events |= (ev->write == NXT_EVENT_INACTIVE) ? 0 : POLLOUT;

            if (events != 0) {
                nxt_eventport_enable_event(event_set, ev, events);
            }

            break;

        case PORT_SOURCE_USER:
            nxt_log_debug(thr->log, "eventport: user ev:%d u:%p",
                          event->portev_events, event->portev_user);

            signo = event->portev_events;

            /* signo 0 is a plain post; otherwise a forwarded signal. */
            handler = (signo == 0) ? es->post_handler : es->signal_handler;

            nxt_thread_work_queue_add(thr, &thr->work_queue.main, handler,
                                      (void *) (uintptr_t) signo, NULL,
                                      thr->log);
            break;

        default:
            nxt_log_alert(thr->log, "unexpected port_getn(%d) event: "
                          "ev:%d src:%d obj:%p u:%p",
                          es->port, event->portev_events,
                          event->portev_source, event->portev_object,
                          event->portev_user);
        }
    }
}

View File

@@ -0,0 +1,307 @@
/*
* Copyright (C) Igor Sysoev
* Copyright (C) NGINX, Inc.
*/
#include <nxt_main.h>
#define NXT_FASTCGI_DATA_MIDDLE 0
#define NXT_FASTCGI_DATA_END_ON_BORDER 1
#define NXT_FASTCGI_DATA_END 2
static nxt_int_t nxt_fastcgi_buffer(nxt_fastcgi_parse_t *fp, nxt_buf_t ***tail,
nxt_buf_t *in);
/*
 * Parses a stream of FastCGI records from the buffer chain "in" and
 * splits record content into two output chains: fp->out[0] (STDOUT)
 * and fp->out[1] (STDERR).  The parser is resumable: its position in
 * the 8-byte record header is kept in fp->state, so input may be cut
 * at any byte boundary.  On an END_REQUEST record fp->done is set; on
 * a malformed record fp->fastcgi_error is set; on an internal error
 * fp->error is set.  In the two latter cases the stdout chain is
 * terminated via the fp->last_buf() callback.
 */
void
nxt_fastcgi_record_parse(nxt_fastcgi_parse_t *fp, nxt_buf_t *in)
{
    u_char        ch;
    nxt_int_t     ret, stream;
    nxt_buf_t     *b, *nb, **tail[2];
    const char    *msg;
    nxt_thread_t  *thr;

    /* One state per record header byte, then content/padding states. */
    enum {
        sw_fastcgi_version = 0,
        sw_fastcgi_type,
        sw_fastcgi_request_id_high,
        sw_fastcgi_request_id_low,
        sw_fastcgi_content_length_high,
        sw_fastcgi_content_length_low,
        sw_fastcgi_padding_length,
        sw_fastcgi_reserved,
        sw_fastcgi_data,
        sw_fastcgi_padding,
        sw_fastcgi_end_request,
    } state;

    fp->out[0] = NULL;
    fp->out[1] = NULL;

    tail[0] = &fp->out[0];
    tail[1] = &fp->out[1];

    state = fp->state;

    for (b = in; b != NULL; b = b->next) {

        /* Pass sync buffers through on the stdout chain untouched. */
        if (nxt_buf_is_sync(b)) {
            **tail = b;
            *tail = &b->next;
            continue;
        }

        fp->pos = b->mem.pos;

        while (fp->pos < b->mem.free) {

            /*
             * The sw_fastcgi_data state is tested outside the
             * switch to preserve fp->pos and to not touch memory.
             */
            if (state == sw_fastcgi_data) {

                /*
                 * fp->type here can be only NXT_FASTCGI_STDOUT
                 * or NXT_FASTCGI_STDERR.  NXT_FASTCGI_END_REQUEST
                 * is tested in sw_fastcgi_reserved.
                 */
                stream = fp->type - NXT_FASTCGI_STDOUT;

                ret = nxt_fastcgi_buffer(fp, &tail[stream], b);

                if (ret == NXT_FASTCGI_DATA_MIDDLE) {
                    /* Content continues in the next buffer. */
                    goto next;
                }

                if (nxt_slow_path(ret == NXT_ERROR)) {
                    fp->error = 1;
                    goto done;
                }

                if (fp->padding == 0) {
                    state = sw_fastcgi_version;
                } else {
                    state = sw_fastcgi_padding;
                }

                if (ret == NXT_FASTCGI_DATA_END_ON_BORDER) {
                    goto next;
                }

                /* ret == NXT_FASTCGI_DATA_END */
            }

            ch = *fp->pos++;

            nxt_thread_log_debug("fastcgi record byte: %02Xd", ch);

            switch (state) {

            case sw_fastcgi_version:
                if (nxt_fast_path(ch == 1)) {
                    state = sw_fastcgi_type;
                    continue;
                }
                msg = "unsupported FastCGI protocol version";
                goto fastcgi_error;

            case sw_fastcgi_type:
                switch (ch) {
                case NXT_FASTCGI_STDOUT:
                case NXT_FASTCGI_STDERR:
                case NXT_FASTCGI_END_REQUEST:
                    fp->type = ch;
                    state = sw_fastcgi_request_id_high;
                    continue;
                default:
                    msg = "invalid FastCGI record type";
                    goto fastcgi_error;
                }

            case sw_fastcgi_request_id_high:
                /* FastCGI multiplexing is not supported. */
                if (nxt_fast_path(ch == 0)) {
                    state = sw_fastcgi_request_id_low;
                    continue;
                }
                msg = "unexpected FastCGI request ID high byte";
                goto fastcgi_error;

            case sw_fastcgi_request_id_low:
                /* The single supported request always has ID 1. */
                if (nxt_fast_path(ch == 1)) {
                    state = sw_fastcgi_content_length_high;
                    continue;
                }
                msg = "unexpected FastCGI request ID low byte";
                goto fastcgi_error;

            case sw_fastcgi_content_length_high:
                fp->length = ch << 8;
                state = sw_fastcgi_content_length_low;
                continue;

            case sw_fastcgi_content_length_low:
                fp->length |= ch;
                state = sw_fastcgi_padding_length;
                continue;

            case sw_fastcgi_padding_length:
                fp->padding = ch;
                state = sw_fastcgi_reserved;
                continue;

            case sw_fastcgi_reserved:
                nxt_thread_log_debug("fastcgi record type:%d "
                                     "length:%uz padding:%d",
                                     fp->type, fp->length, fp->padding);

                if (nxt_fast_path(fp->type != NXT_FASTCGI_END_REQUEST)) {
                    state = sw_fastcgi_data;
                    continue;
                }

                state = sw_fastcgi_end_request;
                continue;

            case sw_fastcgi_data:
                /*
                 * This state is processed before the switch.
                 * It added here just to suppress a warning.
                 */
                continue;

            case sw_fastcgi_padding:
                /*
                 * No special fast processing of padding
                 * because it usually takes just 1-7 bytes.
                 */
                fp->padding--;

                if (fp->padding == 0) {
                    nxt_thread_log_debug("fastcgi record end");
                    state = sw_fastcgi_version;
                }
                continue;

            case sw_fastcgi_end_request:
                /* Just skip 8 bytes of END_REQUEST. */
                fp->length--;

                if (fp->length != 0) {
                    continue;
                }

                fp->done = 1;

                nxt_thread_log_debug("fastcgi end request");

                goto done;
            }
        }

        if (b->retain == 0) {
            /* No record data was found in a buffer. */
            thr = nxt_thread();
            nxt_thread_current_work_queue_add(thr, b->completion_handler,
                                              b, b->parent, thr->log);
        }

    next:

        continue;
    }

    /* Input exhausted mid-record: save state for the next call. */
    fp->state = state;

    return;

fastcgi_error:

    nxt_thread_log_error(NXT_LOG_ERR, "upstream sent %s: %d", msg, ch);

    fp->fastcgi_error = 1;

done:

    /* Terminate the stdout chain with the caller-provided last buffer. */
    nb = fp->last_buf(fp);

    if (nxt_fast_path(nb != NULL)) {
        *tail[0] = nb;

    } else {
        fp->error = 1;
    }

    // STUB: fp->fastcgi_error = 1;
    // STUB: fp->error = 1;

    return;
}
/*
 * Appends up to fp->length bytes of record content from buffer "in" to
 * the output chain at *tail.  When the record consumes the whole
 * remainder of the buffer and nothing was emitted from it yet
 * (in->retain == 0), the original buffer is linked directly; otherwise
 * a shadow buffer referencing the region is allocated and "in" is
 * retained so its completion is deferred.
 *
 * Returns NXT_FASTCGI_DATA_END when the record content ends inside the
 * buffer, NXT_FASTCGI_DATA_END_ON_BORDER when it ends exactly at the
 * buffer end, NXT_FASTCGI_DATA_MIDDLE when more content follows in
 * subsequent buffers, or NXT_ERROR on allocation failure.
 */
static nxt_int_t
nxt_fastcgi_buffer(nxt_fastcgi_parse_t *fp, nxt_buf_t ***tail, nxt_buf_t *in)
{
    u_char     *p;
    size_t     size;
    nxt_buf_t  *b;

    if (fp->length == 0) {
        return NXT_FASTCGI_DATA_END;
    }

    p = fp->pos;
    size = in->mem.free - p;

    if (fp->length >= size && in->retain == 0) {
        /*
         * Use original buffer if the buffer is lesser than or equal to
         * FastCGI record size and this is the first record in the buffer.
         */
        in->mem.pos = p;
        **tail = in;
        *tail = &in->next;

    } else {
        b = nxt_buf_mem_alloc(fp->mem_pool, 0, 0);
        if (nxt_slow_path(b == NULL)) {
            return NXT_ERROR;
        }

        **tail = b;
        *tail = &b->next;

        /* The shadow buffer keeps "in" alive until it completes. */
        b->parent = in;
        in->retain++;
        b->mem.pos = p;
        b->mem.start = p;

        if (fp->length < size) {
            /* The record ends inside this buffer. */
            p += fp->length;
            fp->pos = p;

            b->mem.free = p;
            b->mem.end = p;

            return NXT_FASTCGI_DATA_END;
        }

        b->mem.free = in->mem.free;
        b->mem.end = in->mem.free;
    }

    fp->length -= size;

    if (fp->length == 0) {
        return NXT_FASTCGI_DATA_END_ON_BORDER;
    }

    return NXT_FASTCGI_DATA_MIDDLE;
}

756
src/nxt_fastcgi_source.c Normal file
View File

@@ -0,0 +1,756 @@
/*
* Copyright (C) Igor Sysoev
* Copyright (C) NGINX, Inc.
*/
#include <nxt_main.h>
#define NXT_FASTCGI_RESPONDER 1
#define NXT_FASTCGI_KEEP_CONN 1
typedef struct {
u_char *buf;
uint32_t len;
u_char length[4];
} nxt_fastcgi_param_t;
/*
 * Stores a 16-bit FastCGI record content length into the two
 * big-endian length bytes at p[0] (high) and p[1] (low).
 */
#define \
nxt_fastcgi_set_record_length(p, length)                                      \
    do {                                                                      \
        uint32_t  len = length;                                               \
                                                                              \
        p[1] = (u_char) len;  len >>= 8;                                      \
        p[0] = (u_char) len;                                                  \
    } while (0)
/*
 * Encodes a FastCGI PARAMS name or value length at p: one byte when
 * the length is below 128, otherwise four big-endian bytes with the
 * high bit of the first byte set.  Returns the number of bytes used.
 */
nxt_inline size_t
nxt_fastcgi_param_length(u_char *p, uint32_t length)
{
    if (nxt_slow_path(length >= 128)) {
        p[3] = (u_char) length;
        p[2] = (u_char) (length >> 8);
        p[1] = (u_char) (length >> 16);
        p[0] = (u_char) ((length >> 24) | 0x80);
        return 4;
    }

    *p = (u_char) length;
    return 1;
}
static nxt_buf_t *nxt_fastcgi_request_create(nxt_fastcgi_source_t *fs);
static nxt_int_t nxt_fastcgi_next_param(nxt_fastcgi_source_t *fs,
nxt_fastcgi_param_t *param);
static void nxt_fastcgi_source_record_filter(nxt_thread_t *thr, void *obj,
void *data);
static void nxt_fastcgi_source_record_error(nxt_thread_t *thr, void *obj,
void *data);
static void nxt_fastcgi_source_header_filter(nxt_thread_t *thr, void *obj,
void *data);
static void nxt_fastcgi_source_sync_buffer(nxt_thread_t *thr,
nxt_fastcgi_source_t *fs, nxt_buf_t *b);
static nxt_int_t nxt_fastcgi_source_header_process(nxt_fastcgi_source_t *fs);
static nxt_int_t nxt_fastcgi_source_status(nxt_upstream_source_t *us,
nxt_name_value_t *nv);
static nxt_int_t nxt_fastcgi_source_content_length(nxt_upstream_source_t *us,
nxt_name_value_t *nv);
static void nxt_fastcgi_source_header_ready(nxt_fastcgi_source_t *fs,
nxt_buf_t *b);
static void nxt_fastcgi_source_body_filter(nxt_thread_t *thr, void *obj,
void *data);
static nxt_buf_t *nxt_fastcgi_source_last_buf(nxt_fastcgi_parse_t *fp);
static void nxt_fastcgi_source_error(nxt_stream_source_t *stream);
static void nxt_fastcgi_source_fail(nxt_fastcgi_source_t *fs);
/*
* A FastCGI request:
* FCGI_BEGIN_REQUEST record;
* Several FCGI_PARAMS records, the last FCGI_PARAMS record must have
* zero content length,
* Several FCGI_STDIN records, the last FCGI_STDIN record must have
* zero content length.
*/
/* Prebuilt record templates; length/padding bytes are patched later. */

static const uint8_t  nxt_fastcgi_begin_request[] = {
    1,                                  /* FastCGI version.                  */
    NXT_FASTCGI_BEGIN_REQUEST,          /* The BEGIN_REQUEST record type.    */
    0, 1,                               /* Request ID.                       */
    0, 8,                               /* Content length of the Role record. */
    0,                                  /* Padding length.                   */
    0,                                  /* Reserved.                         */

    0, NXT_FASTCGI_RESPONDER,           /* The Responder Role.               */
    0,                                  /* Flags.                            */
    0, 0, 0, 0, 0,                      /* Reserved.                         */
};

static const uint8_t  nxt_fastcgi_params_record[] = {
    1,                                  /* FastCGI version.                  */
    NXT_FASTCGI_PARAMS,                 /* The PARAMS record type.           */
    0, 1,                               /* Request ID.                       */
    0, 0,                               /* Content length.                   */
    0,                                  /* Padding length.                   */
    0,                                  /* Reserved.                         */
};

static const uint8_t  nxt_fastcgi_stdin_record[] = {
    1,                                  /* FastCGI version.                  */
    NXT_FASTCGI_STDIN,                  /* The STDIN record type.            */
    0, 1,                               /* Request ID.                       */
    0, 0,                               /* Content length.                   */
    0,                                  /* Padding length.                   */
    0,                                  /* Reserved.                         */
};
/*
 * Entry point of the FastCGI upstream source.  Allocates the source
 * context, wires the filter chain (stream source | record filter |
 * HTTP header filter), builds the request buffer chain via the
 * caller-supplied request_create callback, and starts the upstream
 * connection.  Any failure takes the error path through
 * nxt_fastcgi_source_fail().
 */
void
nxt_fastcgi_source_handler(nxt_upstream_source_t *us,
    nxt_fastcgi_source_request_create_t request_create)
{
    nxt_stream_source_t   *stream;
    nxt_fastcgi_source_t  *fs;

    fs = nxt_mem_zalloc(us->buffers.mem_pool, sizeof(nxt_fastcgi_source_t));
    if (nxt_slow_path(fs == NULL)) {
        goto fail;
    }

    us->protocol_source = fs;

    fs->header_in.list = nxt_list_create(us->buffers.mem_pool, 8,
                                         sizeof(nxt_name_value_t));
    if (nxt_slow_path(fs->header_in.list == NULL)) {
        goto fail;
    }

    fs->header_in.hash = us->header_hash;
    fs->upstream = us;
    fs->request_create = request_create;

    /* Reuse the upstream's existing stream source when present. */
    stream = us->stream;

    if (stream == NULL) {
        stream = nxt_mem_zalloc(us->buffers.mem_pool,
                                sizeof(nxt_stream_source_t));
        if (nxt_slow_path(stream == NULL)) {
            goto fail;
        }

        us->stream = stream;
        stream->upstream = us;

    } else {
        nxt_memzero(stream, sizeof(nxt_stream_source_t));
    }

    /*
     * Create the FastCGI source filter chain:
     *    stream source | FastCGI record filter | FastCGI HTTP header filter
     */
    stream->next = &fs->query;
    stream->error_handler = nxt_fastcgi_source_error;

    fs->record.next.context = fs;
    fs->record.next.filter = nxt_fastcgi_source_header_filter;

    fs->record.parse.last_buf = nxt_fastcgi_source_last_buf;
    fs->record.parse.data = fs;
    fs->record.parse.mem_pool = us->buffers.mem_pool;

    fs->query.context = &fs->record.parse;
    fs->query.filter = nxt_fastcgi_source_record_filter;

    /* -1 means the body length is unknown until a header provides it. */
    fs->header_in.content_length = -1;

    stream->out = nxt_fastcgi_request_create(fs);

    if (nxt_fast_path(stream->out != NULL)) {
        nxt_memzero(&fs->u.header, sizeof(nxt_http_split_header_parse_t));
        fs->u.header.mem_pool = fs->upstream->buffers.mem_pool;

        nxt_stream_source_connect(stream);
        return;
    }

fail:

    nxt_fastcgi_source_fail(fs);
}
/*
 * Builds the complete FastCGI request as a buffer chain: a
 * BEGIN_REQUEST record, one or more PARAMS records filled by
 * repeatedly calling nxt_fastcgi_next_param(), then a terminating
 * empty PARAMS record and an empty STDIN record.  Records are 8-byte
 * aligned; a full record is closed and continued in a new record (or a
 * new buffer when space runs out).  Returns the head of the chain or
 * NULL on allocation error.
 */
static nxt_buf_t *
nxt_fastcgi_request_create(nxt_fastcgi_source_t *fs)
{
    u_char               *p, *record_length;
    size_t               len, size, max_record_size;
    nxt_int_t            ret;
    nxt_buf_t            *b, *req, **prev;
    nxt_bool_t           begin_request;
    nxt_fastcgi_param_t  param;

    nxt_thread_log_debug("fastcgi request");

    begin_request = 1;
    param.len = 0;
    prev = &req;

new_buffer:

    ret = nxt_buf_pool_mem_alloc(&fs->upstream->buffers, 0);
    if (nxt_slow_path(ret != NXT_OK)) {
        return NULL;
    }

    /* Take ownership of the buffer pool's current buffer. */
    b = fs->upstream->buffers.current;
    fs->upstream->buffers.current = NULL;

    *prev = b;
    prev = &b->next;

new_record:

    /* Usable content space: align down and reserve the 8-byte header. */
    size = b->mem.end - b->mem.free;
    size = nxt_align_size(size, 8) - 8;

    /* The maximal FastCGI record content size is 65535.  65528 is 64K - 8. */
    max_record_size = nxt_min(65528, size);

    p = b->mem.free;

    if (begin_request) {
        /* TODO: fastcgi keep conn in flags. */
        p = nxt_cpymem(p, nxt_fastcgi_begin_request, 16);
        max_record_size -= 16;
        begin_request = 0;
    }

    b->mem.free = nxt_cpymem(p, nxt_fastcgi_params_record, 8);

    /* Content length bytes of the PARAMS header, patched below. */
    record_length = &p[4];
    size = 0;

    for ( ;; ) {
        if (param.len == 0) {
            /* Fetch the next parameter chunk. */
            ret = nxt_fastcgi_next_param(fs, &param);

            if (nxt_slow_path(ret != NXT_OK)) {
                if (nxt_slow_path(ret == NXT_ERROR)) {
                    return NULL;
                }

                /* ret == NXT_DONE */
                break;
            }
        }

        len = max_record_size;

        if (nxt_fast_path(len >= param.len)) {
            /* The whole chunk fits into the current record. */
            len = param.len;
            param.len = 0;

        } else {
            param.len -= len;
        }

        nxt_thread_log_debug("fastcgi copy len:%uz", len);

        b->mem.free = nxt_cpymem(b->mem.free, param.buf, len);

        size += len;
        max_record_size -= len;

        if (nxt_slow_path(param.len != 0)) {
            /* The record is full. */

            param.buf += len;

            nxt_thread_log_debug("fastcgi content size:%uz", size);

            nxt_fastcgi_set_record_length(record_length, size);

            /* The minimal size of aligned record with content is 16 bytes. */
            if (b->mem.end - b->mem.free >= 16) {
                goto new_record;
            }

            nxt_thread_log_debug("\"%*s\"", b->mem.free - b->mem.pos,
                                 b->mem.pos);

            goto new_buffer;
        }
    }

    nxt_thread_log_debug("fastcgi content size:%uz", size);

    nxt_fastcgi_set_record_length(record_length, size);

    /* A padding length. */
    size = 8 - size % 8;
    record_length[2] = (u_char) size;

    nxt_memzero(b->mem.free, size);
    b->mem.free += size;

    nxt_thread_log_debug("fastcgi padding:%uz", size);

    /* 16 bytes are needed for the two terminating empty records. */
    if (b->mem.end - b->mem.free < 16) {
        nxt_thread_log_debug("\"%*s\"", b->mem.free - b->mem.pos, b->mem.pos);

        b = nxt_buf_mem_alloc(fs->upstream->buffers.mem_pool, 16, 0);
        if (nxt_slow_path(b == NULL)) {
            return NULL;
        }

        *prev = b;
        prev = &b->next;
    }

    /* The end of FastCGI params. */
    p = nxt_cpymem(b->mem.free, nxt_fastcgi_params_record, 8);

    /* The end of FastCGI stdin. */
    b->mem.free = nxt_cpymem(p, nxt_fastcgi_stdin_record, 8);

    nxt_thread_log_debug("\"%*s\"", b->mem.free - b->mem.pos, b->mem.pos);

    return req;
}
/*
 * Supplies the next chunk of FastCGI PARAMS content.  Each header is
 * emitted across four consecutive calls driven by fs->state: encoded
 * name length, encoded value length, name bytes, value bytes.  The
 * request_create callback is invoked at the start of each header to
 * fetch the next name/value pair; it returns NXT_DONE when no headers
 * remain.
 */
static nxt_int_t
nxt_fastcgi_next_param(nxt_fastcgi_source_t *fs, nxt_fastcgi_param_t *param)
{
    nxt_int_t  ret;

    enum {
        sw_name_length = 0,
        sw_value_length,
        sw_name,
        sw_value,
    };

    switch (fs->state) {

    case sw_name_length:
        /* Fetch the next name/value pair from the caller. */
        ret = fs->request_create(fs);

        if (nxt_slow_path(ret != NXT_OK)) {
            return ret;
        }

        nxt_thread_log_debug("fastcgi param \"%V: %V\"",
                             &fs->u.request.name, &fs->u.request.value);

        fs->state = sw_value_length;
        param->buf = param->length;
        param->len = nxt_fastcgi_param_length(param->length,
                                              fs->u.request.name.len);
        break;

    case sw_value_length:
        fs->state = sw_name;
        param->buf = param->length;
        param->len = nxt_fastcgi_param_length(param->length,
                                              fs->u.request.value.len);
        break;

    case sw_name:
        fs->state = sw_value;
        param->buf = fs->u.request.name.data;
        param->len = fs->u.request.name.len;
        break;

    case sw_value:
        /* Wrap around: the next call starts a new header. */
        fs->state = sw_name_length;
        param->buf = fs->u.request.value.data;
        param->len = fs->u.request.value.len;
        break;
    }

    return NXT_OK;
}
/*
 * Source filter stage that parses raw upstream bytes into FastCGI
 * records.  STDERR content (parse.out[1]) is logged and released;
 * STDOUT content (parse.out[0]) is passed to the next filter.  On a
 * FastCGI protocol error the failure is deferred to a queued work item
 * so that data parsed before the error is still delivered first.
 */
static void
nxt_fastcgi_source_record_filter(nxt_thread_t *thr, void *obj, void *data)
{
    size_t                       size;
    u_char                       *p;
    nxt_buf_t                    *b, *in;
    nxt_fastcgi_source_t         *fs;
    nxt_fastcgi_source_record_t  *fsr;

    fsr = obj;
    in = data;

    nxt_log_debug(thr->log, "fastcgi source record filter");

    /* Ignore any input after the END_REQUEST record has been seen. */
    if (nxt_slow_path(fsr->parse.done)) {
        return;
    }

    nxt_fastcgi_record_parse(&fsr->parse, in);

    fs = nxt_container_of(fsr, nxt_fastcgi_source_t, record);

    if (fsr->parse.error) {
        nxt_fastcgi_source_fail(fs);
        return;
    }

    if (fsr->parse.fastcgi_error) {
        /*
         * Output all parsed before a FastCGI record error and close upstream.
         */
        nxt_thread_current_work_queue_add(thr, nxt_fastcgi_source_record_error,
                                          fs, NULL, thr->log);
    }

    /* Log FastCGI stderr output. */

    for (b = fsr->parse.out[1]; b != NULL; b = b->next) {

        /* Trim trailing CR/LF bytes before logging. */
        for (p = b->mem.free - 1; p >= b->mem.pos; p--) {
            if (*p != NXT_CR && *p != NXT_LF) {
                break;
            }
        }

        size = (p + 1) - b->mem.pos;

        if (size != 0) {
            nxt_log_error(NXT_LOG_ERR, thr->log,
                          "upstream sent in FastCGI stderr: \"%*s\"",
                          size, b->mem.pos);
        }

        b->completion_handler(thr, b, b->parent);
    }

    /* Process FastCGI stdout output. */

    if (fsr->parse.out[0] != NULL) {
        nxt_source_filter(thr, fs->upstream->work_queue, &fsr->next,
                          fsr->parse.out[0]);
    }
}
/* Deferred failure posted by the record filter on a protocol error. */
static void
nxt_fastcgi_source_record_error(nxt_thread_t *thr, void *obj, void *data)
{
    nxt_fastcgi_source_fail(obj);
}
/*
 * Filter stage that parses HTTP-style response headers out of the
 * FastCGI STDOUT stream.  Runs the split-header parser over the buffer
 * chain; each complete header line is handed to
 * nxt_fastcgi_source_header_process().  NXT_DONE (the blank line)
 * transfers control to the body filter, NXT_AGAIN awaits more input,
 * anything else fails the source.
 */
static void
nxt_fastcgi_source_header_filter(nxt_thread_t *thr, void *obj, void *data)
{
    nxt_int_t             ret;
    nxt_buf_t             *b;
    nxt_fastcgi_source_t  *fs;

    fs = obj;
    b = data;

    do {
        nxt_log_debug(thr->log, "fastcgi source header filter");

        /* A sync buffer signals truncation or buffer exhaustion. */
        if (nxt_slow_path(nxt_buf_is_sync(b))) {
            nxt_fastcgi_source_sync_buffer(thr, fs, b);
            return;
        }

        for ( ;; ) {
            ret = nxt_http_split_header_parse(&fs->u.header, &b->mem);

            if (nxt_slow_path(ret != NXT_OK)) {
                break;
            }

            ret = nxt_fastcgi_source_header_process(fs);

            if (nxt_slow_path(ret != NXT_OK)) {
                break;
            }
        }

        if (nxt_fast_path(ret == NXT_DONE)) {
            nxt_log_debug(thr->log, "fastcgi source header done");
            nxt_fastcgi_source_header_ready(fs, b);
            return;
        }

        if (nxt_fast_path(ret != NXT_AGAIN)) {

            if (ret != NXT_ERROR) {

                /* n == NXT_DECLINED: "\r" is not followed by "\n" */
                nxt_log_error(NXT_LOG_ERR, thr->log,
                              "upstream sent invalid header line: \"%*s\\r...\"",
                              fs->u.header.parse.header_end
                                  - fs->u.header.parse.header_name_start,
                              fs->u.header.parse.header_name_start);
            }

            /* ret == NXT_ERROR */

            nxt_fastcgi_source_fail(fs);
            return;
        }

        /* NXT_AGAIN: the header continues in the next buffer. */
        b = b->next;

    } while (b != NULL);
}
/*
 * Handles a sync buffer seen while headers are still being parsed:
 * either the upstream closed early (last buffer) or the buffer pool
 * was exhausted before the header completed.  Fails the source.
 */
static void
nxt_fastcgi_source_sync_buffer(nxt_thread_t *thr, nxt_fastcgi_source_t *fs,
    nxt_buf_t *b)
{
    if (!nxt_buf_is_last(b)) {
        nxt_log_error(NXT_LOG_ERR, thr->log, "%ui buffers %uz each are not "
                      "enough to process upstream response header",
                      fs->upstream->buffers.max,
                      fs->upstream->buffers.size);

    } else {
        nxt_log_error(NXT_LOG_ERR, thr->log,
                      "upstream closed prematurely connection");
    }

    /* The stream source sends only the last and the nobuf sync buffer. */
    nxt_fastcgi_source_fail(fs);
}
/*
 * Stores one parsed response header into fs->header_in.list and runs
 * the registered special handler ("Status", "Content-Length") when the
 * name is found in the header hash.  Returns NXT_ERROR to abort header
 * parsing.
 */
static nxt_int_t
nxt_fastcgi_source_header_process(nxt_fastcgi_source_t *fs)
{
    size_t                     len;
    nxt_thread_t               *thr;
    nxt_name_value_t           *nv;
    nxt_lvlhsh_query_t         lhq;
    nxt_http_header_parse_t    *hp;
    nxt_upstream_name_value_t  *unv;

    thr = nxt_thread();
    hp = &fs->u.header.parse;

    len = hp->header_name_end - hp->header_name_start;

    /* nv->name_len is an 8-bit field, so longer names cannot be stored. */
    if (len > 255) {
        nxt_log_error(NXT_LOG_INFO, thr->log,
                      "upstream sent too long header field name: \"%*s\"",
                      len, hp->header_name_start);
        return NXT_ERROR;
    }

    nv = nxt_list_add(fs->header_in.list);
    if (nxt_slow_path(nv == NULL)) {
        return NXT_ERROR;
    }

    nv->hash = hp->header_hash;
    nv->skip = 0;
    nv->name_len = len;
    nv->name_start = hp->header_name_start;
    nv->value_len = hp->header_end - hp->header_start;
    nv->value_start = hp->header_start;

    nxt_log_debug(thr->log, "http header: \"%*s: %*s\"",
                  nv->name_len, nv->name_start, nv->value_len, nv->value_start);

    lhq.key_hash = nv->hash;
    lhq.key.len = nv->name_len;
    lhq.key.data = nv->name_start;
    lhq.proto = &nxt_upstream_header_hash_proto;

    if (nxt_lvlhsh_find(&fs->header_in.hash, &lhq) == NXT_OK) {
        unv = lhq.value;

        /*
         * NOTE(review): this test looks inverted - a handler returning
         * NXT_OK (success, see nxt_fastcgi_source_status()) aborts the
         * whole header parsing with NXT_ERROR, which would fail any
         * response carrying a valid "Status" or "Content-Length".
         * Verify the handlers' return-value convention before relying
         * on this path.
         */
        if (unv->handler(fs->upstream, nv) == NXT_OK) {
            return NXT_ERROR;
        }
    }

    return NXT_OK;
}
/* Response headers requiring special processing, looked up by name. */
static const nxt_upstream_name_value_t  nxt_fastcgi_source_headers[]
    nxt_aligned(32) =
{
    { nxt_fastcgi_source_status,
      nxt_upstream_name_value("status") },

    { nxt_fastcgi_source_content_length,
      nxt_upstream_name_value("content-length") },
};
/*
 * Builds the level hash of special FastCGI response headers
 * ("Status", "Content-Length") used by the header filter.
 */
nxt_int_t
nxt_fastcgi_source_hash_create(nxt_mem_pool_t *mp, nxt_lvlhsh_t *lh)
{
    return nxt_upstream_header_hash_add(mp, lh, nxt_fastcgi_source_headers,
                                        nxt_nitems(nxt_fastcgi_source_headers));
}
/*
 * "Status" header handler: parses the decimal status code into
 * fs->header_in.status.  A non-positive parse result is an error.
 */
static nxt_int_t
nxt_fastcgi_source_status(nxt_upstream_source_t *us, nxt_name_value_t *nv)
{
    nxt_int_t             code;
    nxt_str_t             str;
    nxt_fastcgi_source_t  *fs;

    str.len = nv->value_len;
    str.data = nv->value_start;

    code = nxt_str_int_parse(&str);

    if (nxt_slow_path(code <= 0)) {
        return NXT_ERROR;
    }

    fs = us->protocol_source;
    fs->header_in.status = code;

    return NXT_OK;
}
/*
 * "Content-Length" header handler: parses the decimal value into
 * fs->header_in.content_length.
 */
static nxt_int_t
nxt_fastcgi_source_content_length(nxt_upstream_source_t *us,
    nxt_name_value_t *nv)
{
    nxt_off_t             length;
    nxt_fastcgi_source_t  *fs;

    length = nxt_off_t_parse(nv->value_start, nv->value_len);

    /*
     * NOTE(review): the "length > 0" test also rejects a valid
     * "Content-Length: 0" header (and covers the parse-failure
     * return) - confirm zero-length responses are not expected here.
     */
    if (nxt_fast_path(length > 0)) {
        fs = us->protocol_source;
        fs->header_in.content_length = length;
        return NXT_OK;
    }

    return NXT_ERROR;
}
/*
 * Called when the blank line terminating the response header has been
 * parsed: switches the record filter's downstream to the body filter,
 * keeps any body bytes that arrived with the header, defaults the
 * status to 200, and notifies the upstream state machine.
 */
static void
nxt_fastcgi_source_header_ready(nxt_fastcgi_source_t *fs, nxt_buf_t *b)
{
    /*
     * Change the FastCGI source filter chain:
     *    stream source | FastCGI record filter | FastCGI body filter
     */
    fs->record.next.filter = nxt_fastcgi_source_body_filter;

    if (nxt_buf_mem_used_size(&b->mem) != 0) {
        /* Body bytes arrived in the same buffer as the header. */
        fs->rest = b;
    }

    if (fs->header_in.status == 0) {
        /* The "200 OK" status by default. */
        fs->header_in.status = 200;
    }

    fs->upstream->state->ready_handler(fs);
}
/*
* The FastCGI source body filter accumulates first body buffers before the next
* filter will be established and sets completion handler for the last buffer.
*/
static void
nxt_fastcgi_source_body_filter(nxt_thread_t *thr, void *obj, void *data)
{
    nxt_buf_t             *b, *in;
    nxt_fastcgi_source_t  *fs;

    fs = obj;
    in = data;

    nxt_log_debug(thr->log, "fastcgi source body filter");

    /* Attach the upstream completion handler to the final body buffer. */
    for (b = in; b != NULL; b = b->next) {

        if (nxt_buf_is_last(b)) {
            b->data = fs->upstream->data;
            b->completion_handler = fs->upstream->state->completion_handler;
        }
    }

    if (fs->next != NULL) {
        /* A downstream filter is established: pass the chain on. */
        nxt_source_filter(thr, fs->upstream->work_queue, fs->next, in);
        return;
    }

    /* No downstream filter yet: accumulate the body in fs->rest. */
    nxt_buf_chain_add(&fs->rest, in);
}
/*
 * Record parser callback producing the sync buffer that terminates
 * the stdout chain; the upstream's completion handler is attached so
 * the response is finalized when the buffer drains.
 */
static nxt_buf_t *
nxt_fastcgi_source_last_buf(nxt_fastcgi_parse_t *fp)
{
    nxt_buf_t             *last;
    nxt_fastcgi_source_t  *fs;

    fs = fp->data;

    last = nxt_buf_sync_alloc(fp->mem_pool, NXT_BUF_SYNC_LAST);

    if (nxt_slow_path(last == NULL)) {
        return NULL;
    }

    last->data = fs->upstream->data;
    last->completion_handler = fs->upstream->state->completion_handler;

    return last;
}
/* Stream-source error callback: propagates as a FastCGI source failure. */
static void
nxt_fastcgi_source_error(nxt_stream_source_t *stream)
{
    nxt_thread_log_debug("fastcgi source error");

    nxt_fastcgi_source_fail(stream->upstream->protocol_source);
}
/*
 * Common failure path: reports the error to the upstream state
 * machine's error handler.
 */
static void
nxt_fastcgi_source_fail(nxt_fastcgi_source_t *fs)
{
    nxt_thread_t  *thr;

    thr = nxt_thread();

    nxt_log_debug(thr->log, "fastcgi source fail");

    /* TODO: fail, next upstream, or bad gateway */

    fs->upstream->state->error_handler(thr, fs, NULL);
}

92
src/nxt_fastcgi_source.h Normal file
View File

@@ -0,0 +1,92 @@
/*
* Copyright (C) Igor Sysoev
* Copyright (C) NGINX, Inc.
*/
#ifndef _NXT_FASTCGI_SOURCE_H_INCLUDED_
#define _NXT_FASTCGI_SOURCE_H_INCLUDED_
#define NXT_FASTCGI_BEGIN_REQUEST 1
#define NXT_FASTCGI_ABORT_REQUEST 2
#define NXT_FASTCGI_END_REQUEST 3
#define NXT_FASTCGI_PARAMS 4
#define NXT_FASTCGI_STDIN 5
#define NXT_FASTCGI_STDOUT 6
#define NXT_FASTCGI_STDERR 7
#define NXT_FASTCGI_DATA 8
typedef struct nxt_fastcgi_parse_s  nxt_fastcgi_parse_t;

/* Resumable FastCGI record parser state (see nxt_fastcgi_record_parse()). */
struct nxt_fastcgi_parse_s {
    u_char                  *pos;       /* current read position */

    uint16_t                length;     /* 16 bits */
    uint8_t                 padding;    /* record padding bytes left */
    uint8_t                 type;       /* record type: STDOUT/STDERR/... */

    uint8_t                 state;      /* header state machine position */
    uint8_t                 fastcgi_error;  /* 1 bit */
    uint8_t                 error;          /* 1 bit */
    uint8_t                 done;           /* 1 bit */

    /* FastCGI stdout and stderr buffer chains. */
    nxt_buf_t               *out[2];

    /* Supplies the sync buffer terminating the stdout chain. */
    nxt_buf_t               *(*last_buf)(nxt_fastcgi_parse_t *fp);
    void                    *data;      /* opaque owner pointer */
    nxt_mem_pool_t          *mem_pool;
};

/* Record parser plus the downstream hook it feeds. */
typedef struct {
    nxt_fastcgi_parse_t     parse;
    nxt_source_hook_t       next;
} nxt_fastcgi_source_record_t;

/* One request header name/value pair produced by request_create. */
typedef struct {
    nxt_str_t               name;
    nxt_str_t               value;
    uintptr_t               data[3];    /* scratch space for the callback */
} nxt_fastcgi_source_request_t;

typedef struct nxt_fastcgi_source_s  nxt_fastcgi_source_t;

/*
 * Callback supplying the next request parameter pair;
 * returns NXT_DONE when there are no more.
 */
typedef nxt_int_t (*nxt_fastcgi_source_request_create_t)(
    nxt_fastcgi_source_t *fs);

struct nxt_fastcgi_source_s {
    nxt_source_hook_t                    query;  /* input from stream source */
    nxt_source_hook_t                    *next;  /* downstream body consumer */

    nxt_upstream_source_t                *upstream;

    nxt_fastcgi_source_request_create_t  request_create;

    nxt_upstream_header_in_t             header_in;

    nxt_buf_t                            *rest;  /* body buffered before next */

    uint32_t                             state;  /* 2 bits */

    nxt_fastcgi_source_record_t          record;

    union {
        nxt_fastcgi_source_request_t     request;
        nxt_http_split_header_parse_t    header;
    } u;
};
NXT_EXPORT void nxt_fastcgi_source_handler(nxt_upstream_source_t *us,
nxt_fastcgi_source_request_create_t request_create);
NXT_EXPORT nxt_int_t nxt_fastcgi_source_hash_create(nxt_mem_pool_t *mp,
nxt_lvlhsh_t *lh);
void nxt_fastcgi_record_parse(nxt_fastcgi_parse_t *fp, nxt_buf_t *in);
#endif /* _NXT_FASTCGI_SOURCE_H_INCLUDED_ */

446
src/nxt_fiber.c Normal file
View File

@@ -0,0 +1,446 @@
/*
* Copyright (C) Igor Sysoev
* Copyright (C) NGINX, Inc.
*/
#include <nxt_main.h>
static char *nxt_fiber_create_stack(nxt_fiber_t *fib);
static void nxt_fiber_switch_stack(nxt_fiber_t *fib, jmp_buf *parent);
static void nxt_fiber_switch_handler(nxt_thread_t *thr, void *obj,
void *data);
static void nxt_fiber_switch(nxt_thread_t *thr, nxt_fiber_t *fib);
static void nxt_fiber_timer_handler(nxt_thread_t *thr, void *obj,
void *data);
/*
 * Schedules a fiber to be resumed by posting nxt_fiber_switch_handler()
 * to the thread's main work queue.
 */
#define \
nxt_fiber_enqueue(thr, fib)                                                   \
    nxt_thread_work_queue_add(thr, &(thr)->work_queue.main,                   \
                              nxt_fiber_switch_handler, fib, NULL, thr->log)
/*
 * Allocates the per-engine fiber bookkeeping structure.  The stack
 * size leaves one page out of a 512K span for the guard page.
 */
nxt_fiber_main_t *
nxt_fiber_main_create(nxt_event_engine_t *engine)
{
    nxt_fiber_main_t  *fm;

    fm = nxt_zalloc(sizeof(nxt_fiber_main_t));

    if (nxt_fast_path(fm != NULL)) {
        fm->stack_size = 512 * 1024 - nxt_pagesize;
        fm->idle = NULL;
    }

    return fm;
}
/*
 * Creates and schedules a new fiber.  An idle fiber structure is
 * reused when available; otherwise a fresh structure and stack are
 * allocated and the fiber is entered once via nxt_fiber_switch_stack()
 * so it can record its resume point and jump straight back here
 * through "parent".  The "stack" parameter is currently unused; the
 * engine-wide fm->stack_size is applied instead.
 *
 * Fix vs. original: when nxt_fiber_create_stack() returned NULL, the
 * old code still ran munmap(fib->stack - nxt_pagesize, ...), doing
 * pointer arithmetic on a null pointer and issuing a guaranteed-failing
 * munmap() with a spurious alert.  The stack is now unmapped only when
 * it was actually created.
 */
nxt_int_t
nxt_fiber_create(nxt_fiber_start_t start, void *data, size_t stack)
{
    int               ret;
    jmp_buf           parent;
    nxt_fid_t         fid;
    nxt_fiber_t       *fib;
    nxt_thread_t      *thr;
    nxt_fiber_main_t  *fm;

    thr = nxt_thread();
    fm = thr->engine->fibers;

    /* Fiber id 0 is reserved; skip it on counter wrap-around. */
    fid = ++fm->fid;

    if (fid == 0) {
        fid = ++fm->fid;
    }

    /* Reuse a cached idle fiber when one is available. */
    fib = fm->idle;

    if (fib != NULL) {
        fm->idle = fib->next;
        fib->fid = fid;
        fib->start = start;
        fib->data = data;
        fib->main = fm;

        nxt_log_debug(thr->log, "fiber create cached: %PF", fib->fid);

        nxt_fiber_enqueue(thr, fib);

        return NXT_OK;
    }

    nxt_log_debug(thr->log, "fiber create");

    fib = nxt_malloc(sizeof(nxt_fiber_t));
    if (nxt_slow_path(fib == NULL)) {
        return NXT_ERROR;
    }

    fib->fid = fid;
    fib->start = start;
    fib->data = data;
    fib->stack_size = fm->stack_size;
    fib->main = fm;

    fib->stack = nxt_fiber_create_stack(fib);

    if (nxt_fast_path(fib->stack != NULL)) {

        /* The new fiber longjmp()s back here after initializing. */
        if (_setjmp(parent) != 0) {
            nxt_log_debug(thr->log, "fiber create: %PF", fib->fid);
            return NXT_OK;
        }

        nxt_fiber_switch_stack(fib, &parent);
        /* It does not return if the switch was successful. */

        /* Switch failed: release the stack together with its guard page. */
        ret = munmap(fib->stack - nxt_pagesize,
                     fib->stack_size + nxt_pagesize);

        if (nxt_slow_path(ret != 0)) {
            nxt_log_alert(thr->log, "munmap() failed %E", nxt_errno);
        }
    }

    nxt_free(fib);

    return NXT_ERROR;
}
#if (NXT_LINUX)
/*
 * Allocates a fiber stack on Linux: the usable stack plus one leading
 * PROT_NONE guard page that traps overflow.  MAP_GROWSDOWN marks the
 * mapping as a stack for the kernel.  Returns a pointer just past the
 * guard page, or NULL on failure.
 *
 * Fix vs. original: on mprotect() failure the mapping was leaked;
 * it is now unmapped before returning NULL.
 */
static char *
nxt_fiber_create_stack(nxt_fiber_t *fib)
{
    char    *s;
    size_t  size;

    size = fib->stack_size + nxt_pagesize;

    s = mmap(NULL, size, PROT_READ | PROT_WRITE,
             MAP_PRIVATE | MAP_ANON | MAP_GROWSDOWN, -1, 0);

    if (nxt_slow_path(s == MAP_FAILED)) {
        nxt_thread_log_alert("fiber stack "
                  "mmap(%uz, MAP_PRIVATE|MAP_ANON|MAP_GROWSDOWN) failed %E",
                  size, nxt_errno);
        return NULL;
    }

    if (nxt_slow_path(mprotect(s, nxt_pagesize, PROT_NONE) != 0)) {
        nxt_thread_log_alert("fiber stack mprotect(%uz, PROT_NONE) failed %E",
                             size, nxt_errno);

        /* Do not leak the mapping when guard-page setup fails. */
        if (munmap(s, size) != 0) {
            nxt_thread_log_alert("munmap() failed %E", nxt_errno);
        }

        return NULL;
    }

    /* Skip the guard page; the caller sees only the usable stack. */
    s += nxt_pagesize;

    nxt_thread_log_debug("fiber stack mmap: %p", s);

    return s;
}
#else /* Generic version. */
/*
 * Generic fiber stack allocation: the usable stack plus one leading
 * PROT_NONE guard page that traps overflow.  Returns a pointer just
 * past the guard page, or NULL on failure.
 *
 * Fix vs. original: on mprotect() failure the mapping was leaked;
 * it is now unmapped before returning NULL.
 */
static char *
nxt_fiber_create_stack(nxt_fiber_t *fib)
{
    char    *s;
    size_t  size;

    size = fib->stack_size + nxt_pagesize;

    s = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);

    if (nxt_slow_path(s == MAP_FAILED)) {
        nxt_thread_log_alert("fiber stack "
                             "mmap(%uz, MAP_PRIVATE|MAP_ANON) failed %E",
                             size, nxt_errno);
        return NULL;
    }

    if (nxt_slow_path(mprotect(s, nxt_pagesize, PROT_NONE) != 0)) {
        nxt_thread_log_alert("fiber stack mprotect(%uz, PROT_NONE) failed %E",
                             size, nxt_errno);

        /* Do not leak the mapping when guard-page setup fails. */
        if (munmap(s, size) != 0) {
            nxt_thread_log_alert("munmap() failed %E", nxt_errno);
        }

        return NULL;
    }

    /* Skip the guard page; the caller sees only the usable stack. */
    s += nxt_pagesize;

    nxt_thread_log_debug("fiber stack mmap: %p", s);

    return s;
}
#endif
#if (NXT_LINUX && NXT_64BIT)
/*
* Linux 64-bit ucontext version. 64-bit glibc makecontext() passes
* pointers as signed int's. The bug has been fixed in glibc 2.8.
*/
static void nxt_fiber_trampoline(uint32_t fh, uint32_t fl, uint32_t ph,
uint32_t pl);
/*
 * Enters the fiber's own stack for the first time via ucontext.
 * The fiber and parent jmp_buf pointers are split into 32-bit halves
 * because 64-bit glibc makecontext() passes arguments as signed ints
 * (fixed in glibc 2.8).  Does not return on success.
 */
static void
nxt_fiber_switch_stack(nxt_fiber_t *fib, jmp_buf *parent)
{
    ucontext_t  uc;

    nxt_thread_log_debug("fiber switch to stack: %p", fib->stack);

    if (nxt_slow_path(getcontext(&uc) != 0)) {
        nxt_thread_log_alert("getcontext() failed");
        return;
    }

    uc.uc_link = NULL;
    uc.uc_stack.ss_sp = fib->stack;
    uc.uc_stack.ss_size = fib->stack_size;

    makecontext(&uc, (void (*)(void)) nxt_fiber_trampoline, 4,
                (uint32_t) ((uintptr_t) fib >> 32),
                (uint32_t) ((uintptr_t) fib & 0xffffffff),
                (uint32_t) ((uintptr_t) parent >> 32),
                (uint32_t) ((uintptr_t) parent & 0xffffffff));

    setcontext(&uc);

    /* setcontext() returns only on failure. */
    nxt_thread_log_alert("setcontext() failed");
}
/*
 * Entry point of a new fiber.  The four 32-bit arguments are the split
 * halves of the fiber and parent jmp_buf pointers produced by
 * nxt_fiber_switch_stack() to work around the glibc makecontext() bug.
 */
static void
nxt_fiber_trampoline(uint32_t fh, uint32_t fl, uint32_t ph, uint32_t pl)
{
    jmp_buf       *parent;
    nxt_fiber_t   *fib;
    nxt_thread_t  *thr;

    /* Reassemble the pointers split by nxt_fiber_switch_stack(). */
    fib = (nxt_fiber_t *) (((uintptr_t) fh << 32) + fl);
    parent = (jmp_buf *) (((uintptr_t) ph << 32) + pl);

    thr = nxt_thread();

    if (_setjmp(fib->jmp) == 0) {
        /* Direct return: save the context, queue the fiber, resume parent. */
        nxt_log_debug(thr->log, "fiber return to parent stack");

        nxt_fiber_enqueue(thr, fib);
        _longjmp(*parent, 1);
        nxt_unreachable();
    }

    /* Reached via _longjmp() when the fiber is scheduled to run. */
    nxt_log_debug(thr->log, "fiber start");

    fib->start(fib->data);

    nxt_fiber_exit(&fib->main->fiber, NULL);
    nxt_unreachable();
}
#elif (NXT_HAVE_UCONTEXT)
/* Generic ucontext version. */
static void nxt_fiber_trampoline(nxt_fiber_t *fib, jmp_buf *parent);
/* Start a fiber on its own stack using the generic ucontext(3) API. */
static void
nxt_fiber_switch_stack(nxt_fiber_t *fib, jmp_buf *parent)
{
    ucontext_t  uc;

    nxt_thread_log_debug("fiber switch to stack: %p", fib->stack);

    if (nxt_slow_path(getcontext(&uc) != 0)) {
        nxt_thread_log_alert("getcontext() failed");
        return;
    }

    uc.uc_link = NULL;
    uc.uc_stack.ss_sp = fib->stack;
    uc.uc_stack.ss_size = fib->stack_size;

    makecontext(&uc, (void (*)(void)) nxt_fiber_trampoline, 2, fib, parent);

    setcontext(&uc);

#if !(NXT_SOLARIS)
    /* Solaris declares setcontext() as __NORETURN. */
    nxt_thread_log_alert("setcontext() failed");
#endif
}
/* Entry point of a new fiber (generic ucontext version). */
static void
nxt_fiber_trampoline(nxt_fiber_t *fib, jmp_buf *parent)
{
    nxt_thread_t  *thr;

    thr = nxt_thread();

    if (_setjmp(fib->jmp) == 0) {
        /* Direct return: save the context, queue the fiber, resume parent. */
        nxt_log_debug(thr->log, "fiber return to parent stack");

        nxt_fiber_enqueue(thr, fib);
        _longjmp(*parent, 1);
        nxt_unreachable();
    }

    /* Reached via _longjmp() when the fiber is scheduled to run. */
    nxt_log_debug(thr->log, "fiber start");

    fib->start(fib->data);

    nxt_fiber_exit(&fib->main->fiber, NULL);
    nxt_unreachable();
}
#else
#error No ucontext(3) interface.
#endif
/* Work-queue adapter: "obj" carries the fiber to switch to.  No return. */
static void
nxt_fiber_switch_handler(nxt_thread_t *thr, void *obj, void *data)
{
    nxt_fiber_switch(thr, obj);
    nxt_unreachable();
}
/*
 * Transfer control to the fiber's saved context.  Never returns: the
 * target context was saved earlier with _setjmp().
 */
static void
nxt_fiber_switch(nxt_thread_t *thr, nxt_fiber_t *fib)
{
    nxt_log_debug(thr->log, "fiber switch: %PF", fib->fid);

    thr->fiber = fib;
    _longjmp(fib->jmp, 1);

    nxt_unreachable();
}
/* Return the fiber currently running on the given thread, or NULL. */
nxt_fiber_t *
nxt_fiber_self(nxt_thread_t *thr)
{
    if (nxt_fast_path(thr != NULL)) {
        return thr->fiber;
    }

    return NULL;
}
/*
 * Give up the processor: save the current fiber's context, put it back
 * on the run queue, and switch to the main (scheduler) fiber.
 * Execution resumes after the _setjmp() when the fiber is scheduled
 * again.
 */
void
nxt_fiber_yield(void)
{
    nxt_fiber_t   *fib;
    nxt_thread_t  *thr;

    thr = nxt_thread();
    fib = thr->fiber;

    if (_setjmp(fib->jmp) == 0) {
        nxt_log_debug(thr->log, "fiber yield");

        nxt_fiber_enqueue(thr, fib);
        nxt_fiber_switch(thr, &fib->main->fiber);
        nxt_unreachable();
    }

    nxt_log_debug(thr->log, "fiber yield return");
}
/*
 * Suspend the current fiber for "timeout" milliseconds: arm the
 * fiber's timer and switch to the main fiber.  The timer's handler
 * (nxt_fiber_timer_handler) resumes the fiber when it fires.
 */
void
nxt_fiber_sleep(nxt_msec_t timeout)
{
    nxt_fiber_t   *fib;
    nxt_thread_t  *thr;

    thr = nxt_thread();
    fib = thr->fiber;

    fib->timer.work_queue = &thr->work_queue.main;
    fib->timer.handler = nxt_fiber_timer_handler;
    fib->timer.log = &nxt_main_log;

    nxt_event_timer_add(thr->engine, &fib->timer, timeout);

    if (_setjmp(fib->jmp) == 0) {
        nxt_log_debug(thr->log, "fiber sleep: %T", timeout);

        nxt_fiber_switch(thr, &fib->main->fiber);
        nxt_unreachable();
    }

    nxt_log_debug(thr->log, "fiber sleep return");
}
/* A sleep timeout fired: resume the fiber owning the timer. */
static void
nxt_fiber_timer_handler(nxt_thread_t *thr, void *obj, void *data)
{
    nxt_event_timer_t  *ev;

    nxt_log_debug(thr->log, "fiber timer handler");

    ev = obj;

    nxt_fiber_switch(thr, nxt_event_timer_data(ev, nxt_fiber_t, timer));
    nxt_unreachable();
}
/*
 * Block the current fiber without re-queueing it: save the context and
 * switch to the main fiber.  Resumes only when some other handler
 * switches back to this fiber.
 */
void
nxt_fiber_wait(void)
{
    nxt_fiber_t   *fib;
    nxt_thread_t  *thr;

    thr = nxt_thread();
    fib = thr->fiber;

    if (_setjmp(fib->jmp) == 0) {
        nxt_log_debug(thr->log, "fiber wait");

        nxt_fiber_switch(thr, &fib->main->fiber);
        nxt_unreachable();
    }

    nxt_log_debug(thr->log, "fiber wait return");
}
/*
 * Terminate the current fiber: cache it on the idle list for reuse and
 * switch to the "next" fiber.  Does not return.  "data" is currently
 * unused.
 */
void
nxt_fiber_exit(nxt_fiber_t *next, void *data)
{
    nxt_fiber_t   *fib;
    nxt_thread_t  *thr;

    thr = nxt_thread();
    fib = thr->fiber;

    nxt_log_debug(thr->log, "fiber exit");

    /* TODO: limit idle fibers. */
    fib->next = fib->main->idle;
    fib->main->idle = fib;

    nxt_fiber_switch(thr, next);
    nxt_unreachable();
}

54
src/nxt_fiber.h Normal file
View File

@@ -0,0 +1,54 @@
/*
* Copyright (C) Igor Sysoev
* Copyright (C) NGINX, Inc.
*/
#ifndef _NXT_FIBER_H_INCLUDED_
#define _NXT_FIBER_H_INCLUDED_


typedef struct nxt_fiber_main_s  nxt_fiber_main_t;

/* Fiber entry point; "data" is the opaque argument of nxt_fiber_create(). */
typedef void (*nxt_fiber_start_t)(void *data);

typedef uint32_t  nxt_fid_t;

/*
 * An expression macro.  The original definition carried a stray
 * trailing semicolon and no parentheses, which broke any use inside a
 * larger expression, e.g. "if (nxt_fiber_id(f) == id)".
 */
#define nxt_fiber_id(f)  ((f)->fid)


typedef struct nxt_fiber_s  nxt_fiber_t;

struct nxt_fiber_s {
    jmp_buf            jmp;         /* Saved context for _longjmp() switch. */
    nxt_fid_t          fid;
    nxt_fiber_start_t  start;
    void               *data;
    char               *stack;      /* Usable stack base after guard page.  */
    size_t             stack_size;
    nxt_err_t          err;
    nxt_fiber_main_t   *main;
    nxt_fiber_t        *next;       /* Idle list link.                      */
    nxt_event_timer_t  timer;       /* Used by nxt_fiber_sleep().           */
};

struct nxt_fiber_main_s {
    nxt_fiber_t        fiber;       /* The scheduler's own context.         */
    nxt_fiber_t        *idle;       /* Exited fibers cached for reuse.      */
    size_t             stack_size;
    nxt_fid_t          fid;
};


nxt_fiber_main_t *nxt_fiber_main_create(nxt_event_engine_t *engine);
nxt_int_t nxt_fiber_create(nxt_fiber_start_t start, void *data, size_t stack);
void nxt_fiber_yield(void);
void nxt_fiber_sleep(nxt_msec_t timeout);
void nxt_fiber_wait(void);
void nxt_fiber_exit(nxt_fiber_t *next, void *data);

NXT_EXPORT nxt_fiber_t *nxt_fiber_self(nxt_thread_t *thr);


#endif /* _NXT_FIBER_H_INCLUDED_ */

601
src/nxt_file.c Normal file
View File

@@ -0,0 +1,601 @@
/*
* Copyright (C) Igor Sysoev
* Copyright (C) NGINX, Inc.
*/
#include <nxt_main.h>
/*
 * Open file->name with the given open(2) mode, creation flags and
 * access rights, recording the descriptor and errno in the nxt_file_t.
 * Returns NXT_OK or NXT_ERROR; a failure is logged only when
 * file->log_level is non-zero.
 */
nxt_int_t
nxt_file_open(nxt_file_t *file, nxt_uint_t mode, nxt_uint_t create,
    nxt_file_access_t access)
{
    nxt_thread_debug(thr);

#ifdef __CYGWIN__
    mode |= O_BINARY;
#endif

    /* O_NONBLOCK is to prevent blocking on FIFOs, special devices, etc. */
    mode |= (O_NONBLOCK | create);

    file->fd = open((char *) file->name, mode, access);

    file->error = (file->fd == -1) ? nxt_errno : 0;

    nxt_thread_time_debug_update(thr);

    nxt_log_debug(thr->log, "open(\"%FN\", 0x%uXi, 0x%uXi): %FD err:%d",
                  file->name, mode, access, file->fd, file->error);

    if (file->fd != -1) {
        return NXT_OK;
    }

    if (file->log_level != 0) {
        nxt_thread_log_error(file->log_level, "open(\"%FN\") failed %E",
                             file->name, file->error);
    }

    return NXT_ERROR;
}
/* Close the file's descriptor; a failure is logged but not reported. */
void
nxt_file_close(nxt_file_t *file)
{
    int  ret;

    nxt_thread_log_debug("close(%FD)", file->fd);

    ret = close(file->fd);

    if (ret != 0) {
        nxt_thread_log_error(NXT_LOG_CRIT, "close(%FD, \"%FN\") failed %E",
                             file->fd, file->name, nxt_errno);
    }
}
/*
 * pwrite() wrapper: write "size" bytes at "offset" and record errno in
 * file->error.  Returns the number of bytes written, or NXT_ERROR on
 * failure (also logged at NXT_LOG_CRIT).
 */
ssize_t
nxt_file_write(nxt_file_t *file, const u_char *buf, size_t size,
    nxt_off_t offset)
{
    ssize_t  n;

    nxt_thread_debug(thr);

    n = pwrite(file->fd, buf, size, offset);

    file->error = (n < 0) ? nxt_errno : 0;

    nxt_thread_time_debug_update(thr);

    nxt_log_debug(thr->log, "pwrite(%FD, %p, %uz, %O): %z",
                  file->fd, buf, size, offset, n);

    if (nxt_fast_path(n >= 0)) {
        return n;
    }

    nxt_thread_log_error(NXT_LOG_CRIT,
                         "pwrite(%FD, \"%FN\", %p, %uz, %O) failed %E",
                         file->fd, file->name, buf, size,
                         offset, file->error);

    return NXT_ERROR;
}
/*
 * pread() wrapper: read up to "size" bytes at "offset".  Returns the
 * pread() result, or NXT_ERROR on failure (also logged).
 */
ssize_t
nxt_file_read(nxt_file_t *file, u_char *buf, size_t size, nxt_off_t offset)
{
    ssize_t  n;

    nxt_thread_debug(thr);

    n = pread(file->fd, buf, size, offset);

    /*
     * NOTE(review): n == 0 (end of file) also records nxt_errno here,
     * which may hold a stale value from an earlier call — confirm
     * whether EOF is meant to set file->error.
     */
    file->error = (n <= 0) ? nxt_errno : 0;

    nxt_thread_time_debug_update(thr);

    nxt_log_debug(thr->log, "pread(%FD, %p, %uz, %O): %z",
                  file->fd, buf, size, offset, n);

    if (nxt_fast_path(n >= 0)) {
        return n;
    }

    nxt_thread_log_error(NXT_LOG_CRIT,
                         "pread(%FD, \"%FN\", %p, %uz, %O) failed %E",
                         file->fd, file->name, buf, size,
                         offset, file->error);

    return NXT_ERROR;
}
#if (NXT_HAVE_READAHEAD)
/* FreeBSD 8.0 fcntl(F_READAHEAD, size) enables read ahead up to the size. */
/*
 * Enable FreeBSD kernel read-ahead up to "size" bytes and touch the
 * first byte at "offset" to start it.
 */
void
nxt_file_read_ahead(nxt_file_t *file, nxt_off_t offset, size_t size)
{
    int     ret;
    u_char  buf;

    ret = fcntl(file->fd, F_READAHEAD, (int) size);

    nxt_thread_log_debug("fcntl(%FD, F_READAHEAD, %uz): %d",
                         file->fd, size, ret);

    if (nxt_slow_path(ret == -1)) {
        nxt_thread_log_error(NXT_LOG_CRIT,
                             "fcntl(%FD, \"%FN\", F_READAHEAD, %uz) failed %E",
                             file->fd, file->name, size, nxt_errno);
        return;
    }

    (void) nxt_file_read(file, &buf, 1, offset);
}
#elif (NXT_HAVE_POSIX_FADVISE)
/*
* POSIX_FADV_SEQUENTIAL
* Linux doubles the default readahead window size of a backing device
* which is usually 128K.
*
* FreeBSD does nothing.
*
* POSIX_FADV_WILLNEED
* Linux preloads synchronously up to 2M of specified file region in
* the kernel page cache. Linux-specific readahead(2) syscall does
* the same. Both operations are blocking despite posix_fadvise(2)
* claims the opposite.
*
* FreeBSD does nothing.
*/
/* Ask the kernel to preload the file region into the page cache. */
void
nxt_file_read_ahead(nxt_file_t *file, nxt_off_t offset, size_t size)
{
    nxt_err_t  err;

    err = posix_fadvise(file->fd, offset, size, POSIX_FADV_WILLNEED);

    nxt_thread_log_debug("posix_fadvise(%FD, \"%FN\", %O, %uz, %d): %d",
                         file->fd, file->name, offset, size,
                         POSIX_FADV_WILLNEED, err);

    if (nxt_slow_path(err != 0)) {
        nxt_thread_log_error(NXT_LOG_CRIT,
                             "posix_fadvise(%FD, \"%FN\", %O, %uz, %d) failed %E",
                             file->fd, file->name, offset, size,
                             POSIX_FADV_WILLNEED, err);
    }
}
#elif (NXT_HAVE_RDAHEAD)
/* MacOSX fcntl(F_RDAHEAD). */
/* Turn on MacOSX read-ahead and touch the first byte to start it. */
void
nxt_file_read_ahead(nxt_file_t *file, nxt_off_t offset, size_t size)
{
    int     ret;
    u_char  buf;

    ret = fcntl(file->fd, F_RDAHEAD, 1);

    nxt_thread_log_debug("fcntl(%FD, F_RDAHEAD, 1): %d", file->fd, ret);

    if (nxt_slow_path(ret == -1)) {
        nxt_thread_log_error(NXT_LOG_CRIT,
                             "fcntl(%FD, \"%FN\", F_RDAHEAD, 1) failed %E",
                             file->fd, file->name, nxt_errno);
        return;
    }

    (void) nxt_file_read(file, &buf, 1, offset);
}
#else
/* No OS read-ahead control available: just touch one byte at the offset. */
void
nxt_file_read_ahead(nxt_file_t *file, nxt_off_t offset, size_t size)
{
    u_char  b;

    (void) nxt_file_read(file, &b, 1, offset);
}
#endif
/*
 * Fill *fi with the file's metadata: fstat() the open descriptor, or
 * stat() by name if the file is not open.  Records errno in
 * file->error and returns NXT_OK or NXT_ERROR.
 */
nxt_int_t
nxt_file_info(nxt_file_t *file, nxt_file_info_t *fi)
{
    int  n;

    if (file->fd == NXT_FILE_INVALID) {
        n = stat((char *) file->name, fi);
        file->error = (n != 0) ? nxt_errno : 0;

        /* The closing quote was misplaced in the original format string. */
        nxt_thread_log_debug("stat(\"%FN\"): %d", file->name, n);

        if (n == 0) {
            return NXT_OK;
        }

        if (file->log_level != 0) {
            nxt_thread_log_error(file->log_level, "stat(\"%FN\") failed %E",
                                 file->name, file->error);
        }

        return NXT_ERROR;

    } else {
        n = fstat(file->fd, fi);
        file->error = (n != 0) ? nxt_errno : 0;

        nxt_thread_log_debug("fstat(%FD): %d", file->fd, n);

        if (n == 0) {
            return NXT_OK;
        }

        /* Use NXT_LOG_CRIT because fstat() error on open file is strange. */
        nxt_thread_log_error(NXT_LOG_CRIT, "fstat(%FD, \"%FN\") failed %E",
                             file->fd, file->name, file->error);
        return NXT_ERROR;
    }
}
/* unlink() wrapper returning NXT_OK or NXT_ERROR. */
nxt_int_t
nxt_file_delete(nxt_file_name_t *name)
{
    nxt_thread_log_debug("unlink(\"%FN\")", name);

    if (nxt_slow_path(unlink((char *) name) != 0)) {
        nxt_thread_log_alert("unlink(\"%FN\") failed %E", name, nxt_errno);
        return NXT_ERROR;
    }

    return NXT_OK;
}
/* chmod() wrapper returning NXT_OK or NXT_ERROR. */
nxt_int_t
nxt_file_set_access(nxt_file_name_t *name, nxt_file_access_t access)
{
    if (nxt_slow_path(chmod((char *) name, access) != 0)) {
        nxt_thread_log_alert("chmod(\"%FN\") failed %E", name, nxt_errno);
        return NXT_ERROR;
    }

    return NXT_OK;
}
/* rename() wrapper returning NXT_OK or NXT_ERROR. */
nxt_int_t
nxt_file_rename(nxt_file_name_t *old_name, nxt_file_name_t *new_name)
{
    int  ret;

    nxt_thread_log_debug("rename(\"%FN\", \"%FN\")", old_name, new_name);

    ret = rename((char *) old_name, (char *) new_name);

    if (nxt_slow_path(ret != 0)) {
        nxt_thread_log_alert("rename(\"%FN\", \"%FN\") failed %E",
                             old_name, new_name, nxt_errno);
        return NXT_ERROR;
    }

    return NXT_OK;
}
/*
* ioctl(FIONBIO) sets a non-blocking mode using one syscall,
* thereas fcntl(F_SETFL, O_NONBLOCK) needs to learn the current state
* using fcntl(F_GETFL).
*
* ioctl() and fcntl() are syscalls at least in Linux 2.2, FreeBSD 2.x,
* and Solaris 7.
*
* Linux 2.4 uses BKL for ioctl() and fcntl(F_SETFL).
* Linux 2.6 does not use BKL.
*/
#if (NXT_HAVE_FIONBIO)
/* Switch the descriptor to non-blocking mode with a single syscall. */
nxt_int_t
nxt_fd_nonblocking(nxt_fd_t fd)
{
    int  nb;

    nb = 1;

    if (nxt_slow_path(ioctl(fd, FIONBIO, &nb) == -1)) {
        nxt_thread_log_alert("ioctl(%d, FIONBIO) failed %E", fd, nxt_errno);
        return NXT_ERROR;
    }

    return NXT_OK;
}
/* Switch the descriptor back to blocking mode with a single syscall. */
nxt_int_t
nxt_fd_blocking(nxt_fd_t fd)
{
    int  nb;

    nb = 0;

    if (nxt_slow_path(ioctl(fd, FIONBIO, &nb) == -1)) {
        nxt_thread_log_alert("ioctl(%d, !FIONBIO) failed %E", fd, nxt_errno);
        return NXT_ERROR;
    }

    return NXT_OK;
}
#else /* !(NXT_HAVE_FIONBIO) */
/*
 * Set O_NONBLOCK while preserving the other file status flags; needs
 * two syscalls (F_GETFL then F_SETFL), see the comment above.
 */
nxt_int_t
nxt_fd_nonblocking(nxt_fd_t fd)
{
    int  flags;

    flags = fcntl(fd, F_GETFL);

    if (nxt_slow_path(flags == -1)) {
        nxt_thread_log_alert("fcntl(%d, F_GETFL) failed %E", fd, nxt_errno);
        return NXT_ERROR;
    }

    flags |= O_NONBLOCK;

    if (nxt_slow_path(fcntl(fd, F_SETFL, flags) == -1)) {
        nxt_thread_log_alert("fcntl(%d, F_SETFL, O_NONBLOCK) failed %E",
                             fd, nxt_errno);
        return NXT_ERROR;
    }

    return NXT_OK;
}
/*
 * Restore blocking mode: clear O_NONBLOCK while preserving all other
 * file status flags.
 */
nxt_int_t
nxt_fd_blocking(nxt_fd_t fd)
{
    int  flags;

    flags = fcntl(fd, F_GETFL);

    if (nxt_slow_path(flags == -1)) {
        nxt_thread_log_alert("fcntl(%d, F_GETFL) failed %E",
                             fd, nxt_errno);
        return NXT_ERROR;
    }

    /*
     * The mask must be inverted: "flags &= O_NONBLOCK" would keep the
     * non-blocking bit set and discard every other status flag.
     */
    flags &= ~O_NONBLOCK;

    if (nxt_slow_path(fcntl(fd, F_SETFL, flags) == -1)) {
        nxt_thread_log_alert("fcntl(%d, F_SETFL, !O_NONBLOCK) failed %E",
                             fd, nxt_errno);
        return NXT_ERROR;
    }

    return NXT_OK;
}
#endif /* NXT_HAVE_FIONBIO */
/*
 * write() wrapper with debug logging.  Returns the write() result;
 * failures (n <= 0) are logged at alert level.
 */
ssize_t
nxt_fd_write(nxt_fd_t fd, u_char *buf, size_t size)
{
    ssize_t    n;
    nxt_err_t  err;

    n = write(fd, buf, size);

    err = (n == -1) ? nxt_errno : 0;

    nxt_thread_log_debug("write(%FD, %p, %uz): %z", fd, buf, size, n);

    if (nxt_slow_path(n <= 0)) {
        nxt_thread_log_alert("write(%FD) failed %E", fd, err);
    }

    return n;
}
/*
 * read() wrapper: returns the read() result, except that EAGAIN is
 * mapped to 0 (retry when the descriptor becomes readable).
 */
ssize_t
nxt_fd_read(nxt_fd_t fd, u_char *buf, size_t size)
{
    ssize_t    n;
    nxt_err_t  err;

    n = read(fd, buf, size);

    err = (n == -1) ? nxt_errno : 0;

    nxt_thread_log_debug("read(%FD, %p, %uz): %z", fd, buf, size, n);

    if (nxt_slow_path(n <= 0)) {

        if (err == NXT_EAGAIN) {
            return 0;
        }

        /*
         * NOTE(review): n == 0 (end of file) also reaches this alert
         * with err 0 — confirm that logging EOF here is intended.
         */
        nxt_thread_log_alert("read(%FD) failed %E", fd, err);
    }

    return n;
}
/* Close the descriptor; a failure is logged but not reported. */
void
nxt_fd_close(nxt_fd_t fd)
{
    int  ret;

    nxt_thread_log_debug("close(%FD)", fd);

    ret = close(fd);

    if (ret != 0) {
        nxt_thread_log_error(NXT_LOG_CRIT, "close(%FD) failed %E",
                             fd, nxt_errno);
    }
}
/*
* nxt_file_redirect() redirects the file to the fd descriptor.
* Then the fd descriptor is closed.
*/
nxt_int_t
nxt_file_redirect(nxt_file_t *file, nxt_fd_t fd)
{
    nxt_thread_log_debug("dup2(%FD, %FD, \"%FN\")", fd, file->fd, file->name);

    if (dup2(fd, file->fd) == -1) {
        nxt_thread_log_error(NXT_LOG_CRIT, "dup2(%FD, %FD, \"%FN\") failed %E",
                             fd, file->fd, file->name, nxt_errno);
        return NXT_ERROR;
    }

    /*
     * NOTE(review): if fd == file->fd, dup2() is a no-op and this
     * close() would close the file itself — confirm callers never pass
     * equal descriptors.
     */
    if (close(fd) != 0) {
        nxt_thread_log_error(NXT_LOG_CRIT, "close(%FD, \"%FN\") failed %E",
                             fd, file->name, nxt_errno);
        return NXT_ERROR;
    }

    return NXT_OK;
}
/* nxt_file_stderr() redirects the stderr descriptor to the file. */

nxt_int_t
nxt_file_stderr(nxt_file_t *file)
{
    nxt_thread_log_debug("dup2(%FD, %FD, \"%FN\")",
                         file->fd, STDERR_FILENO, file->name);

    if (dup2(file->fd, STDERR_FILENO) != -1) {
        return NXT_OK;
    }

    /* The "%E" conversion requires the errno argument. */
    nxt_thread_log_error(NXT_LOG_CRIT, "dup2(%FD, %FD, \"%FN\") failed %E",
                         file->fd, STDERR_FILENO, file->name, nxt_errno);

    return NXT_ERROR;
}
/*
 * Prepare stderr at process start: enable O_APPEND for atomic writes,
 * or, if stderr was closed before start, reserve the descriptor with
 * /dev/null.  Returns the original fcntl(F_GETFL) flags, or -1 when
 * stderr was closed.
 */
nxt_int_t
nxt_stderr_start(void)
{
    int  flags, fd;

    flags = fcntl(nxt_stderr, F_GETFL);

    if (flags != -1) {
        /*
         * If the stderr output of a multithreaded application is
         * redirected to a file:
         *    Linux, Solaris and MacOSX do not write atomically to the output;
         *    MacOSX besides adds zeroes to the output.
         * O_APPEND fixes this.
         */
        (void) fcntl(nxt_stderr, F_SETFL, flags | O_APPEND);

    } else {
        /*
         * The stderr descriptor is closed before application start.
         * Reserve the stderr descriptor for future use.  Errors are
         * ignored because anyway they could be written nowhere.
         */
        fd = open("/dev/null", O_WRONLY | O_APPEND);

        if (fd != -1) {
            (void) dup2(fd, nxt_stderr);

            if (fd != nxt_stderr) {
                (void) close(fd);
            }
        }
    }

    return flags;
}
/*
 * Create a pipe: pp[0] is the read end, pp[1] the write end; nbread
 * and nbwrite select non-blocking mode for the respective ends.
 * Returns NXT_OK or NXT_ERROR.
 */
nxt_int_t
nxt_pipe_create(nxt_fd_t *pp, nxt_bool_t nbread, nxt_bool_t nbwrite)
{
    if (pipe(pp) != 0) {
        nxt_thread_log_alert("pipe() failed %E", nxt_errno);

        return NXT_ERROR;
    }

    nxt_thread_log_debug("pipe(): %FD:%FD", pp[0], pp[1]);

    if (nbread) {
        if (nxt_fd_nonblocking(pp[0]) != NXT_OK) {
            return NXT_ERROR;
        }
    }

    if (nbwrite) {
        if (nxt_fd_nonblocking(pp[1]) != NXT_OK) {
            return NXT_ERROR;
        }
    }

    return NXT_OK;
}
/* Close both ends of the pipe; failures are logged only. */
void
nxt_pipe_close(nxt_fd_t *pp)
{
    nxt_thread_log_debug("pipe close(%FD:%FD)", pp[0], pp[1]);

    if (nxt_slow_path(close(pp[0]) != 0)) {
        nxt_thread_log_alert("pipe close (%FD) failed %E", pp[0], nxt_errno);
    }

    if (nxt_slow_path(close(pp[1]) != 0)) {
        nxt_thread_log_alert("pipe close(%FD) failed %E", pp[1], nxt_errno);
    }
}
/*
 * Copy the current working directory into buf and return its length,
 * or 0 on failure.
 */
size_t
nxt_dir_current(char *buf, size_t len)
{
    if (nxt_slow_path(getcwd(buf, len) == NULL)) {
        nxt_thread_log_alert("getcwd(%uz) failed %E", len, nxt_errno);
        return 0;
    }

    return nxt_strlen(buf);
}

195
src/nxt_file.h Normal file
View File

@@ -0,0 +1,195 @@
/*
* Copyright (C) Igor Sysoev
* Copyright (C) NGINX, Inc.
*/
#ifndef _NXT_UNIX_FILE_H_INCLUDED_
#define _NXT_UNIX_FILE_H_INCLUDED_
/* A native POSIX file descriptor. */
typedef int                         nxt_fd_t;

#define NXT_FILE_INVALID  -1

typedef nxt_uint_t                  nxt_file_access_t;
typedef struct stat                 nxt_file_info_t;


#define NXT_FILE_SYSTEM_NAME_UTF8  1

/* File names are plain byte strings on Unix. */
typedef u_char                      nxt_file_name_t;

typedef struct {
    size_t                          len;
    nxt_file_name_t                 *start;
} nxt_file_name_str_t;
#define \
nxt_file_name_str_set(file_name, mem_pool, name) \
((file_name) = (nxt_file_name_t *) (name), NXT_OK)
#define \
nxt_file_name_alloc(mem_pool, len) \
nxt_mem_nalloc(mem_pool, len)
#define \
nxt_file_name_copy(dst, src, len) \
nxt_cpymem(dst, src, len)
#define \
nxt_file_name_add(dst, src, len) \
nxt_cpymem(dst, src, len)
#if (NXT_HAVE_CASELESS_FILESYSTEM)
/* MacOSX, Cygwin. */
#define \
nxt_file_name_eq(fn1, fn2) \
(nxt_strcasecmp(fn1, fn2) == 0)
#else
#define \
nxt_file_name_eq(fn1, fn2) \
(nxt_strcmp(fn1, fn2) == 0)
#endif
#define \
nxt_file_name_is_absolute(name) \
(name[0] == '/')
#define NXT_MAX_PATH_LEN MAXPATHLEN
typedef enum {
NXT_FILE_UNKNOWN = 0,
NXT_FILE_REGULAR,
NXT_FILE_DIRECTORY,
} nxt_file_type_t;
typedef struct {
    nxt_file_name_t                 *name;

    /* Both are int's. */
    nxt_fd_t                        fd;
    nxt_err_t                       error;       /* errno of the last operation. */

#define NXT_FILE_ACCESSED_LONG_AGO  0xffff
    /*
     * Number of seconds ago the file content was last
     * read.  The maximum value is about 18 hours.
     */
    uint16_t                        accessed;

    uint8_t                         type;        /* nxt_file_type_t */

    /*
     * Log open() file error with given log level if it is non zero.
     * Note that zero log level is NXT_LOG_EMERG.
     */
    uint8_t                         log_level;

    nxt_time_t                      mtime;
    nxt_off_t                       size;
} nxt_file_t;
NXT_EXPORT nxt_int_t nxt_file_open(nxt_file_t *file, nxt_uint_t mode,
    nxt_uint_t create, nxt_file_access_t access);

#define nxt_file_open_n  "open"


/* The file open access modes. */
#define NXT_FILE_RDONLY  O_RDONLY
#define NXT_FILE_WRONLY  O_WRONLY
#define NXT_FILE_RDWR    O_RDWR
#define NXT_FILE_APPEND  (O_WRONLY | O_APPEND)

/* The file creation modes. */
#define NXT_FILE_CREATE_OR_OPEN  O_CREAT
#define NXT_FILE_OPEN            0
#define NXT_FILE_TRUNCATE        (O_CREAT | O_TRUNC)

/* The file access rights. */
#define NXT_FILE_DEFAULT_ACCESS  0644
#define NXT_FILE_OWNER_ACCESS    0600


NXT_EXPORT void nxt_file_close(nxt_file_t *file);
NXT_EXPORT ssize_t nxt_file_write(nxt_file_t *file, const u_char *buf,
    size_t size, nxt_off_t offset);
NXT_EXPORT ssize_t nxt_file_read(nxt_file_t *file, u_char *buf, size_t size,
    nxt_off_t offset);
NXT_EXPORT void nxt_file_read_ahead(nxt_file_t *file, nxt_off_t offset,
    size_t size);
NXT_EXPORT nxt_int_t nxt_file_info(nxt_file_t *file, nxt_file_info_t *fi);


/* Accessors over struct stat. */

#define                                                                       \
nxt_is_dir(fi)                                                                \
    (S_ISDIR((fi)->st_mode))

#define                                                                       \
nxt_is_file(fi)                                                               \
    (S_ISREG((fi)->st_mode))

#define                                                                       \
nxt_file_size(fi)                                                             \
    (fi)->st_size

#define                                                                       \
nxt_file_mtime(fi)                                                            \
    (fi)->st_mtime


NXT_EXPORT nxt_int_t nxt_file_delete(nxt_file_name_t *name);
NXT_EXPORT nxt_int_t nxt_file_set_access(nxt_file_name_t *name,
    nxt_file_access_t access);
NXT_EXPORT nxt_int_t nxt_file_rename(nxt_file_name_t *old_name,
    nxt_file_name_t *new_name);

NXT_EXPORT nxt_int_t nxt_fd_nonblocking(nxt_fd_t fd);
NXT_EXPORT nxt_int_t nxt_fd_blocking(nxt_fd_t fd);
NXT_EXPORT ssize_t nxt_fd_write(nxt_fd_t fd, u_char *buf, size_t size);
NXT_EXPORT ssize_t nxt_fd_read(nxt_fd_t fd, u_char *buf, size_t size);
NXT_EXPORT void nxt_fd_close(nxt_fd_t fd);

NXT_EXPORT nxt_int_t nxt_file_redirect(nxt_file_t *file, nxt_fd_t fd);
NXT_EXPORT nxt_int_t nxt_file_stderr(nxt_file_t *file);
NXT_EXPORT nxt_int_t nxt_stderr_start(void);


#define nxt_stdout  STDOUT_FILENO
#define nxt_stderr  STDERR_FILENO


#define                                                                       \
nxt_write_console(fd, buf, size)                                              \
    write(fd, buf, size)

#define                                                                       \
nxt_write_syslog(priority, message)                                           \
    syslog(priority, "%s", message)


NXT_EXPORT nxt_int_t nxt_pipe_create(nxt_fd_t *pp, nxt_bool_t nbread,
    nxt_bool_t nbwrite);
NXT_EXPORT void nxt_pipe_close(nxt_fd_t *pp);

NXT_EXPORT size_t nxt_dir_current(char *buf, size_t len);
#endif /* _NXT_UNIX_FILE_H_INCLUDED_ */

508
src/nxt_file_cache.c Normal file
View File

@@ -0,0 +1,508 @@
/*
* Copyright (C) Igor Sysoev
* Copyright (C) NGINX, Inc.
*/
#include <nxt_main.h>
static nxt_int_t nxt_file_cache_lvlhsh_test(nxt_lvlhsh_key_t *hkey, void *data);
static nxt_work_handler_t nxt_file_cache_query_locked(nxt_file_cache_t *cache,
nxt_file_cache_query_t *q, nxt_lvlhsh_key_t *hkey);
static nxt_work_handler_t nxt_file_cache_node_hold(nxt_file_cache_t *cache,
nxt_file_cache_query_t *q, nxt_lvlhsh_key_t *hkey);
static nxt_work_handler_t nxt_file_cache_node_test(nxt_file_cache_t *cache,
nxt_file_cache_query_t *q);
static void nxt_file_cache_wait_handler(void *data);
static void nxt_file_cache_timeout_handler(nxt_event_timer_t *ev);
static void nxt_file_cache_wake_handler(void *data);
static nxt_file_cache_node_t *nxt_file_cache_node_alloc(nxt_cache_t *cache);
static void nxt_file_cache_node_free(nxt_file_cache_t *cache,
nxt_file_cache_node_t *node, nxt_bool_t fast);
static nxt_file_cache_query_wait_t *nxt_file_cache_query_wait_alloc(
nxt_file_cache_t *cache, nxt_bool_t *fast);
static void nxt_file_cache_query_wait_free(nxt_file_cache_t *cache,
nxt_file_cache_query_wait_t *qw);
static void nxt_file_cache_lock(nxt_file_cache_t *cache);
static void nxt_file_cache_unlock(nxt_file_cache_t *cache);
/*
 * Initialize the cache lvlhsh and record the start time used as the
 * base for the relative node expiry/access times.
 */
void
nxt_file_cache_init(nxt_cache_t *cache)
{
    /* Shared lvlhsh operations for all cache instances. */
    static const nxt_lvlhsh_ctx_t  ctx = {
        nxt_file_cache_lvlhsh_test,
        nxt_lvlhsh_alloc,
        nxt_lvlhsh_free,
        0,
    };

    /* lvlhsh with large first level. */
    cache->lvlhsh.shift[1] = 10;

    cache->lvlhsh.ctx = &ctx;
    cache->start_time = nxt_thread_time();
}
/* Match a lookup key against a stored cache node's key. */
static nxt_int_t
nxt_file_cache_lvlhsh_test(nxt_lvlhsh_key_t *hkey, void *data)
{
    nxt_file_cache_node_t  *node;

    node = data;

    return nxt_strmem_eq(&hkey->key, node->key_data, node->key_len)
           ? NXT_OK : NXT_DECLINED;
}
/*
 * Look up a cache entry for the query key and invoke the resulting
 * state handler: nocache_handler when no cache is configured,
 * otherwise whatever the locked lookup decides.  The handler runs
 * outside the cache lock.
 */
void
nxt_file_cache_query(nxt_file_cache_t *cache, nxt_file_cache_query_t *q)
{
    nxt_lvlhsh_key_t    hkey;
    nxt_work_handler_t  handler;

    if (cache != NULL) {
        hkey.key.len = q->key_len;
        hkey.key.data = q->key_data;
        hkey.key_hash = nxt_murmur_hash2(q->key_data, q->key_len);
        hkey.replace = 0;

        nxt_file_cache_lock(cache);

        handler = nxt_file_cache_query_locked(cache, q, &hkey);

        nxt_file_cache_unlock(cache);

    } else {
        handler = q->state->nocache_handler;
    }

    handler(q);
}
/*
 * Cache lookup performed under the cache lock.  Returns the handler to
 * run after unlocking: update_handler for a freshly inserted sentinel
 * node, the node-test result for an existing node, a wait handler when
 * the query must wait for another updating process, or error_handler.
 * NOTE(review): the static declarations above use nxt_file_cache_*
 * names while this function calls nxt_cache_* helpers — confirm the
 * intended naming.
 */
static nxt_work_handler_t
nxt_file_cache_query_locked(nxt_file_cache_t *cache, nxt_file_cache_query_t *q,
    nxt_lvlhsh_key_t *hkey)
{
    nxt_int_t                     ret;
    nxt_bool_t                    fast;
    nxt_work_handler_t            handler;
    nxt_file_cache_node_t         *node, *sentinel;
    nxt_file_cache_query_wait_t   *qw;
    nxt_file_cache_query_state_t  *state;

    state = q->state;

    sentinel = nxt_file_cache_node_alloc(cache);

    if (nxt_slow_path(sentinel == NULL)) {
        return state->error_handler;
    }

    sentinel->key_data = q->key_data;
    sentinel->key_len = q->key_len;
    hkey->value = sentinel;

    /*
     * Try to insert an empty sentinel node to hold updating
     * process if there is no existent cache node in cache.
     */
    ret = nxt_lvlhsh_insert(&cache->lvlhsh, hkey);

    if (ret == NXT_OK) {
        /* The sentinel node was successfully added. */
        q->node = sentinel;
        sentinel->updating = 1;
        return state->update_handler;
    }

    nxt_cache_node_free(cache, sentinel, 1);

    if (ret == NXT_ERROR) {
        return state->error_handler;
    }

    /* NXT_DECLINED: a cache node exists. */
    node = hkey->value;
    node->count++;
    q->node = node;

    handler = nxt_cache_node_test(cache, q);

    if (handler == NULL) {
        /* Add the node to a wait queue. */

        qw = nxt_cache_query_wait_alloc(cache, &fast);
        if (nxt_slow_path(qw == NULL)) {
            return state->error_handler;
        }

        if (!fast) {
            /* The node state may be changed during slow allocation. */
            handler = nxt_cache_node_test(cache, q);

            if (handler != NULL) {
                nxt_cache_query_wait_free(cache, qw);
                return handler;
            }
        }

        qw->query = q;
        qw->next = node->waiting;
        qw->busy = 0;
        qw->deleted = 0;
        qw->pid = nxt_pid;
        qw->engine = nxt_thread_event_engine();
        qw->handler = nxt_cache_wake_handler;
        qw->cache = cache;

        node->waiting = qw;

        return nxt_cache_wait_handler;
    }

    return handler;
}
/*
 * Decide how to serve the query from an existing node.  Returns the
 * state handler to run, or NULL when the query must wait for another
 * process that is updating the node.
 */
static nxt_work_handler_t
nxt_cache_node_test(nxt_cache_t *cache, nxt_cache_query_t *q)
{
    nxt_time_t               expiry;
    nxt_cache_node_t         *node;
    nxt_cache_query_state_t  *state;

    q->stale = 0;
    state = q->state;
    node = q->node;

    /* Node expiry is stored relative to the cache start time. */
    expiry = cache->start_time + node->expiry;

    if (nxt_thread_time() < expiry) {
        /* A fresh cache node. */
        return state->ready_handler;
    }

    /*
     * A valid stale or empty sentinel cache node.
     * The sentinel node can be only in updating state.
     */

    if (node->updating) {

        if (node->expiry != 0) {
            /* A valid stale cache node. */

            q->stale = 1;

            if (q->use_stale) {
                return state->stale_handler;
            }
        }

        /* Wait for the updating process to finish. */
        return NULL;
    }

    /* A valid stale cache node is not being updated now. */

    q->stale = 1;

    if (q->use_stale) {

        if (q->update_stale) {
            node->updating = 1;
            return state->update_stale_handler;
        }

        return state->stale_handler;
    }

    node->updating = 1;
    return state->update_handler;
}
/*
 * Arm a wait timer for a query that must wait for another updating
 * process, unless a timer is already set.  A zero timeout sets no
 * timer at all.
 */
static void
nxt_cache_wait_handler(void *data)
{
    nxt_thread_t       *thr;
    nxt_event_timer_t  *ev;
    nxt_cache_query_t  *q;

    q = data;

    /*
     * Test the timeout value.  The original code compared the field's
     * address ("&q->timeout == 0"), which is never true, so the zero
     * timeout case was unreachable.
     */
    if (q->timeout == 0) {
        return;
    }

    ev = &q->timer;

    if (!nxt_event_timer_is_set(ev)) {
        thr = nxt_thread();
        ev->log = thr->log;
        ev->handler = nxt_cache_timeout_handler;
        ev->data = q;
        nxt_event_timer_ident(ev, -1);

        nxt_event_timer_add(thr->engine, ev, q->timeout);
    }
}
/* The wait timer expired: run the query's timeout callback. */
static void
nxt_cache_timeout_handler(nxt_event_timer_t *ev)
{
    nxt_cache_query_t  *q = ev->data;

    q->state->timeout_handler(q);
}
/*
 * Woken up after the updating process finished: re-test the node and
 * either run the resulting handler or put the query back on the wait
 * queue.
 */
static void
nxt_cache_wake_handler(void *data)
{
    nxt_cache_t             *cache;
    nxt_work_handler_t      handler;
    nxt_cache_query_t       *q;
    nxt_cache_query_wait_t  *qw;

    qw = data;
    q = qw->query;
    cache = qw->cache;

    nxt_cache_lock(cache);

    handler = nxt_cache_node_test(cache, q);

    if (handler == NULL) {
        /* Wait again. */
        qw->next = q->node->waiting;
        q->node->waiting = qw;
    }

    nxt_cache_unlock(cache);

    /*
     * handler is NULL when the query keeps waiting; the original code
     * called it unconditionally, dereferencing a null function pointer.
     */
    if (handler != NULL) {
        nxt_cache_query_wait_free(cache, qw);

        handler(q);
    }
}
/*
 * Take a zeroed node from the free list, or allocate a new one with
 * the cache lock temporarily released.  Called and returns with the
 * cache lock held.
 */
static nxt_cache_node_t *
nxt_cache_node_alloc(nxt_cache_t *cache)
{
    nxt_queue_node_t  *qn;
    nxt_cache_node_t  *node;

    qn = nxt_queue_first(&cache->free_nodes);

    if (nxt_fast_path(qn != nxt_queue_tail(&cache->free_nodes))) {
        cache->nfree_nodes--;
        nxt_queue_remove(qn);

        node = nxt_queue_node_data(qn, nxt_cache_node_t, queue);
        nxt_memzero(node, sizeof(nxt_cache_node_t));

        return node;
    }

    /* Do not hold the cache lock across a potentially slow allocation. */
    nxt_cache_unlock(cache);

    node = cache->alloc(cache->data, sizeof(nxt_cache_node_t));

    nxt_cache_lock(cache);

    return node;
}
/*
 * Return a node to the free list (up to 32 cached nodes, or always
 * when "fast" is set), otherwise release it via the cache free
 * callback with the lock temporarily dropped.
 */
static void
nxt_cache_node_free(nxt_cache_t *cache, nxt_cache_node_t *node, nxt_bool_t fast)
{
    if (fast || cache->nfree_nodes < 32) {
        nxt_queue_insert_head(&cache->free_nodes, &node->queue);
        cache->nfree_nodes++;
        return;
    }

    nxt_cache_unlock(cache);

    cache->free(cache->data, node);

    nxt_cache_lock(cache);
}
/*
 * Take a wait record from the free list (*fast = 1), or allocate one
 * with the cache lock temporarily released (*fast = 0) — in the slow
 * case the caller must re-check the node state afterwards.
 */
static nxt_cache_query_wait_t *
nxt_cache_query_wait_alloc(nxt_cache_t *cache, nxt_bool_t *fast)
{
    nxt_cache_query_wait_t  *qw;

    qw = cache->free_query_wait;

    if (nxt_fast_path(qw != NULL)) {
        cache->free_query_wait = qw->next;
        cache->nfree_query_wait--;

        *fast = 1;
        return qw;
    }

    nxt_cache_unlock(cache);

    qw = cache->alloc(cache->data, sizeof(nxt_cache_query_wait_t));
    *fast = 0;

    nxt_cache_lock(cache);

    return qw;
}
/*
 * Cache up to 32 wait records on the free list; otherwise release via
 * the cache free callback with the lock temporarily dropped.
 */
static void
nxt_cache_query_wait_free(nxt_cache_t *cache, nxt_cache_query_wait_t *qw)
{
    if (cache->nfree_query_wait < 32) {
        qw->next = cache->free_query_wait;
        cache->free_query_wait = qw;
        cache->nfree_query_wait++;
        return;
    }

    nxt_cache_unlock(cache);

    cache->free(cache->data, qw);

    nxt_cache_lock(cache);
}
#if 0

/*
 * Disabled draft of cache node insertion/replacement.
 * NOTE(review): declared nxt_int_t yet contains a bare "return;" and
 * an unfinished deletion branch — must be completed before enabling.
 */
nxt_int_t
nxt_cache_update(nxt_cache_t *cache, nxt_cache_node_t *node)
{
    nxt_lvlhsh_key_t  hkey;

    if (node->expiry == 0) {
        /* An empty sentinel node. */
        nxt_cache_release(cache, node);
        return;
    }

    hkey.key.len = node->key_len;
    hkey.key.data = node->key_data;
    hkey.key_hash = nxt_murmur_hash2(node->key_data, node->key_len);
    hkey.replace = 1;
    hkey.value = node;

    node->count = 1;

    if (nxt_lvlhsh_insert(&cache->lvlhsh, &hkey) != NXT_OK) {
        return NXT_ERROR;
    }

    node = hkey.value;

    if (node != NULL) {
        if (node->count != 0) {
            node->delete = 1;

        } else {
            // delete cache node
        }
    }

    return NXT_OK;
}

#endif
/*
 * Drop a reference to the node; if this was the last reference to a
 * deleted node, schedule its actual deletion outside the lock.
 */
void
nxt_cache_node_release(nxt_cache_t *cache, nxt_cache_node_t *node)
{
    nxt_bool_t  delete;

    nxt_cache_lock(cache);

    delete = nxt_cache_node_release_locked(cache, node);

    nxt_cache_unlock(cache);

    if (delete) {
        nxt_thread_work_queue_add(cache->delete_handler, node);
    }
}
/*
 * Locked part of the release: decrement the reference count, refresh
 * the node's position in the expiry queue once it becomes unused, and
 * return 1 when the caller must delete the node.
 */
nxt_bool_t
nxt_cache_node_release_locked(nxt_cache_t *cache, nxt_cache_node_t *node)
{
#if 0
    nxt_lvlhsh_key_t  hkey;
#endif

    node->count--;

    if (node->count != 0) {
        return 0;
    }

    if (!node->deleted) {
        /*
         * A cache node is locked whilst its count is non zero.
         * To minimize number of operations the node's place in expiry
         * queue can be updated only if the node is not currently used.
         */
        node->accessed = nxt_thread_time() - cache->start_time;

        nxt_queue_remove(&node->queue);
        nxt_queue_insert_head(&cache->expiry_queue, &node->queue);

        return 0;
    }

#if 0
    hkey.key.len = node->key_len;
    hkey.key.data = node->key_data;
    hkey.key_hash = nxt_murmur_hash2(node->key_data, node->key_len);

    nxt_lvlhsh_delete(&cache->lvlhsh, &hkey);
#endif

    return 1;
}
/* Locking is required only when the cache is shared among threads. */
static void
nxt_file_cache_lock(nxt_file_cache_t *cache)
{
    if (!cache->shared) {
        return;
    }

    nxt_thread_spin_lock(&cache->lock);
}
/* Counterpart of nxt_file_cache_lock(). */
static void
nxt_file_cache_unlock(nxt_file_cache_t *cache)
{
    if (!cache->shared) {
        return;
    }

    nxt_thread_spin_unlock(&cache->lock);
}

201
src/nxt_file_name.c Normal file
View File

@@ -0,0 +1,201 @@
/*
* Copyright (C) Igor Sysoev
* Copyright (C) NGINX, Inc.
*/
#include <nxt_main.h>
/*
* Supported formats:
* %s null-terminated string
* %*s length and string
* %FN nxt_file_name_t *
* %V nxt_str_t *
* %Z '\0', this null is not counted in file name length.
*/
/*
 * Build a file name from the printf-like format into a newly allocated
 * pool buffer.  The first pass computes the length, the second pass
 * copies the data (see the supported conversions above).  Returns
 * NXT_OK, or NXT_ERROR on an empty result or allocation failure.
 */
nxt_int_t
nxt_file_name_create(nxt_mem_pool_t *mp, nxt_file_name_str_t *file_name,
    const char *format, ...)
{
    u_char           ch, *p;
    size_t           len;
    va_list          args;
    nxt_str_t        *v;
    nxt_bool_t       zero;
    const char       *fmt;
    nxt_file_name_t  *dst, *fn;

    va_start(args, format);
    fmt = format;
    zero = 0;
    len = 0;

    for ( ;; ) {
        ch = *fmt++;

        if (ch != '%') {

            if (ch != '\0') {
                len++;
                continue;
            }

            break;
        }

        ch = *fmt++;

        switch (ch) {

        case 'V':
            v = va_arg(args, nxt_str_t *);

            if (nxt_fast_path(v != NULL)) {
                len += v->len;
            }

            continue;

        case 's':
            p = va_arg(args, u_char *);

            if (nxt_fast_path(p != NULL)) {
                while (*p != '\0') {
                    p++;
                    len++;
                }
            }

            continue;

        case '*':
            len += va_arg(args, u_int);

            /* Skip the trailing 's' of "%*s". */
            fmt++;

            continue;

        case 'F':
            ch = *fmt++;

            if (nxt_fast_path(ch == 'N')) {
                fn = va_arg(args, nxt_file_name_t *);

                if (nxt_fast_path(fn != NULL)) {
                    while (*fn != '\0') {
                        fn++;
                        len += sizeof(nxt_file_name_t);
                    }
                }
            }

            continue;

        case 'Z':
            zero = 1;
            len++;
            continue;

        default:
            continue;
        }
    }

    va_end(args);

    if (len == 0) {
        return NXT_ERROR;
    }

    /* The trailing '\0' added by "%Z" is not counted in the length. */
    file_name->len = len - zero;

    fn = nxt_file_name_alloc(mp, len);
    if (nxt_slow_path(fn == NULL)) {
        return NXT_ERROR;
    }

    file_name->start = fn;
    dst = fn;

    va_start(args, format);
    fmt = format;

    for ( ;; ) {
        ch = *fmt++;

        if (ch != '%') {

            if (ch != '\0') {
                *dst++ = (nxt_file_name_t) ch;
                continue;
            }

            break;
        }

        ch = *fmt++;

        switch (ch) {

        case 'V':
            v = va_arg(args, nxt_str_t *);

            if (nxt_fast_path(v != NULL)) {
                dst = nxt_file_name_add(dst, v->data, v->len);
            }

            continue;

        case 's':
            p = va_arg(args, u_char *);

            if (nxt_fast_path(p != NULL)) {
                while (*p != '\0') {
                    *dst++ = (nxt_file_name_t) (*p++);
                }
            }

            continue;

        case '*':
            /*
             * The length must be reread here, not accumulated: after
             * the first pass "len" holds the total length, so the
             * original "len +=" copied far past the source buffer.
             */
            len = va_arg(args, u_int);

            ch = *fmt++;

            if (nxt_fast_path(ch == 's')) {
                p = va_arg(args, u_char *);
                dst = nxt_file_name_add(dst, p, len);
            }

            continue;

        case 'F':
            ch = *fmt++;

            if (nxt_fast_path(ch == 'N')) {
                fn = va_arg(args, nxt_file_name_t *);

                if (nxt_fast_path(fn != NULL)) {
                    while (*fn != '\0') {
                        *dst++ = *fn++;
                    }
                }
            }

            continue;

        case 'Z':
            *dst++ = '\0';
            continue;

        default:
            continue;
        }
    }

    va_end(args);

    return NXT_OK;
}

15
src/nxt_file_name.h Normal file
View File

@@ -0,0 +1,15 @@
/*
* Copyright (C) Igor Sysoev
* Copyright (C) NGINX, Inc.
*/
#ifndef _NXT_FILE_NAME_H_INCLUDED_
#define _NXT_FILE_NAME_H_INCLUDED_


/*
 * Build a file name in a memory pool buffer from a printf-like format;
 * see src/nxt_file_name.c for the supported conversions.
 */
NXT_EXPORT nxt_int_t nxt_file_name_create(nxt_mem_pool_t *mp,
    nxt_file_name_str_t *fn, const char *format, ...);


#endif /* _NXT_FILE_NAME_H_INCLUDED_ */

145
src/nxt_freebsd_sendfile.c Normal file
View File

@@ -0,0 +1,145 @@
/*
* Copyright (C) Igor Sysoev
* Copyright (C) NGINX, Inc.
*/
#include <nxt_main.h>
/*
* sendfile() has been introduced in FreeBSD 3.1,
* however, early implementation had various bugs.
* This code supports FreeBSD 5.0 implementation.
*/
#ifdef NXT_TEST_BUILD_FREEBSD_SENDFILE

ssize_t nxt_freebsd_event_conn_io_sendfile(nxt_event_conn_t *c, nxt_buf_t *b,
    size_t limit);

/* Stub for cross-platform test builds: always fails. */
static int nxt_sys_sendfile(int fd, int s, off_t offset, size_t nbytes,
    struct sf_hdtr *hdtr, off_t *sbytes, int flags)
{
    return -1;
}

#else
/* On real FreeBSD builds use the native sendfile(2). */
#define nxt_sys_sendfile  sendfile
#endif
/*
 * Sends a buffer chain on connection "c" with FreeBSD 5.0+ sendfile():
 * leading memory buffers become sendfile() headers, a file buffer is the
 * payload, and following memory buffers become trailers.  Returns the
 * number of bytes sent (possibly 0 for a sync-only chain or EAGAIN/EINTR),
 * or NXT_ERROR on a fatal socket error or a truncated file.
 */
ssize_t
nxt_freebsd_event_conn_io_sendfile(nxt_event_conn_t *c, nxt_buf_t *b,
    size_t limit)
{
    size_t file_size;
    ssize_t n;
    nxt_buf_t *fb;
    nxt_err_t err;
    nxt_off_t sent;
    nxt_uint_t nhd, ntr;
    struct iovec hd[NXT_IOBUF_MAX], tr[NXT_IOBUF_MAX];
    struct sf_hdtr hdtr, *ht;
    nxt_sendbuf_coalesce_t sb;
    sb.buf = b;
    sb.iobuf = hd;
    sb.nmax = NXT_IOBUF_MAX;
    sb.sync = 0;
    sb.size = 0;
    sb.limit = limit;
    /* Coalesce leading memory buffers into the header iovec. */
    nhd = nxt_sendbuf_mem_coalesce(&sb);
    if (nhd == 0 && sb.sync) {
        return 0;
    }
    if (sb.buf == NULL || !nxt_buf_is_file(sb.buf)) {
        /* No file buffer follows: a plain writev() is sufficient. */
        return nxt_event_conn_io_writev(c, hd, nhd);
    }
    fb = sb.buf;
    file_size = nxt_sendbuf_file_coalesce(&sb);
    if (file_size == 0) {
        return nxt_event_conn_io_writev(c, hd, nhd);
    }
    /* Memory buffers after the file part become the trailer iovec. */
    sb.iobuf = tr;
    ntr = nxt_sendbuf_mem_coalesce(&sb);
    /*
     * Disposal of surplus kernel operations
     * if there are no headers or trailers.
     */
    ht = NULL;
    nxt_memzero(&hdtr, sizeof(struct sf_hdtr));
    if (nhd != 0) {
        ht = &hdtr;
        hdtr.headers = hd;
        hdtr.hdr_cnt = nhd;
    }
    if (ntr != 0) {
        ht = &hdtr;
        hdtr.trailers = tr;
        hdtr.trl_cnt = ntr;
    }
    nxt_log_debug(c->socket.log, "sendfile(%FD, %d, @%O, %uz) hd:%ui tr:%ui",
                  fb->file->fd, c->socket.fd, fb->file_pos, file_size,
                  nhd, ntr);
    sent = 0;
    n = nxt_sys_sendfile(fb->file->fd, c->socket.fd, fb->file_pos,
                         file_size, ht, &sent, 0);
    err = (n == -1) ? nxt_errno : 0;
    nxt_log_debug(c->socket.log, "sendfile(): %d sent:%O", n, sent);
    if (n == -1) {
        switch (err) {
        case NXT_EAGAIN:
            c->socket.write_ready = 0;
            break;
        case NXT_EINTR:
            break;
        default:
            c->socket.error = err;
            nxt_log_error(nxt_socket_error_level(err, c->socket.log_error),
                          c->socket.log, "sendfile(%FD, %d, %O, %uz) failed "
                          "%E \"%FN\" hd:%ui tr:%ui", fb->file->fd,
                          c->socket.fd, fb->file_pos, file_size,
                          err, fb->file->name, nhd, ntr);
            return NXT_ERROR;
        }
        nxt_log_debug(c->socket.log, "sendfile() %E", err);
        /* EAGAIN/EINTR: sendfile() may still have sent some bytes. */
        return sent;
    } else if (sent == 0) {
        nxt_log_error(NXT_LOG_ERR, c->socket.log,
                      "file \"%FN\" was truncated while sendfile()",
                      fb->file->name);
        return NXT_ERROR;
    }
    if (sent < (nxt_off_t) sb.size) {
        /* Short write: the socket buffer is full. */
        c->socket.write_ready = 0;
    }
    return sent;
}

79
src/nxt_gmtime.c Normal file
View File

@@ -0,0 +1,79 @@
/*
* Copyright (C) Igor Sysoev
* Copyright (C) NGINX, Inc.
*/
#include <nxt_main.h>
/* The function is valid for positive nxt_time_t only. */
/*
 * Converts a UNIX timestamp "s" to broken-down UTC time in "tm",
 * without using the C library.  Fills tm_wday, tm_mday, tm_mon,
 * tm_year, tm_yday, tm_hour, tm_min, and tm_sec.
 */
void
nxt_gmtime(nxt_time_t s, struct tm *tm)
{
    nxt_int_t yday;
    nxt_uint_t daytime, mday, mon, year, days, leap;
    days = (nxt_uint_t) (s / 86400);
    daytime = (nxt_uint_t) (s % 86400);
    /* January 1, 1970 was Thursday. */
    tm->tm_wday = (4 + days) % 7;
    /* The algorithm based on Gauss' formula. */
    /* Days since March 1, 1 BCE. */
    days = days - (31 + 28) + 719527;
    /*
     * The "days" should be adjusted by 1 only, however some March 1st's
     * go to previous year, so "days" are adjusted by 2. This also shifts
     * the last February days to the next year, but this is caught by
     * negative "yday".
     */
    year = (days + 2) * 400 / (365 * 400 + 100 - 4 + 1);
    yday = days - (365 * year + year / 4 - year / 100 + year / 400);
    leap = (year % 4 == 0) && (year % 100 || (year % 400 == 0));
    if (yday < 0) {
        /* Late February: fold back into the previous year. */
        yday = 365 + leap + yday;
        year--;
    }
    /*
     * An empirical formula that maps "yday" to month.
     * There are at least 10 variants, some of them are:
     * mon = (yday + 31) * 15 / 459
     * mon = (yday + 31) * 17 / 520
     * mon = (yday + 31) * 20 / 612
     */
    mon = (yday + 31) * 10 / 306;
    /* The Gauss' formula that evaluates days before month. */
    mday = yday - (367 * mon / 12 - 30) + 1;
    if (yday >= 306) {
        /* January and February belong to the next calendar year. */
        year++;
        mon -= 11;
        yday -= 306;
    } else {
        mon++;
        yday += 31 + 28 + leap;
    }
    tm->tm_mday = mday;
    tm->tm_mon = mon;
    tm->tm_year = year - 1900;
    tm->tm_yday = yday;
    tm->tm_hour = daytime / 3600;
    daytime %= 3600;
    tm->tm_min = daytime / 60;
    tm->tm_sec = daytime % 60;
}

742
src/nxt_gnutls.c Normal file
View File

@@ -0,0 +1,742 @@
/*
* Copyright (C) Igor Sysoev
* Copyright (C) NGINX, Inc.
*/
#include <nxt_main.h>
#include <gnutls/gnutls.h>
/* Per-connection GnuTLS state, kept in c->u.ssltls. */
typedef struct {
    gnutls_session_t session;      /* GnuTLS session handle */
    uint8_t times; /* 2 bits */    /* read-direction handshake retries */
    uint8_t no_shutdown; /* 1 bit */  /* skip gnutls_bye() on close */
    nxt_buf_mem_t buffer;          /* coalescing buffer for record writes */
} nxt_gnutls_conn_t;
/* Per-listener GnuTLS configuration, kept in conf->ctx. */
typedef struct {
    gnutls_priority_t ciphers;     /* parsed priority (cipher) string */
    gnutls_certificate_credentials_t certificate;  /* server credentials */
} nxt_gnutls_ctx_t;
#if (NXT_HAVE_GNUTLS_SET_TIME)
time_t nxt_gnutls_time(time_t *tp);
#endif
static nxt_int_t nxt_gnutls_server_init(nxt_ssltls_conf_t *conf);
static nxt_int_t nxt_gnutls_set_ciphers(nxt_ssltls_conf_t *conf);
static void nxt_gnutls_conn_init(nxt_thread_t *thr, nxt_ssltls_conf_t *conf,
nxt_event_conn_t *c);
static void nxt_gnutls_session_cleanup(void *data);
static ssize_t nxt_gnutls_pull(gnutls_transport_ptr_t data, void *buf,
size_t size);
static ssize_t nxt_gnutls_push(gnutls_transport_ptr_t data, const void *buf,
size_t size);
#if (NXT_HAVE_GNUTLS_VEC_PUSH)
static ssize_t nxt_gnutls_vec_push(gnutls_transport_ptr_t data,
const giovec_t *iov, int iovcnt);
#endif
static void nxt_gnutls_conn_handshake(nxt_thread_t *thr, void *obj, void *data);
static void nxt_gnutls_conn_io_read(nxt_thread_t *thr, void *obj, void *data);
static ssize_t nxt_gnutls_conn_io_write_chunk(nxt_thread_t *thr,
nxt_event_conn_t *c, nxt_buf_t *b, size_t limit);
static ssize_t nxt_gnutls_conn_io_send(nxt_event_conn_t *c, void *buf,
size_t size);
static void nxt_gnutls_conn_io_shutdown(nxt_thread_t *thr, void *obj,
void *data);
static nxt_int_t nxt_gnutls_conn_test_error(nxt_thread_t *thr,
nxt_event_conn_t *c, ssize_t err, nxt_work_handler_t handler);
static void nxt_cdecl nxt_gnutls_conn_log_error(nxt_event_conn_t *c,
ssize_t err, const char *fmt, ...);
static nxt_uint_t nxt_gnutls_log_error_level(nxt_event_conn_t *c, ssize_t err);
static void nxt_cdecl nxt_gnutls_log_error(nxt_uint_t level, nxt_log_t *log,
int err, const char *fmt, ...);
/* SSL/TLS library vtable: only server-side initialization is provided. */
const nxt_ssltls_lib_t nxt_gnutls_lib = {
    nxt_gnutls_server_init,
    NULL,
};
/*
 * Connection I/O vtable installed by nxt_gnutls_conn_init().  NULL slots
 * fall back to the engine's defaults; the field mapping is defined by
 * nxt_event_conn_io_t (declared elsewhere) — verify against that struct.
 */
static nxt_event_conn_io_t nxt_gnutls_event_conn_io = {
    NULL,
    NULL,
    nxt_gnutls_conn_io_read,
    NULL,
    NULL,
    nxt_event_conn_io_write,
    nxt_gnutls_conn_io_write_chunk,
    NULL,
    NULL,
    nxt_gnutls_conn_io_send,
    nxt_gnutls_conn_io_shutdown,
};
/*
 * One-time global GnuTLS initialization.  The "started" guard is a plain
 * static — assumes the first call happens before worker threads run;
 * TODO confirm single-threaded startup.
 */
static nxt_int_t
nxt_gnutls_start(void)
{
    int ret;
    static nxt_bool_t started;
    if (nxt_fast_path(started)) {
        return NXT_OK;
    }
    started = 1;
    /* TODO: gnutls_global_deinit */
    ret = gnutls_global_init();
    if (ret != GNUTLS_E_SUCCESS) {
        nxt_gnutls_log_error(NXT_LOG_CRIT, nxt_thread_log(), ret,
                             "gnutls_global_init() failed");
        return NXT_ERROR;
    }
    nxt_thread_log_error(NXT_LOG_INFO, "GnuTLS version: %s",
                         gnutls_check_version(NULL));
#if (NXT_HAVE_GNUTLS_SET_TIME)
    gnutls_global_set_time_function(nxt_gnutls_time);
#endif
    return NXT_OK;
}
#if (NXT_HAVE_GNUTLS_SET_TIME)
/* GnuTLS 2.12.0 */
/*
 * Time callback for GnuTLS: serves time from the thread's cached clock
 * instead of a system call.  Mirrors the time(2) contract: optionally
 * stores the result through "tp" and returns it.
 */
time_t
nxt_gnutls_time(time_t *tp)
{
    time_t t;
    nxt_thread_t *thr;
    thr = nxt_thread();
    nxt_log_debug(thr->log, "gnutls time");
    t = (time_t) nxt_thread_time(thr);
    if (tp != NULL) {
        *tp = t;
    }
    return t;
}
#endif
/*
 * Initializes server-side TLS for a listener: allocates the GnuTLS
 * context, loads the certificate/key pair, parses the cipher priority
 * string, and optionally loads a CA file for client certificates.
 * On failure unwinds only what was already acquired (goto ladder).
 */
static nxt_int_t
nxt_gnutls_server_init(nxt_ssltls_conf_t *conf)
{
    int ret;
    char *certificate, *key, *ca_certificate;
    nxt_thread_t *thr;
    nxt_gnutls_ctx_t *ctx;
    if (nxt_slow_path(nxt_gnutls_start() != NXT_OK)) {
        return NXT_ERROR;
    }
    /* TODO: mem_pool, cleanup: gnutls_certificate_free_credentials,
       gnutls_priority_deinit */
    ctx = nxt_zalloc(sizeof(nxt_gnutls_ctx_t));
    if (ctx == NULL) {
        return NXT_ERROR;
    }
    conf->ctx = ctx;
    conf->conn_init = nxt_gnutls_conn_init;
    thr = nxt_thread();
    ret = gnutls_certificate_allocate_credentials(&ctx->certificate);
    if (ret != GNUTLS_E_SUCCESS) {
        nxt_gnutls_log_error(NXT_LOG_CRIT, thr->log, ret,
                        "gnutls_certificate_allocate_credentials() failed");
        return NXT_ERROR;
    }
    certificate = conf->certificate;
    key = conf->certificate_key;
    ret = gnutls_certificate_set_x509_key_file(ctx->certificate, certificate,
                                               key, GNUTLS_X509_FMT_PEM);
    if (ret != GNUTLS_E_SUCCESS) {
        nxt_gnutls_log_error(NXT_LOG_CRIT, thr->log, ret,
                  "gnutls_certificate_set_x509_key_file(\"%s\", \"%s\") failed",
                  certificate, key);
        goto certificate_fail;
    }
    if (nxt_gnutls_set_ciphers(conf) != NXT_OK) {
        /* ctx->ciphers was not initialized: skip its deinit. */
        goto ciphers_fail;
    }
    if (conf->ca_certificate != NULL) {
        ca_certificate = conf->ca_certificate;
        ret = gnutls_certificate_set_x509_trust_file(ctx->certificate,
                                                     ca_certificate,
                                                     GNUTLS_X509_FMT_PEM);
        if (ret < 0) {
            nxt_gnutls_log_error(NXT_LOG_CRIT, thr->log, ret,
                      "gnutls_certificate_set_x509_trust_file(\"%s\") failed",
                      ca_certificate);
            goto ca_certificate_fail;
        }
    }
    return NXT_OK;
ca_certificate_fail:
    gnutls_priority_deinit(ctx->ciphers);
ciphers_fail:
certificate_fail:
    gnutls_certificate_free_credentials(ctx->certificate);
    return NXT_ERROR;
}
/*
 * Parses the configured cipher priority string (default
 * "NORMAL:!COMP-DEFLATE") into ctx->ciphers.  Returns NXT_OK on
 * success, NXT_ERROR otherwise; an invalid priority string is logged
 * together with the offending position.
 */
static nxt_int_t
nxt_gnutls_set_ciphers(nxt_ssltls_conf_t *conf)
{
    int ret;
    const char *priorities;
    const char *pos;
    nxt_gnutls_ctx_t *ctx;
    priorities = (conf->ciphers != NULL) ? conf->ciphers
                                         : "NORMAL:!COMP-DEFLATE";
    ctx = conf->ctx;
    ret = gnutls_priority_init(&ctx->ciphers, priorities, &pos);
    if (ret == GNUTLS_E_SUCCESS) {
        return NXT_OK;
    }
    if (ret == GNUTLS_E_INVALID_REQUEST) {
        nxt_gnutls_log_error(NXT_LOG_CRIT, nxt_thread_log(), ret,
                             "gnutls_priority_init(\"%s\") failed at \"%s\"",
                             priorities, pos);
    } else {
        nxt_gnutls_log_error(NXT_LOG_CRIT, nxt_thread_log(), ret,
                             "gnutls_priority_init() failed");
    }
    return NXT_ERROR;
}
/*
 * Per-connection TLS setup: allocates the connection state from the
 * connection's memory pool, creates a server GnuTLS session, wires the
 * custom transport callbacks, installs the TLS I/O vtable, and starts
 * the handshake.  On any failure the read state's error handler is run.
 */
static void
nxt_gnutls_conn_init(nxt_thread_t *thr, nxt_ssltls_conf_t *conf,
    nxt_event_conn_t *c)
{
    int ret;
    gnutls_session_t sess;
    nxt_gnutls_ctx_t *ctx;
    nxt_gnutls_conn_t *ssltls;
    nxt_mem_pool_cleanup_t *mpcl;
    nxt_log_debug(c->socket.log, "gnutls conn init");
    ssltls = nxt_mem_zalloc(c->mem_pool, sizeof(nxt_gnutls_conn_t));
    if (ssltls == NULL) {
        goto fail;
    }
    c->u.ssltls = ssltls;
    nxt_buf_mem_set_size(&ssltls->buffer, conf->buffer_size);
    /* Reserve the cleanup entry before gnutls_init() so failure is safe. */
    mpcl = nxt_mem_pool_cleanup(c->mem_pool, 0);
    if (mpcl == NULL) {
        goto fail;
    }
    ret = gnutls_init(&ssltls->session, GNUTLS_SERVER);
    if (ret != GNUTLS_E_SUCCESS) {
        nxt_gnutls_log_error(NXT_LOG_CRIT, c->socket.log, ret,
                             "gnutls_init() failed");
        goto fail;
    }
    sess = ssltls->session;
    mpcl->handler = nxt_gnutls_session_cleanup;
    mpcl->data = ssltls;
    ctx = conf->ctx;
    ret = gnutls_priority_set(sess, ctx->ciphers);
    if (ret != GNUTLS_E_SUCCESS) {
        nxt_gnutls_log_error(NXT_LOG_CRIT, c->socket.log, ret,
                             "gnutls_priority_set() failed");
        goto fail;
    }
    /*
     * Disable TLS random padding of records in CBC ciphers,
     * which may be up to 255 bytes.
     */
    gnutls_record_disable_padding(sess);
    ret = gnutls_credentials_set(sess, GNUTLS_CRD_CERTIFICATE,
                                 ctx->certificate);
    if (ret != GNUTLS_E_SUCCESS) {
        nxt_gnutls_log_error(NXT_LOG_CRIT, c->socket.log, ret,
                             "gnutls_credentials_set() failed");
        goto fail;
    }
    if (conf->ca_certificate != NULL) {
        /* Request (but do not require) a client certificate. */
        gnutls_certificate_server_set_request(sess, GNUTLS_CERT_REQUEST);
    }
    gnutls_transport_set_ptr(sess, (gnutls_transport_ptr_t) c);
    gnutls_transport_set_pull_function(sess, nxt_gnutls_pull);
    gnutls_transport_set_push_function(sess, nxt_gnutls_push);
#if (NXT_HAVE_GNUTLS_VEC_PUSH)
    gnutls_transport_set_vec_push_function(sess, nxt_gnutls_vec_push);
#endif
    c->io = &nxt_gnutls_event_conn_io;
    /* Kernel sendfile() cannot be used on encrypted records. */
    c->sendfile = NXT_CONN_SENDFILE_OFF;
    nxt_gnutls_conn_handshake(thr, c, c->socket.data);
    return;
fail:
    nxt_event_conn_io_handle(thr, c->read_work_queue,
                             c->read_state->error_handler, c, c->socket.data);
}
/*
 * Memory-pool cleanup handler: releases the write-coalescing buffer and
 * deinitializes the GnuTLS session when the connection pool is destroyed.
 */
static void
nxt_gnutls_session_cleanup(void *data)
{
    nxt_gnutls_conn_t *conn = data;
    nxt_thread_log_debug("gnutls session cleanup");
    nxt_free(conn->buffer.start);
    gnutls_deinit(conn->session);
}
/*
 * GnuTLS pull (receive) transport callback.  Reads from the connection's
 * event engine and translates NXT_AGAIN into the errno-style EAGAIN/-1
 * result GnuTLS expects.
 */
static ssize_t
nxt_gnutls_pull(gnutls_transport_ptr_t data, void *buf, size_t size)
{
    ssize_t bytes;
    nxt_event_conn_t *conn;
    conn = data;
    bytes = nxt_thread()->engine->event->io->recv(conn, buf, size, 0);
    if (bytes != NXT_AGAIN) {
        return bytes;
    }
    nxt_set_errno(NXT_EAGAIN);
    return -1;
}
/*
 * GnuTLS push (send) transport callback.  Writes through the connection's
 * event engine and translates NXT_AGAIN into the errno-style EAGAIN/-1
 * result GnuTLS expects.
 */
static ssize_t
nxt_gnutls_push(gnutls_transport_ptr_t data, const void *buf, size_t size)
{
    ssize_t bytes;
    nxt_event_conn_t *conn;
    conn = data;
    bytes = nxt_thread()->engine->event->io->send(conn, (u_char *) buf, size);
    if (bytes != NXT_AGAIN) {
        return bytes;
    }
    nxt_set_errno(NXT_EAGAIN);
    return -1;
}
#if (NXT_HAVE_GNUTLS_VEC_PUSH)
/* GnuTLS 2.12.0 */
/*
 * Vectored push transport callback: forwards an iovec array to the
 * engine's writev, mapping NXT_AGAIN to errno EAGAIN as GnuTLS expects.
 */
static ssize_t
nxt_gnutls_vec_push(gnutls_transport_ptr_t data, const giovec_t *iov,
    int iovcnt)
{
    ssize_t n;
    nxt_thread_t *thr;
    nxt_event_conn_t *c;
    c = data;
    thr = nxt_thread();
    /*
     * This code assumes that giovec_t is the same as "struct iovec"
     * and nxt_iobuf_t. It is not true for Windows.
     */
    n = thr->engine->event->io->writev(c, (nxt_iobuf_t *) iov, iovcnt);
    if (n == NXT_AGAIN) {
        nxt_set_errno(NXT_EAGAIN);
        return -1;
    }
    return n;
}
#endif
/*
 * Drives the TLS handshake.  On success continues to the read handler;
 * on a retriable result (EAGAIN/rehandshake) nxt_gnutls_conn_test_error()
 * re-arms this function on the proper fd direction; on a fatal error the
 * read state's error handler runs.
 */
static void
nxt_gnutls_conn_handshake(nxt_thread_t *thr, void *obj, void *data)
{
    int err;
    nxt_int_t ret;
    nxt_event_conn_t *c;
    nxt_gnutls_conn_t *ssltls;
    c = obj;
    ssltls = c->u.ssltls;
    nxt_log_debug(thr->log, "gnutls conn handshake: %d", ssltls->times);
    /* "ssltls->times == 1" is suitable to run gnutls_handshake() in job. */
    err = gnutls_handshake(ssltls->session);
    nxt_thread_time_debug_update(thr);
    nxt_log_debug(thr->log, "gnutls_handshake(): %d", err);
    if (err == GNUTLS_E_SUCCESS) {
        nxt_gnutls_conn_io_read(thr, c, data);
        return;
    }
    ret = nxt_gnutls_conn_test_error(thr, c, err, nxt_gnutls_conn_handshake);
    if (ret == NXT_ERROR) {
        nxt_gnutls_conn_log_error(c, err, "gnutls_handshake() failed");
        nxt_event_conn_io_handle(thr, c->read_work_queue,
                                 c->read_state->error_handler, c, data);
    } else if (err == GNUTLS_E_AGAIN
               && ssltls->times < 2
               && gnutls_record_get_direction(ssltls->session) == 0)
    {
        /* Count read-direction retries in the 2-bit "times" counter. */
        ssltls->times++;
    }
}
/*
 * TLS read handler: decrypts application data into c->read.  Dispatches
 * the ready/close/error handler depending on the result; retriable
 * GnuTLS results are re-armed by nxt_gnutls_conn_test_error().
 */
static void
nxt_gnutls_conn_io_read(nxt_thread_t *thr, void *obj, void *data)
{
    ssize_t n;
    nxt_buf_t *b;
    nxt_int_t ret;
    nxt_event_conn_t *c;
    nxt_gnutls_conn_t *ssltls;
    nxt_work_handler_t handler;
    c = obj;
    nxt_log_debug(thr->log, "gnutls conn read");
    handler = c->read_state->ready_handler;
    b = c->read;
    /* b == NULL is used to test descriptor readiness. */
    if (b != NULL) {
        ssltls = c->u.ssltls;
        n = gnutls_record_recv(ssltls->session, b->mem.free,
                               b->mem.end - b->mem.free);
        nxt_log_debug(thr->log, "gnutls_record_recv(%d, %p, %uz): %z",
                      c->socket.fd, b->mem.free, b->mem.end - b->mem.free, n);
        if (n > 0) {
            /* c->socket.read_ready is kept. */
            b->mem.free += n;
            handler = c->read_state->ready_handler;
        } else if (n == 0) {
            /* Peer closed the TLS session. */
            handler = c->read_state->close_handler;
        } else {
            ret = nxt_gnutls_conn_test_error(thr, c, n,
                                             nxt_gnutls_conn_io_read);
            if (nxt_fast_path(ret != NXT_ERROR)) {
                /* NXT_AGAIN: the read was re-armed; nothing to dispatch. */
                return;
            }
            nxt_gnutls_conn_log_error(c, n,
                                      "gnutls_record_recv(%d, %p, %uz): failed",
                                      c->socket.fd, b->mem.free,
                                      b->mem.end - b->mem.free);
            handler = c->read_state->error_handler;
        }
    }
    nxt_event_conn_io_handle(thr, c->read_work_queue, handler, c, data);
}
/*
 * Write-chunk hook: coalesces the outgoing buffer chain "b" (up to
 * "limit" bytes) into the connection's TLS write buffer, from which
 * nxt_gnutls_conn_io_send() emits whole records.
 */
static ssize_t
nxt_gnutls_conn_io_write_chunk(nxt_thread_t *thr, nxt_event_conn_t *c,
    nxt_buf_t *b, size_t limit)
{
    nxt_gnutls_conn_t *conn = c->u.ssltls;
    nxt_log_debug(thr->log, "gnutls conn write chunk");
    return nxt_sendbuf_copy_coalesce(c, &conn->buffer, b, limit);
}
/*
 * Sends "size" bytes of plaintext as a TLS record.  Returns the byte
 * count on success, NXT_AGAIN when the transport would block (re-armed
 * on nxt_event_conn_io_write), or NXT_ERROR on a fatal TLS error.
 */
static ssize_t
nxt_gnutls_conn_io_send(nxt_event_conn_t *c, void *buf, size_t size)
{
    ssize_t n;
    nxt_int_t ret;
    nxt_gnutls_conn_t *ssltls;
    ssltls = c->u.ssltls;
    n = gnutls_record_send(ssltls->session, buf, size);
    nxt_log_debug(c->socket.log, "gnutls_record_send(%d, %p, %uz): %z",
                  c->socket.fd, buf, size, n);
    if (n > 0) {
        return n;
    }
    ret = nxt_gnutls_conn_test_error(nxt_thread(), c, n,
                                     nxt_event_conn_io_write);
    if (nxt_slow_path(ret == NXT_ERROR)) {
        nxt_gnutls_conn_log_error(c, n,
                                  "gnutls_record_send(%d, %p, %uz): failed",
                                  c->socket.fd, buf, size);
    }
    return ret;
}
/*
 * Closes the TLS session with gnutls_bye() unless the session was never
 * established or shutdown is suppressed (ssltls->no_shutdown).  A
 * retriable gnutls_bye() is re-armed with a 5 s timer; otherwise the
 * write state's close or error handler is dispatched.
 */
static void
nxt_gnutls_conn_io_shutdown(nxt_thread_t *thr, void *obj, void *data)
{
    int err;
    nxt_int_t ret;
    nxt_event_conn_t *c;
    nxt_gnutls_conn_t *ssltls;
    nxt_work_handler_t handler;
    gnutls_close_request_t how;
    c = obj;
    ssltls = c->u.ssltls;
    if (ssltls->session == NULL || ssltls->no_shutdown) {
        handler = c->write_state->close_handler;
        goto done;
    }
    nxt_log_debug(c->socket.log, "gnutls conn shutdown");
    if (c->socket.timedout || c->socket.error != 0) {
        /* Peer unlikely to read: send close_notify only, do not wait. */
        how = GNUTLS_SHUT_WR;
    } else if (c->socket.closed) {
        how = GNUTLS_SHUT_RDWR;
    } else {
        /*
         * NOTE(review): this branch selects the same value as the one
         * above; kept as-is — possibly a placeholder for a different
         * policy, confirm intent.
         */
        how = GNUTLS_SHUT_RDWR;
    }
    err = gnutls_bye(ssltls->session, how);
    nxt_log_debug(c->socket.log, "gnutls_bye(%d, %d): %d",
                  c->socket.fd, how, err);
    if (err == GNUTLS_E_SUCCESS) {
        handler = c->write_state->close_handler;
    } else {
        ret = nxt_gnutls_conn_test_error(thr, c, err,
                                         nxt_gnutls_conn_io_shutdown);
        if (ret != NXT_ERROR) { /* ret == NXT_AGAIN */
            /* Wait up to 5000 ms for the shutdown to complete. */
            c->socket.error_handler = c->read_state->error_handler;
            nxt_event_timer_add(thr->engine, &c->read_timer, 5000);
            return;
        }
        nxt_gnutls_conn_log_error(c, err, "gnutls_bye(%d) failed",
                                  c->socket.fd);
        handler = c->write_state->error_handler;
    }
done:
    nxt_event_conn_io_handle(thr, c->write_work_queue, handler, c, data);
}
/*
 * Classifies a GnuTLS result.  For retriable results (EAGAIN or
 * rehandshake) it re-arms "handler" on the fd direction GnuTLS reports
 * and returns NXT_AGAIN; anything else marks the socket with a dummy
 * errno and returns NXT_ERROR.
 */
static nxt_int_t
nxt_gnutls_conn_test_error(nxt_thread_t *thr, nxt_event_conn_t *c, ssize_t err,
    nxt_work_handler_t handler)
{
    int ret;
    nxt_gnutls_conn_t *ssltls;
    switch (err) {
    case GNUTLS_E_REHANDSHAKE:
    case GNUTLS_E_AGAIN:
        ssltls = c->u.ssltls;
        ret = gnutls_record_get_direction(ssltls->session);
        nxt_log_debug(thr->log, "gnutls_record_get_direction(): %d", ret);
        if (ret == 0) {
            /* A read direction. */
            nxt_event_fd_block_write(thr->engine, &c->socket);
            c->socket.read_ready = 0;
            c->socket.read_handler = handler;
            if (nxt_event_fd_is_disabled(c->socket.read)) {
                nxt_event_fd_enable_read(thr->engine, &c->socket);
            }
        } else {
            /* A write direction. */
            nxt_event_fd_block_read(thr->engine, &c->socket);
            c->socket.write_ready = 0;
            c->socket.write_handler = handler;
            if (nxt_event_fd_is_disabled(c->socket.write)) {
                nxt_event_fd_enable_write(thr->engine, &c->socket);
            }
        }
        return NXT_AGAIN;
    default:
        c->socket.error = 1000; /* Nonexistent errno code. */
        return NXT_ERROR;
    }
}
/*
 * Logs a formatted message for a per-connection GnuTLS error, appending
 * the numeric code and gnutls_strerror() text.  The log level (and the
 * no_shutdown side effect) comes from nxt_gnutls_log_error_level().
 */
static void
nxt_gnutls_conn_log_error(nxt_event_conn_t *c, ssize_t err,
    const char *fmt, ...)
{
    va_list args;
    nxt_uint_t level;
    u_char *p, msg[NXT_MAX_ERROR_STR];
    level = nxt_gnutls_log_error_level(c, err);
    if (nxt_log_level_enough(c->socket.log, level)) {
        va_start(args, fmt);
        p = nxt_vsprintf(msg, msg + sizeof(msg), fmt, args);
        va_end(args);
        nxt_log_error(level, c->socket.log, "%*s (%d: %s)",
                      p - msg, msg, err, gnutls_strerror(err));
    }
}
/*
 * Maps a GnuTLS error to a log level: expected peer-caused errors are
 * logged at INFO (and the socket marked with a dummy errno), everything
 * else at CRIT.
 */
static nxt_uint_t
nxt_gnutls_log_error_level(nxt_event_conn_t *c, ssize_t err)
{
    nxt_gnutls_conn_t *ssltls;
    switch (err) {
    case GNUTLS_E_UNKNOWN_CIPHER_SUITE: /* -21 */
        /* Disable gnutls_bye(), because it returns GNUTLS_E_INTERNAL_ERROR. */
        ssltls = c->u.ssltls;
        ssltls->no_shutdown = 1;
        /* Fall through. */
    case GNUTLS_E_UNEXPECTED_PACKET_LENGTH: /* -9 */
        c->socket.error = 1000; /* Nonexistent errno code. */
        break;
    default:
        return NXT_LOG_CRIT;
    }
    return NXT_LOG_INFO;
}
/*
 * Logs a formatted message at the given level with the GnuTLS error code
 * and gnutls_strerror() text appended.  Used for non-connection errors
 * (global/listener initialization).
 */
static void
nxt_gnutls_log_error(nxt_uint_t level, nxt_log_t *log, int err,
    const char *fmt, ...)
{
    va_list args;
    u_char *p, msg[NXT_MAX_ERROR_STR];
    va_start(args, fmt);
    p = nxt_vsprintf(msg, msg + sizeof(msg), fmt, args);
    va_end(args);
    nxt_log_error(level, log, "%*s (%d: %s)",
                  p - msg, msg, err, gnutls_strerror(err));
}

47
src/nxt_hash.h Normal file
View File

@@ -0,0 +1,47 @@
/*
* Copyright (C) Igor Sysoev
* Copyright (C) NGINX, Inc.
*/
#ifndef _NXT_HASH_H_INCLUDED_
#define _NXT_HASH_H_INCLUDED_
/*
 * A thin convenience wrapper over nxt_lvlhsh_t that binds a level-hash
 * operations prototype and an allocation pool to the hash, so callers
 * need not fill lhq->proto / lhq->pool on every query.
 */
typedef struct {
    nxt_lvlhsh_t lvlhsh;
    const nxt_lvlhsh_proto_t *proto;
    void *pool;
} nxt_hash_t;
/* Looks up lhq->key; fills lhq->value on success. */
nxt_inline nxt_int_t
nxt_hash_find(nxt_hash_t *h, nxt_lvlhsh_query_t *lhq)
{
    lhq->proto = h->proto;
    return nxt_lvlhsh_find(&h->lvlhsh, lhq);
}
/* Inserts lhq->value under lhq->key, allocating from h->pool. */
nxt_inline nxt_int_t
nxt_hash_insert(nxt_hash_t *h, nxt_lvlhsh_query_t *lhq)
{
    lhq->proto = h->proto;
    lhq->pool = h->pool;
    return nxt_lvlhsh_insert(&h->lvlhsh, lhq);
}
/* Deletes lhq->key, releasing memory to h->pool. */
nxt_inline nxt_int_t
nxt_hash_delete(nxt_hash_t *h, nxt_lvlhsh_query_t *lhq)
{
    lhq->proto = h->proto;
    lhq->pool = h->pool;
    return nxt_lvlhsh_delete(&h->lvlhsh, lhq);
}
#endif /* _NXT_HASH_H_INCLUDED_ */

138
src/nxt_hpux_sendfile.c Normal file
View File

@@ -0,0 +1,138 @@
/*
* Copyright (C) Igor Sysoev
* Copyright (C) NGINX, Inc.
*/
#include <nxt_main.h>
#ifdef NXT_TEST_BUILD_HPUX_SENDFILE
/* Test-build declaration so this file compiles on non-HP-UX hosts. */
ssize_t nxt_hpux_event_conn_io_sendfile(nxt_event_conn_t *c, nxt_buf_t *b,
    size_t limit);
/* Stub with the HP-UX sendfile() signature; always reports failure. */
static ssize_t nxt_sys_sendfile(int s, int fd, off_t offset, size_t nbytes,
    const struct iovec *hdtrl, int flags)
{
    return -1;
}
#else
/* sendfile() is not declared if _XOPEN_SOURCE_EXTENDED is defined. */
sbsize_t sendfile(int s, int fd, off_t offset, bsize_t nbytes,
    const struct iovec *hdtrl, int flags);
#define nxt_sys_sendfile sendfile
#endif
/*
 * Sends a buffer chain on connection "c" with HP-UX sendfile(): at most
 * one memory buffer before the file becomes the header and at most one
 * after it becomes the trailer (HP-UX hdtrl is a fixed two-entry iovec).
 * Returns the number of bytes sent, 0 on EAGAIN/EINTR, or NXT_ERROR on
 * a fatal socket error.
 */
ssize_t
nxt_hpux_event_conn_io_sendfile(nxt_event_conn_t *c, nxt_buf_t *b, size_t limit)
{
    size_t file_size;
    ssize_t n;
    nxt_buf_t *fb;
    nxt_err_t err;
    nxt_uint_t nhd, ntr;
    struct iovec iov[NXT_IOBUF_MAX], *hdtrl;
    nxt_sendbuf_coalesce_t sb;
    sb.buf = b;
    sb.iobuf = iov;
    sb.nmax = NXT_IOBUF_MAX;
    sb.sync = 0;
    sb.size = 0;
    sb.limit = limit;
    nhd = nxt_sendbuf_mem_coalesce(&sb);
    if (nhd == 0 && sb.sync) {
        return 0;
    }
    /* More than one header buffer cannot be mapped to hdtrl[0]. */
    if (nhd > 1 || sb.buf == NULL || !nxt_buf_is_file(sb.buf)) {
        return nxt_event_conn_io_writev(c, iov, nhd);
    }
    fb = sb.buf;
    file_size = nxt_sendbuf_file_coalesce(&sb);
    if (file_size == 0) {
        return nxt_event_conn_io_writev(c, iov, nhd);
    }
    /* At most one trailing memory buffer goes into hdtrl[1]. */
    sb.iobuf = &iov[1];
    sb.nmax = 1;
    ntr = nxt_sendbuf_mem_coalesce(&sb);
    /*
     * Disposal of surplus kernel operations
     * if there are no headers and trailers.
     */
    if (nhd == 0) {
        hdtrl = NULL;
        iov[0].iov_base = NULL;
        iov[0].iov_len = 0;
    } else {
        hdtrl = iov;
    }
    if (ntr == 0) {
        iov[1].iov_base = NULL;
        iov[1].iov_len = 0;
    } else {
        hdtrl = iov;
    }
    nxt_log_debug(c->socket.log, "sendfile(%d, %FD, @%O, %uz) hd:%ui tr:%ui",
                  c->socket.fd, fb->file->fd, fb->file_pos, file_size,
                  nhd, ntr);
    n = nxt_sys_sendfile(c->socket.fd, fb->file->fd, fb->file_pos,
                         file_size, hdtrl, 0);
    err = (n == -1) ? nxt_errno : 0;
    nxt_log_debug(c->socket.log, "sendfile(): %uz", n);
    if (n == -1) {
        switch (err) {
        case NXT_EAGAIN:
            c->socket.write_ready = 0;
            break;
        case NXT_EINTR:
            break;
        default:
            c->socket.error = err;
            /*
             * "%FN" consumes a nxt_file_name_t pointer; pass
             * fb->file->name directly, matching the FreeBSD variant
             * (was "&fb->file->name", one indirection too many).
             */
            nxt_log_error(nxt_socket_error_level(err, c->socket.log_error),
                          c->socket.log, "sendfile(%d, %FD, @%O, %uz) failed "
                          "%E \"%FN\" hd:%ui tr:%ui", c->socket.fd,
                          fb->file->fd, fb->file_pos, file_size,
                          err, fb->file->name, nhd, ntr);
            return NXT_ERROR;
        }
        nxt_log_debug(c->socket.log, "sendfile() %E", err);
        return 0;
    }
    if (n < (ssize_t) sb.size) {
        /* Short write: the socket buffer is full. */
        c->socket.write_ready = 0;
    }
    return n;
}

263
src/nxt_http_chunk_parse.c Normal file
View File

@@ -0,0 +1,263 @@
/*
* Copyright (C) Igor Sysoev
* Copyright (C) NGINX, Inc.
*/
#include <nxt_main.h>
/* Results of nxt_http_chunk_buffer(): where the chunk ended in a buffer. */
#define NXT_HTTP_CHUNK_MIDDLE 0
#define NXT_HTTP_CHUNK_END_ON_BORDER 1
#define NXT_HTTP_CHUNK_END 2
/*
 * True while another hex digit can be shifted into "cs" without
 * overflowing it (reserves the top 4 bits).
 */
#define \
nxt_size_is_sufficient(cs) \
    (cs < ((__typeof__(cs)) 1 << (sizeof(cs) * 8 - 4)))
static nxt_int_t nxt_http_chunk_buffer(nxt_http_chunk_parse_t *hcp,
nxt_buf_t ***tail, nxt_buf_t *in);
/*
 * Parses HTTP/1.1 chunked transfer coding from the "in" buffer chain and
 * returns a chain of data buffers.  Chunk data reuses input buffers where
 * possible; size lines and CRLF delimiters are consumed in place.  After
 * the last chunk, or on a parse/allocation error, the output chain is
 * terminated by a NXT_BUF_SYNC_LAST sync buffer, with hcp->chunk_error or
 * hcp->error set accordingly.
 */
nxt_buf_t *
nxt_http_chunk_parse(nxt_http_chunk_parse_t *hcp, nxt_buf_t *in)
{
    u_char c, ch;
    nxt_int_t ret;
    nxt_buf_t *b, *out, *nb, **tail;
    nxt_thread_t *thr;
    enum {
        sw_start = 0,
        sw_chunk_size,
        sw_chunk_size_linefeed,
        sw_chunk_end_newline,
        sw_chunk_end_linefeed,
        sw_chunk,
    } state;
    out = NULL;
    tail = &out;
    state = hcp->state;
    for (b = in; b != NULL; b = b->next) {
        hcp->pos = b->mem.pos;
        while (hcp->pos < b->mem.free) {
            /*
             * The sw_chunk state is tested outside the switch
             * to preserve hcp->pos and to not touch memory.
             */
            if (state == sw_chunk) {
                ret = nxt_http_chunk_buffer(hcp, &tail, b);
                if (ret == NXT_HTTP_CHUNK_MIDDLE) {
                    goto next;
                }
                if (nxt_slow_path(ret == NXT_ERROR)) {
                    hcp->error = 1;
                    goto done;
                }
                state = sw_chunk_end_newline;
                if (ret == NXT_HTTP_CHUNK_END_ON_BORDER) {
                    goto next;
                }
                /* ret == NXT_HTTP_CHUNK_END */
            }
            ch = *hcp->pos++;
            switch (state) {
            case sw_start:
                /* First hexadecimal digit of a chunk size line. */
                state = sw_chunk_size;
                c = ch - '0';
                if (c <= 9) {
                    hcp->chunk_size = c;
                    continue;
                }
                c = (ch | 0x20) - 'a';
                if (c <= 5) {
                    hcp->chunk_size = 0x0a + c;
                    continue;
                }
                goto chunk_error;
            case sw_chunk_size:
                c = ch - '0';
                if (c > 9) {
                    c = (ch | 0x20) - 'a';
                    if (nxt_fast_path(c <= 5)) {
                        c += 0x0a;
                    } else if (nxt_fast_path(ch == NXT_CR)) {
                        state = sw_chunk_size_linefeed;
                        continue;
                    } else {
                        goto chunk_error;
                    }
                }
                /* Reject sizes that would overflow chunk_size. */
                if (nxt_fast_path(nxt_size_is_sufficient(hcp->chunk_size))) {
                    hcp->chunk_size = (hcp->chunk_size << 4) + c;
                    continue;
                }
                goto chunk_error;
            case sw_chunk_size_linefeed:
                if (nxt_fast_path(ch == NXT_LF)) {
                    if (hcp->chunk_size != 0) {
                        state = sw_chunk;
                        continue;
                    }
                    /* A zero-size chunk marks the end of the body. */
                    hcp->last = 1;
                    state = sw_chunk_end_newline;
                    continue;
                }
                goto chunk_error;
            case sw_chunk_end_newline:
                if (nxt_fast_path(ch == NXT_CR)) {
                    state = sw_chunk_end_linefeed;
                    continue;
                }
                goto chunk_error;
            case sw_chunk_end_linefeed:
                if (nxt_fast_path(ch == NXT_LF)) {
                    if (!hcp->last) {
                        state = sw_start;
                        continue;
                    }
                    goto done;
                }
                goto chunk_error;
            case sw_chunk:
                /*
                 * This state is processed before the switch.
                 * It added here just to suppress a warning.
                 */
                continue;
            }
        }
        if (b->retain == 0) {
            /* No chunk data was found in a buffer. */
            thr = nxt_thread();
            nxt_thread_current_work_queue_add(thr, b->completion_handler,
                                              b, b->parent, thr->log);
        }
    next:
        continue;
    }
    hcp->state = state;
    return out;
chunk_error:
    hcp->chunk_error = 1;
done:
    /* Terminate the output chain with a "last" sync buffer. */
    nb = nxt_buf_sync_alloc(hcp->mem_pool, NXT_BUF_SYNC_LAST);
    if (nxt_fast_path(nb != NULL)) {
        *tail = nb;
    } else {
        hcp->error = 1;
    }
    // STUB: hcp->chunk_error = 1;
    // STUB: hcp->error = 1;
    return out;
}
/*
 * Consumes chunk data starting at hcp->pos in buffer "in" and appends it
 * to the output chain at **tail.  Returns NXT_HTTP_CHUNK_MIDDLE if the
 * chunk continues past this buffer, NXT_HTTP_CHUNK_END_ON_BORDER if the
 * chunk ends exactly at the buffer end, NXT_HTTP_CHUNK_END if data
 * remains after the chunk, or NXT_ERROR on allocation failure.
 */
static nxt_int_t
nxt_http_chunk_buffer(nxt_http_chunk_parse_t *hcp, nxt_buf_t ***tail,
    nxt_buf_t *in)
{
    u_char *p;
    size_t size;
    nxt_buf_t *b;
    p = hcp->pos;
    size = in->mem.free - p;
    if (hcp->chunk_size >= size && in->retain == 0) {
        /*
         * Use original buffer if the buffer is lesser than or equal
         * to a chunk size and this is the first chunk in the buffer.
         */
        in->mem.pos = p;
        **tail = in;
        *tail = &in->next;
    } else {
        /* Otherwise link a shadow buffer referencing "in". */
        b = nxt_buf_mem_alloc(hcp->mem_pool, 0, 0);
        if (nxt_slow_path(b == NULL)) {
            return NXT_ERROR;
        }
        **tail = b;
        *tail = &b->next;
        b->parent = in;
        in->retain++;
        b->mem.pos = p;
        b->mem.start = p;
        if (hcp->chunk_size < size) {
            /* The chunk ends inside this buffer. */
            p += hcp->chunk_size;
            hcp->pos = p;
            b->mem.free = p;
            b->mem.end = p;
            return NXT_HTTP_CHUNK_END;
        }
        b->mem.free = in->mem.free;
        b->mem.end = in->mem.free;
    }
    hcp->chunk_size -= size;
    if (hcp->chunk_size == 0) {
        return NXT_HTTP_CHUNK_END_ON_BORDER;
    }
    return NXT_HTTP_CHUNK_MIDDLE;
}

595
src/nxt_http_parse.c Normal file
View File

@@ -0,0 +1,595 @@
/*
* Copyright (C) Igor Sysoev
* Copyright (C) NGINX, Inc.
*/
#include <nxt_main.h>
static nxt_int_t nxt_http_split_header_part(nxt_http_split_header_parse_t *shp,
u_char *start, u_char *end);
static nxt_int_t nxt_http_split_header_join(nxt_http_split_header_parse_t *shp);
/*
 * Incrementally parses an HTTP status line ("HTTP/x.x NNN reason") from
 * the buffer "b".  Fills sp->http_version (major*10+minor), sp->code,
 * and sp->start/sp->end (the status text span).  Returns NXT_OK when the
 * line is complete, NXT_AGAIN when more input is needed (state is saved
 * in sp->state), or NXT_ERROR on malformed input.
 */
nxt_int_t
nxt_http_status_parse(nxt_http_status_parse_t *sp, nxt_buf_mem_t *b)
{
    u_char ch, *p;
    enum {
        sw_start = 0,
        sw_H,
        sw_HT,
        sw_HTT,
        sw_HTTP,
        sw_major_digit,
        sw_dot,
        sw_minor_digit,
        sw_space_after_version,
        sw_status_start,
        sw_status_code,
        sw_status_text,
        sw_end,
    } state;
    state = sp->state;
    for (p = b->pos; p < b->free; p++) {
        ch = *p;
        switch (state) {
        /* "HTTP/" */
        case sw_start:
            if (nxt_fast_path(ch == 'H')) {
                state = sw_H;
                continue;
            }
            return NXT_ERROR;
        case sw_H:
            if (nxt_fast_path(ch == 'T')) {
                state = sw_HT;
                continue;
            }
            return NXT_ERROR;
        case sw_HT:
            if (nxt_fast_path(ch == 'T')) {
                state = sw_HTT;
                continue;
            }
            return NXT_ERROR;
        case sw_HTT:
            if (nxt_fast_path(ch == 'P')) {
                state = sw_HTTP;
                continue;
            }
            return NXT_ERROR;
        case sw_HTTP:
            if (nxt_fast_path(ch == '/')) {
                state = sw_major_digit;
                continue;
            }
            return NXT_ERROR;
        /*
         * Only HTTP/x.x format is tested because it
         * is unlikely that other formats will appear.
         */
        case sw_major_digit:
            if (nxt_fast_path(ch >= '1' && ch <= '9')) {
                sp->http_version = 10 * (ch - '0');
                state = sw_dot;
                continue;
            }
            return NXT_ERROR;
        case sw_dot:
            if (nxt_fast_path(ch == '.')) {
                state = sw_minor_digit;
                continue;
            }
            return NXT_ERROR;
        case sw_minor_digit:
            if (nxt_fast_path(ch >= '0' && ch <= '9')) {
                sp->http_version += ch - '0';
                state = sw_space_after_version;
                continue;
            }
            return NXT_ERROR;
        case sw_space_after_version:
            if (nxt_fast_path(ch == ' ')) {
                state = sw_status_start;
                continue;
            }
            return NXT_ERROR;
        case sw_status_start:
            /* Skip extra spaces before the status code. */
            if (nxt_slow_path(ch == ' ')) {
                continue;
            }
            sp->start = p;
            state = sw_status_code;
            /* Fall through. */
        /* HTTP status code. */
        case sw_status_code:
            if (nxt_fast_path(ch >= '0' && ch <= '9')) {
                sp->code = sp->code * 10 + (ch - '0');
                continue;
            }
            switch (ch) {
            case ' ':
                state = sw_status_text;
                continue;
            case '.': /* IIS may send 403.1, 403.2, etc. */
                state = sw_status_text;
                continue;
            case NXT_CR:
                sp->end = p;
                state = sw_end;
                continue;
            case NXT_LF:
                sp->end = p;
                goto done;
            default:
                return NXT_ERROR;
            }
        /* Any text until end of line. */
        case sw_status_text:
            switch (ch) {
            case NXT_CR:
                sp->end = p;
                state = sw_end;
                continue;
            case NXT_LF:
                sp->end = p;
                goto done;
            }
            continue;
        /* End of status line. */
        case sw_end:
            if (nxt_fast_path(ch == NXT_LF)) {
                goto done;
            }
            return NXT_ERROR;
        }
    }
    /* The buffer is exhausted: save state for the next call. */
    b->pos = p;
    sp->state = state;
    return NXT_AGAIN;
done:
    b->pos = p + 1;
    return NXT_OK;
}
/*
 * Incrementally parses one HTTP header line from buffer "b".  On NXT_OK
 * hp->header_name_start/end and hp->header_start/end delimit the name
 * and value, and hp->header_hash holds a DJB hash of the lowercased
 * name.  Returns NXT_AGAIN when more input is needed (state saved in
 * hp), NXT_DONE at the empty line ending the header block, and
 * NXT_DECLINED on a malformed line.  Unusual but tolerated characters
 * set hp->invalid_header / hp->underscore instead of failing.
 */
nxt_int_t
nxt_http_header_parse(nxt_http_header_parse_t *hp, nxt_buf_mem_t *b)
{
    u_char c, ch, *p;
    uint32_t hash;
    enum {
        sw_start = 0,
        sw_name,
        sw_space_before_value,
        sw_value,
        sw_space_after_value,
        sw_ignore_line,
        sw_almost_done,
        sw_header_almost_done,
    } state;
    /*
     * Maps valid header-name bytes to their lowercase form and
     * everything else to '\0'; also used to compute the name hash.
     */
    static const u_char normal[256] nxt_aligned(64) =
        "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
        "\0\0\0\0\0\0\0\0\0\0\0\0\0-\0\0" "0123456789\0\0\0\0\0\0"
        /* These 64 bytes should reside in one cache line */
        "\0abcdefghijklmnopqrstuvwxyz\0\0\0\0\0"
        "\0abcdefghijklmnopqrstuvwxyz\0\0\0\0\0"
        "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
        "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
        "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
        "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0";
    nxt_prefetch(&normal[0]);
    nxt_prefetch(&normal[64]);
    state = hp->state;
    hash = hp->header_hash;
    for (p = b->pos; p < b->free; p++) {
        ch = *p;
        switch (state) {
        /* first char */
        case sw_start:
            hp->header_name_start = p;
            hp->invalid_header = 0;
            switch (ch) {
            case NXT_CR:
                hp->header_end = p;
                state = sw_header_almost_done;
                break;
            case NXT_LF:
                hp->header_end = p;
                goto header_done;
            default:
                state = sw_name;
                c = normal[ch];
                if (c) {
                    hash = nxt_djb_hash_add(NXT_DJB_HASH_INIT, c);
                    break;
                }
                if (ch == '_') {
                    hash = nxt_djb_hash_add(NXT_DJB_HASH_INIT, ch);
                    hp->underscore = 1;
                    break;
                }
                hp->invalid_header = 1;
                break;
            }
            break;
        /* header name */
        case sw_name:
            c = normal[ch];
            if (c) {
                hash = nxt_djb_hash_add(hash, c);
                break;
            }
            if (ch == ':') {
                hp->header_name_end = p;
                state = sw_space_before_value;
                break;
            }
            if (ch == NXT_CR) {
                hp->header_name_end = p;
                hp->header_start = p;
                hp->header_end = p;
                state = sw_almost_done;
                break;
            }
            if (ch == NXT_LF) {
                hp->header_name_end = p;
                hp->header_start = p;
                hp->header_end = p;
                goto done;
            }
            if (ch == '_') {
                hash = nxt_djb_hash_add(hash, ch);
                hp->underscore = 1;
                break;
            }
            /* IIS may send the duplicate "HTTP/1.1 ..." lines */
            if (ch == '/'
                && hp->upstream
                && p - hp->header_name_start == 4
                && nxt_memcmp(hp->header_name_start, "HTTP", 4) == 0)
            {
                state = sw_ignore_line;
                break;
            }
            hp->invalid_header = 1;
            break;
        /* space* before header value */
        case sw_space_before_value:
            switch (ch) {
            case ' ':
                break;
            case NXT_CR:
                hp->header_start = p;
                hp->header_end = p;
                state = sw_almost_done;
                break;
            case NXT_LF:
                hp->header_start = p;
                hp->header_end = p;
                goto done;
            case '\0':
                hp->invalid_header = 1;
                /* Fall through. */
            default:
                hp->header_start = p;
                state = sw_value;
                break;
            }
            break;
        /* header value */
        case sw_value:
            switch (ch) {
            case ' ':
                hp->header_end = p;
                state = sw_space_after_value;
                break;
            case NXT_CR:
                hp->header_end = p;
                state = sw_almost_done;
                break;
            case NXT_LF:
                hp->header_end = p;
                goto done;
            case '\0':
                hp->invalid_header = 1;
                break;
            }
            break;
        /* space* before end of header line */
        case sw_space_after_value:
            switch (ch) {
            case ' ':
                break;
            case NXT_CR:
                state = sw_almost_done;
                break;
            case NXT_LF:
                goto done;
            case '\0':
                hp->invalid_header = 1;
                /* Fall through. */
            default:
                state = sw_value;
                break;
            }
            break;
        /* ignore header line */
        case sw_ignore_line:
            switch (ch) {
            case NXT_LF:
                state = sw_start;
                break;
            default:
                break;
            }
            break;
        /* end of header line */
        case sw_almost_done:
            switch (ch) {
            case NXT_LF:
                goto done;
            case NXT_CR:
                break;
            default:
                return NXT_DECLINED;
            }
            break;
        /* end of header */
        case sw_header_almost_done:
            switch (ch) {
            case NXT_LF:
                goto header_done;
            default:
                return NXT_DECLINED;
            }
        }
    }
    /* The buffer is exhausted: save state for the next call. */
    b->pos = p;
    hp->state = state;
    hp->header_hash = hash;
    return NXT_AGAIN;
done:
    b->pos = p + 1;
    hp->state = sw_start;
    hp->header_hash = hash;
    return NXT_OK;
header_done:
    b->pos = p + 1;
    hp->state = sw_start;
    return NXT_DONE;
}
/*
 * Parses a header line that may be split across successive buffers.
 * While no parts are pending, delegates to nxt_http_header_parse(); when
 * a line is cut at a buffer boundary its pieces are accumulated in
 * shp->parts and re-joined by nxt_http_split_header_join() once the
 * terminating LF arrives.  Returns the nxt_http_header_parse() result
 * codes, or NXT_AGAIN while parts are still being collected.
 */
nxt_int_t
nxt_http_split_header_parse(nxt_http_split_header_parse_t *shp,
    nxt_buf_mem_t *b)
{
    u_char *end;
    nxt_int_t ret;
    if (shp->parts == NULL || nxt_array_is_empty(shp->parts)) {
        ret = nxt_http_header_parse(&shp->parse, b);
        if (nxt_fast_path(ret == NXT_OK)) {
            return ret;
        }
        if (nxt_fast_path(ret == NXT_AGAIN)) {
            /* A buffer is over. */
            if (shp->parse.state == 0) {
                /*
                 * A previous parsed header line is
                 * over right on the end of the buffer.
                 */
                return ret;
            }
            /*
             * Add the first header line part and return NXT_AGAIN on success.
             */
            return nxt_http_split_header_part(shp, shp->parse.header_name_start,
                                              b->pos);
        }
        return ret;
    }
    /* A header line is split in buffers. */
    end = nxt_memchr(b->pos, NXT_LF, b->free - b->pos);
    if (end != NULL) {
        /* The last header line part found. */
        end++;
        ret = nxt_http_split_header_part(shp, b->pos, end);
        if (nxt_fast_path(ret != NXT_ERROR)) {
            /* ret == NXT_AGAIN: success, mark the part if it were parsed. */
            b->pos = end;
            return nxt_http_split_header_join(shp);
        }
        return ret;
    }
    /* Add another header line part and return NXT_AGAIN on success. */
    return nxt_http_split_header_part(shp, b->pos, b->free);
}
/*
 * Stores one part [start, end) of a header line that is split across
 * buffers.  Returns NXT_AGAIN on success (the caller keeps reading) or
 * NXT_ERROR on allocation failure.
 */
static nxt_int_t
nxt_http_split_header_part(nxt_http_split_header_parse_t *shp, u_char *start,
    u_char *end)
{
    nxt_http_header_part_t  *part;

    nxt_thread_log_debug("http source header part: \"%*s\"",
                         end - start, start);

    if (shp->parts == NULL) {
        /* Lazily created: the common case is no split at all. */
        shp->parts = nxt_array_create(shp->mem_pool, 2,
                                      sizeof(nxt_http_header_part_t));
        if (nxt_slow_path(shp->parts == NULL)) {
            return NXT_ERROR;
        }
    }

    if (!nxt_array_is_empty(shp->parts)) {
        part = nxt_array_last(shp->parts);

        /*
         * If the new part is contiguous in memory with the last stored
         * part, extend that part instead of adding a new array element.
         * The previous code compared "part->end == end", which made the
         * assignment a no-op and failed to coalesce adjacent parts.
         */
        if (part->end == start) {
            part->end = end;
            return NXT_AGAIN;
        }
    }

    part = nxt_array_add(shp->parts);

    if (nxt_fast_path(part != NULL)) {
        part->start = start;
        part->end = end;
        return NXT_AGAIN;
    }

    return NXT_ERROR;
}
/*
 * Joins the collected header line parts into one continuous memory
 * region and parses the result as a complete header line.
 */
static nxt_int_t
nxt_http_split_header_join(nxt_http_split_header_parse_t *shp)
{
    u_char                  *p;
    size_t                  size;
    nxt_uint_t              n;
    nxt_buf_mem_t           b;
    nxt_http_header_part_t  *part;

    part = shp->parts->elts;
    n = shp->parts->nelts;

    if (n == 1) {
        /*
         * A header line was read by parts, but resides continuously in a
         * stream source buffer, so use disposition in the original buffer.
         */
        b.pos = part->start;
        b.free = part->end;

    } else {
        /* Join header line parts to store the header line and to parse it. */

        /* First pass: total size of all parts. */
        size = 0;

        do {
            size += part->end - part->start;
            part++;
            n--;
        } while (n != 0);

        p = nxt_mem_alloc(shp->mem_pool, size);
        if (nxt_slow_path(p == NULL)) {
            return NXT_ERROR;
        }

        b.pos = p;

        /* Second pass: copy the parts into the joined buffer. */
        part = shp->parts->elts;
        n = shp->parts->nelts;

        do {
            p = nxt_cpymem(p, part->start, part->end - part->start);
            part++;
            n--;
        } while (n != 0);

        b.free = p;
    }

    /* b.start and b.end are not required for parsing. */

    nxt_array_reset(shp->parts);

    /* Reset a header parse state to the sw_start. */
    shp->parse.state = 0;

    return nxt_http_header_parse(&shp->parse, &b);
}

79
src/nxt_http_parse.h Normal file
View File

@@ -0,0 +1,79 @@
/*
* Copyright (C) Igor Sysoev
* Copyright (C) NGINX, Inc.
*/
#ifndef _NXT_HTTP_PARSE_H_INCLUDED_
#define _NXT_HTTP_PARSE_H_INCLUDED_
/* Parser state for an HTTP status line; callers zero it before first use. */
typedef struct {
    uint8_t   state;           /* internal state machine position */
    uint8_t   http_version;
    uint32_t  code;            /* parsed status code */
    u_char    *start;
    u_char    *end;
} nxt_http_status_parse_t;

nxt_int_t nxt_http_status_parse(nxt_http_status_parse_t *sp, nxt_buf_mem_t *b);

/* Incremental parser state for HTTP header lines. */
typedef struct {
    uint32_t  header_hash;     /* hash of the current header name */

    uint8_t   state;           /* internal state; 0 means sw_start */
    uint8_t   underscore;      /* 1 bit */
    uint8_t   invalid_header;  /* 1 bit */
    uint8_t   upstream;        /* 1 bit */

    /* Boundaries of the current header line within the parsed buffer. */
    u_char    *header_start;
    u_char    *header_end;
    u_char    *header_name_start;
    u_char    *header_name_end;
} nxt_http_header_parse_t;

NXT_EXPORT nxt_int_t nxt_http_header_parse(nxt_http_header_parse_t *hp,
    nxt_buf_mem_t *b);

/* One piece of a header line that was split across buffers. */
typedef struct {
    u_char  *start;
    u_char  *end;
} nxt_http_header_part_t;

/* State for parsing a header whose lines may span several buffers. */
typedef struct {
    nxt_array_t              *parts;  /* of nxt_http_header_part_t */
    nxt_mem_pool_t           *mem_pool;
    nxt_http_header_parse_t  parse;
} nxt_http_split_header_parse_t;

nxt_int_t nxt_http_split_header_parse(nxt_http_split_header_parse_t *shp,
    nxt_buf_mem_t *b);

/* Parser state for "Transfer-Encoding: chunked" message bodies. */
typedef struct {
    u_char          *pos;
    nxt_mem_pool_t  *mem_pool;

    uint64_t        chunk_size;   /* presumably bytes left in chunk — confirm */

    uint8_t         state;
    uint8_t         last;         /* 1 bit */
    uint8_t         chunk_error;  /* 1 bit */
    uint8_t         error;        /* 1 bit */
} nxt_http_chunk_parse_t;

NXT_EXPORT nxt_buf_t *nxt_http_chunk_parse(nxt_http_chunk_parse_t *hcp,
    nxt_buf_t *in);

630
src/nxt_http_source.c Normal file
View File

@@ -0,0 +1,630 @@
/*
* Copyright (C) Igor Sysoev
* Copyright (C) NGINX, Inc.
*/
#include <nxt_main.h>
/*
 * Chunked-transfer decoding context inserted between the stream source
 * and the HTTP body filter when the response is chunked.
 */
typedef struct {
    nxt_http_chunk_parse_t  parse;
    nxt_source_hook_t       next;    /* the HTTP body filter hook */
} nxt_http_source_chunk_t;
static nxt_buf_t *nxt_http_source_request_create(nxt_http_source_t *hs);
static void nxt_http_source_status_filter(nxt_thread_t *thr, void *obj,
void *data);
static void nxt_http_source_header_filter(nxt_thread_t *thr, void *obj,
void *data);
static nxt_int_t nxt_http_source_header_line_process(nxt_http_source_t *hs);
static nxt_int_t nxt_http_source_content_length(nxt_upstream_source_t *us,
nxt_name_value_t *nv);
static nxt_int_t nxt_http_source_transfer_encoding(nxt_upstream_source_t *us,
nxt_name_value_t *nv);
static void nxt_http_source_header_ready(nxt_http_source_t *hs,
nxt_buf_t *rest);
static void nxt_http_source_chunk_filter(nxt_thread_t *thr, void *obj,
void *data);
static void nxt_http_source_chunk_error(nxt_thread_t *thr, void *obj,
void *data);
static void nxt_http_source_body_filter(nxt_thread_t *thr, void *obj,
void *data);
static void nxt_http_source_sync_buffer(nxt_thread_t *thr,
nxt_http_source_t *hs, nxt_buf_t *b);
static void nxt_http_source_error(nxt_stream_source_t *stream);
static void nxt_http_source_fail(nxt_http_source_t *hs);
static void nxt_http_source_message(const char *msg, size_t len, u_char *p);
/*
 * Entry point of the HTTP upstream source: allocates the protocol source,
 * sets up the stream-source filter chain, builds the request buffers via
 * request_create, and initiates the upstream connection.
 */
void
nxt_http_source_handler(nxt_upstream_source_t *us,
    nxt_http_source_request_create_t request_create)
{
    nxt_http_source_t    *hs;
    nxt_stream_source_t  *stream;

    hs = nxt_mem_zalloc(us->buffers.mem_pool, sizeof(nxt_http_source_t));
    if (nxt_slow_path(hs == NULL)) {
        /* NOTE(review): hs is NULL here, yet "fail:" passes it to
         * nxt_http_source_fail() — verify that path tolerates NULL. */
        goto fail;
    }

    us->protocol_source = hs;

    hs->header_in.list = nxt_list_create(us->buffers.mem_pool, 8,
                                         sizeof(nxt_name_value_t));
    if (nxt_slow_path(hs->header_in.list == NULL)) {
        goto fail;
    }

    hs->header_in.hash = us->header_hash;
    hs->upstream = us;
    hs->request_create = request_create;

    stream = us->stream;

    if (stream == NULL) {
        stream = nxt_mem_zalloc(us->buffers.mem_pool,
                                sizeof(nxt_stream_source_t));
        if (nxt_slow_path(stream == NULL)) {
            goto fail;
        }

        us->stream = stream;
        stream->upstream = us;

    } else {
        /* Reuse the existing stream source after clearing its state. */
        nxt_memzero(stream, sizeof(nxt_stream_source_t));
    }

    /*
     * Create the HTTP source filter chain:
     *    stream source | HTTP status line filter
     */
    stream->next = &hs->query;
    stream->error_handler = nxt_http_source_error;

    hs->query.context = hs;
    hs->query.filter = nxt_http_source_status_filter;

    /* -1 marks "Content-Length not seen yet". */
    hs->header_in.content_length = -1;

    stream->out = nxt_http_source_request_create(hs);

    if (nxt_fast_path(stream->out != NULL)) {
        nxt_memzero(&hs->u.status_parse, sizeof(nxt_http_status_parse_t));

        nxt_stream_source_connect(stream);
        return;
    }

fail:

    nxt_http_source_fail(hs);
}
/*
 * Copies up to "len" bytes from src into p, consuming the copied bytes
 * from src (its data pointer advances, its length shrinks).  Returns the
 * position in p just past the copied bytes.
 */
nxt_inline u_char *
nxt_http_source_copy(u_char *p, nxt_str_t *src, size_t len)
{
    u_char  *from;

    if (nxt_fast_path(len >= src->len)) {
        /* The whole remainder of src fits into the destination. */
        len = src->len;
    }

    src->len -= len;
    from = src->data;
    src->data += len;

    return nxt_cpymem(p, from, len);
}
/*
 * Builds the outgoing request as a chain of pool buffers.  The
 * request_create callback is invoked repeatedly; each piece of text it
 * produces (hs->u.request.copy) is copied into the current buffer, and a
 * new buffer is allocated whenever the current one fills up.  Returns the
 * head of the buffer chain, or NULL on error.
 */
static nxt_buf_t *
nxt_http_source_request_create(nxt_http_source_t *hs)
{
    nxt_int_t  ret;
    nxt_buf_t  *b, *req, **prev;

    nxt_thread_log_debug("http source create request");

    prev = &req;

new_buffer:

    ret = nxt_buf_pool_mem_alloc(&hs->upstream->buffers, 0);
    if (nxt_slow_path(ret != NXT_OK)) {
        return NULL;
    }

    /* Take ownership of the freshly allocated pool buffer. */
    b = hs->upstream->buffers.current;
    hs->upstream->buffers.current = NULL;

    *prev = b;
    prev = &b->next;

    for ( ;; ) {
        ret = hs->request_create(hs);

        if (nxt_fast_path(ret == NXT_OK)) {
            b->mem.free = nxt_http_source_copy(b->mem.free,
                                               &hs->u.request.copy,
                                               b->mem.end - b->mem.free);

            if (nxt_fast_path(hs->u.request.copy.len == 0)) {
                /* Piece fully copied; ask for the next one. */
                continue;
            }

            /* The buffer is full: log it and start a new one. */
            nxt_thread_log_debug("\"%*s\"", b->mem.free - b->mem.pos,
                                 b->mem.pos);
            goto new_buffer;
        }

        if (nxt_slow_path(ret == NXT_ERROR)) {
            return NULL;
        }

        /* ret == NXT_DONE */
        break;
    }

    nxt_thread_log_debug("\"%*s\"", b->mem.free - b->mem.pos, b->mem.pos);

    return req;
}
/*
 * First response filter: parses the HTTP status line, then rewires the
 * chain to the header filter.  An unparseable start is treated as an
 * HTTP/0.9 response (no header at all).
 */
static void
nxt_http_source_status_filter(nxt_thread_t *thr, void *obj, void *data)
{
    nxt_int_t          ret;
    nxt_buf_t          *b;
    nxt_http_source_t  *hs;

    hs = obj;
    b = data;

    /*
     * No cycle over buffer chain is required since at
     * start the stream source passes buffers one at a time.
     */

    nxt_log_debug(thr->log, "http source status filter");

    if (nxt_slow_path(nxt_buf_is_sync(b))) {
        nxt_http_source_sync_buffer(thr, hs, b);
        return;
    }

    ret = nxt_http_status_parse(&hs->u.status_parse, &b->mem);

    if (nxt_fast_path(ret == NXT_OK)) {
        /*
         * Change the HTTP source filter chain:
         *    stream source | HTTP header filter
         */
        hs->query.filter = nxt_http_source_header_filter;

        nxt_log_debug(thr->log, "upstream status: \"%*s\"",
                      hs->u.status_parse.end - b->mem.start, b->mem.start);

        hs->header_in.status = hs->u.status_parse.code;

        nxt_log_debug(thr->log, "upstream version:%d status:%uD \"%*s\"",
                      hs->u.status_parse.http_version,
                      hs->u.status_parse.code,
                      hs->u.status_parse.end - hs->u.status_parse.start,
                      hs->u.status_parse.start);

        /*
         * status_parse and header share a union; its fields were consumed
         * above, so it is safe to reinitialize for header parsing.
         */
        nxt_memzero(&hs->u.header, sizeof(nxt_http_split_header_parse_t));
        hs->u.header.mem_pool = hs->upstream->buffers.mem_pool;

        nxt_http_source_header_filter(thr, hs, b);
        return;
    }

    if (nxt_slow_path(ret == NXT_ERROR)) {
        /* HTTP/0.9 response. */
        hs->header_in.status = 200;
        nxt_http_source_header_ready(hs, b);
        return;
    }

    /* ret == NXT_AGAIN */

    /*
     * b->mem.pos is always equal to b->mem.end because b is a buffer
     * which points to a response part read by the stream source.
     * However, since the stream source is an immediate source of the
     * status filter, b->parent is a buffer the stream source reads in.
     */
    if (b->parent->mem.pos == b->parent->mem.end) {
        nxt_http_source_message("upstream sent too long status line: \"%*s\"",
                                b->mem.pos - b->mem.start, b->mem.start);

        nxt_http_source_fail(hs);
    }
}
/*
 * Second response filter: parses header lines one by one, dispatching
 * each to nxt_http_source_header_line_process(), until the header ends
 * (NXT_DONE), more data is needed (NXT_AGAIN), or an error occurs.
 */
static void
nxt_http_source_header_filter(nxt_thread_t *thr, void *obj, void *data)
{
    nxt_int_t          ret;
    nxt_buf_t          *b;
    nxt_http_source_t  *hs;

    hs = obj;
    b = data;

    /*
     * No cycle over buffer chain is required since at
     * start the stream source passes buffers one at a time.
     */

    nxt_log_debug(thr->log, "http source header filter");

    if (nxt_slow_path(nxt_buf_is_sync(b))) {
        nxt_http_source_sync_buffer(thr, hs, b);
        return;
    }

    for ( ;; ) {
        ret = nxt_http_split_header_parse(&hs->u.header, &b->mem);

        if (nxt_slow_path(ret != NXT_OK)) {
            break;
        }

        ret = nxt_http_source_header_line_process(hs);

        if (nxt_slow_path(ret != NXT_OK)) {
            break;
        }
    }

    if (nxt_fast_path(ret == NXT_DONE)) {
        nxt_log_debug(thr->log, "http source header done");
        nxt_http_source_header_ready(hs, b);
        return;
    }

    if (nxt_fast_path(ret == NXT_AGAIN)) {
        /* Wait for the next buffer from the stream source. */
        return;
    }

    if (ret != NXT_ERROR) {
        /* ret == NXT_DECLINED: "\r" is not followed by "\n" */
        nxt_log_error(NXT_LOG_ERR, thr->log,
                      "upstream sent invalid header line: \"%*s\\r...\"",
                      hs->u.header.parse.header_end
                          - hs->u.header.parse.header_name_start,
                      hs->u.header.parse.header_name_start);
    }

    /* ret == NXT_ERROR */

    nxt_http_source_fail(hs);
}
/*
 * Stores one parsed header line in header_in.list and, if the header
 * name is registered in the header hash (e.g. Content-Length,
 * Transfer-Encoding), runs its handler.
 */
static nxt_int_t
nxt_http_source_header_line_process(nxt_http_source_t *hs)
{
    size_t                     name_len;
    nxt_name_value_t           *nv;
    nxt_lvlhsh_query_t         lhq;
    nxt_http_header_parse_t    *hp;
    nxt_upstream_name_value_t  *unv;

    hp = &hs->u.header.parse;

    name_len = hp->header_name_end - hp->header_name_start;

    /* name_len must fit into the 8-bit nv->name_len field. */
    if (name_len > 255) {
        nxt_http_source_message("upstream sent too long header field name: "
                                "\"%*s\"", name_len, hp->header_name_start);
        return NXT_ERROR;
    }

    nv = nxt_list_add(hs->header_in.list);
    if (nxt_slow_path(nv == NULL)) {
        return NXT_ERROR;
    }

    nv->hash = hp->header_hash;
    nv->skip = 0;
    nv->name_len = name_len;
    nv->name_start = hp->header_name_start;
    nv->value_len = hp->header_end - hp->header_start;
    nv->value_start = hp->header_start;

    nxt_thread_log_debug("upstream header: \"%*s: %*s\"",
                         nv->name_len, nv->name_start,
                         nv->value_len, nv->value_start);

    lhq.key_hash = nv->hash;
    lhq.key.len = nv->name_len;
    lhq.key.data = nv->name_start;
    lhq.proto = &nxt_upstream_header_hash_proto;

    if (nxt_lvlhsh_find(&hs->header_in.hash, &lhq) == NXT_OK) {
        unv = lhq.value;

        if (unv->handler(hs->upstream, nv) != NXT_OK) {
            return NXT_ERROR;
        }
    }

    return NXT_OK;
}
/*
 * Response headers with dedicated handlers; registered in the lookup
 * hash by nxt_http_source_hash_create().
 */
static const nxt_upstream_name_value_t  nxt_http_source_headers[]
    nxt_aligned(32) =
{
    { nxt_http_source_content_length,
      nxt_upstream_name_value("content-length") },

    { nxt_http_source_transfer_encoding,
      nxt_upstream_name_value("transfer-encoding") },
};
/*
 * Registers the HTTP source's header handlers in the given lookup hash.
 */
nxt_int_t
nxt_http_source_hash_create(nxt_mem_pool_t *mp, nxt_lvlhsh_t *lh)
{
    nxt_uint_t  count;

    count = nxt_nitems(nxt_http_source_headers);

    return nxt_upstream_header_hash_add(mp, lh, nxt_http_source_headers,
                                        count);
}
/*
 * "Content-Length" header handler: parses the decimal value into
 * header_in.content_length.  A zero length is a valid empty body, so any
 * non-negative value is accepted; nxt_off_t_parse() is expected to return
 * a negative value for malformed input.  The previous test "length > 0"
 * wrongly rejected "Content-Length: 0".
 */
static nxt_int_t
nxt_http_source_content_length(nxt_upstream_source_t *us, nxt_name_value_t *nv)
{
    nxt_off_t          length;
    nxt_http_source_t  *hs;

    length = nxt_off_t_parse(nv->value_start, nv->value_len);

    if (nxt_fast_path(length >= 0)) {
        hs = us->protocol_source;
        hs->header_in.content_length = length;
        return NXT_OK;
    }

    return NXT_ERROR;
}
/*
 * "Transfer-Encoding" header handler: sets hs->chunked when the value
 * contains "chunked" (case-insensitive).  Always returns NXT_OK.
 */
static nxt_int_t
nxt_http_source_transfer_encoding(nxt_upstream_source_t *us,
    nxt_name_value_t *nv)
{
    u_char             *last;
    nxt_http_source_t  *hs;

    last = nv->value_start + nv->value_len;

    if (nxt_memcasestrn(nv->value_start, last, "chunked", 7) == NULL) {
        return NXT_OK;
    }

    hs = us->protocol_source;
    hs->chunked = 1;

    return NXT_OK;
}
/*
 * Called when the whole response header has been parsed: frees request
 * buffers, installs the body (and, if needed, chunk) filters, feeds any
 * body bytes that arrived with the header ("rest") through them, and
 * notifies the upstream state machine.
 */
static void
nxt_http_source_header_ready(nxt_http_source_t *hs, nxt_buf_t *rest)
{
    nxt_buf_t                *b;
    nxt_upstream_source_t    *us;
    nxt_http_source_chunk_t  *hsc;

    us = hs->upstream;

    /* Free buffers used for request header. */

    for (b = us->stream->out; b != NULL; b = b->next) {
        nxt_buf_pool_free(&us->buffers, b);
    }

    if (nxt_fast_path(nxt_buf_pool_available(&us->buffers))) {

        if (hs->chunked) {
            hsc = nxt_mem_zalloc(hs->upstream->buffers.mem_pool,
                                 sizeof(nxt_http_source_chunk_t));
            if (nxt_slow_path(hsc == NULL)) {
                goto fail;
            }

            /*
             * Change the HTTP source filter chain:
             *    stream source | chunk filter | HTTP body filter
             */
            hs->query.context = hsc;
            hs->query.filter = nxt_http_source_chunk_filter;

            hsc->next.context = hs;
            hsc->next.filter = nxt_http_source_body_filter;

            hsc->parse.mem_pool = hs->upstream->buffers.mem_pool;

            if (nxt_buf_mem_used_size(&rest->mem) != 0) {
                /* Decode body bytes that arrived with the header. */
                hs->rest = nxt_http_chunk_parse(&hsc->parse, rest);

                if (nxt_slow_path(hs->rest == NULL)) {
                    goto fail;
                }
            }

        } else {
            /*
             * Change the HTTP source filter chain:
             *    stream source | HTTP body filter
             */
            hs->query.filter = nxt_http_source_body_filter;

            if (nxt_buf_mem_used_size(&rest->mem) != 0) {
                hs->rest = rest;
            }
        }

        hs->upstream->state->ready_handler(hs);
        return;
    }

    nxt_thread_log_error(NXT_LOG_ERR, "%d buffers %uDK each "
                         "are not enough to read upstream response",
                         us->buffers.max, us->buffers.size / 1024);

fail:

    nxt_http_source_fail(hs);
}
/*
 * Decodes chunked transfer encoding and forwards the decoded buffers to
 * the HTTP body filter.  A hard parse error fails the source at once; a
 * chunk framing error first flushes what was parsed, then closes the
 * upstream via a deferred work-queue item.
 */
static void
nxt_http_source_chunk_filter(nxt_thread_t *thr, void *obj, void *data)
{
    nxt_buf_t                *b;
    nxt_http_source_t        *hs;
    nxt_http_source_chunk_t  *hsc;

    hsc = obj;
    b = data;

    nxt_log_debug(thr->log, "http source chunk filter");

    b = nxt_http_chunk_parse(&hsc->parse, b);

    hs = hsc->next.context;

    if (hsc->parse.error) {
        nxt_http_source_fail(hs);
        return;
    }

    if (hsc->parse.chunk_error) {
        /* Output all parsed before a chunk error and close upstream. */
        nxt_thread_current_work_queue_add(thr, nxt_http_source_chunk_error,
                                          hs, NULL, thr->log);
    }

    if (b != NULL) {
        nxt_source_filter(thr, hs->upstream->work_queue, &hsc->next, b);
    }
}
/*
 * Deferred handler scheduled by the chunk filter on a chunk framing
 * error: fails the HTTP source after the parsed data has been output.
 */
static void
nxt_http_source_chunk_error(nxt_thread_t *thr, void *obj, void *data)
{
    nxt_http_source_t  *source = obj;

    nxt_http_source_fail(source);
}
/*
* The HTTP source body filter accumulates first body buffers before the next
* filter will be established and sets completion handler for the last buffer.
*/
/*
 * Marks the last body buffer with the upstream completion handler, then
 * either forwards the chain to the next filter or, if no next filter is
 * installed yet, accumulates it on hs->rest.
 */
static void
nxt_http_source_body_filter(nxt_thread_t *thr, void *obj, void *data)
{
    nxt_buf_t          *buf, *chain;
    nxt_http_source_t  *hs;

    hs = obj;
    chain = data;

    nxt_log_debug(thr->log, "http source body filter");

    for (buf = chain; buf != NULL; buf = buf->next) {
        if (!nxt_buf_is_last(buf)) {
            continue;
        }

        buf->data = hs->upstream->data;
        buf->completion_handler = hs->upstream->state->completion_handler;
    }

    if (hs->next == NULL) {
        /* No downstream filter yet: keep the buffers for later. */
        nxt_buf_chain_add(&hs->rest, chain);
        return;
    }

    nxt_source_filter(thr, hs->upstream->work_queue, hs->next, chain);
}
/*
 * Handles a sync buffer received while parsing the response header:
 * logs the reason (premature close or buffer exhaustion) and fails.
 */
static void
nxt_http_source_sync_buffer(nxt_thread_t *thr, nxt_http_source_t *hs,
    nxt_buf_t *b)
{
    if (!nxt_buf_is_last(b)) {
        nxt_log_error(NXT_LOG_ERR, thr->log, "%ui buffers %uz each are not "
                      "enough to process upstream response header",
                      hs->upstream->buffers.max,
                      hs->upstream->buffers.size);

    } else {
        nxt_log_error(NXT_LOG_ERR, thr->log,
                      "upstream closed prematurely connection");
    }

    /* The stream source sends only the last and the nobuf sync buffer. */
    nxt_http_source_fail(hs);
}
/*
 * Stream source error callback: recovers the HTTP source from the
 * stream's next-hook context and fails it.
 */
static void
nxt_http_source_error(nxt_stream_source_t *stream)
{
    nxt_http_source_t  *source;

    nxt_thread_log_debug("http source error");

    source = stream->next->context;
    nxt_http_source_fail(source);
}
/*
 * Central failure path: reports the error to the upstream state machine.
 */
static void
nxt_http_source_fail(nxt_http_source_t *hs)
{
    nxt_thread_t  *thread;

    thread = nxt_thread();

    nxt_log_debug(thread->log, "http source fail");

    /* TODO: fail, next upstream, or bad gateway */

    hs->upstream->state->error_handler(thread, hs, NULL);
}
/*
 * Logs an error message with a length-bounded data excerpt, truncating
 * over-long excerpts and marking them with a "..." ellipsis.  The caller
 * must guarantee p has room for three extra bytes when truncation occurs.
 */
static void
nxt_http_source_message(const char *msg, size_t len, u_char *p)
{
    nxt_uint_t  i;

    if (len > NXT_MAX_ERROR_STR - 300) {
        len = NXT_MAX_ERROR_STR - 300;

        for (i = 0; i < 3; i++) {
            p[len++] = '.';
        }
    }

    nxt_thread_log_error(NXT_LOG_ERR, msg, len, p);
}

49
src/nxt_http_source.h Normal file
View File

@@ -0,0 +1,49 @@
/*
* Copyright (C) Igor Sysoev
* Copyright (C) NGINX, Inc.
*/
#ifndef _NXT_HTTP_SOURCE_H_INCLUDED_
#define _NXT_HTTP_SOURCE_H_INCLUDED_
/* Request text pending output, plus request_create scratch space. */
typedef struct {
    nxt_str_t  copy;       /* remaining text to copy into output buffers */
    uintptr_t  data[3];    /* presumably request_create() state — confirm */
} nxt_http_source_request_t;

typedef struct nxt_http_source_s  nxt_http_source_t;
typedef nxt_int_t (*nxt_http_source_request_create_t)(nxt_http_source_t *hs);

/* HTTP protocol source: builds the request and parses the response. */
struct nxt_http_source_s {
    nxt_source_hook_t                 query;   /* hook the stream source calls */
    nxt_source_hook_t                 *next;   /* downstream body consumer */

    nxt_upstream_source_t             *upstream;

    nxt_http_source_request_create_t  request_create;

    nxt_upstream_header_in_t          header_in;

    nxt_buf_t                         *rest;   /* body received with header */

    uint32_t                          chunked; /* 1 bit */

    /* The parse phases are sequential, so their states share one union. */
    union {
        nxt_http_source_request_t      request;
        nxt_http_status_parse_t        status_parse;
        nxt_http_split_header_parse_t  header;
    } u;
};
NXT_EXPORT void nxt_http_source_handler(nxt_upstream_source_t *us,
nxt_http_source_request_create_t request_create);
NXT_EXPORT nxt_int_t nxt_http_source_hash_create(nxt_mem_pool_t *mp,
nxt_lvlhsh_t *lh);
#endif /* _NXT_HTTP_SOURCE_H_INCLUDED_ */

202
src/nxt_job.c Normal file
View File

@@ -0,0 +1,202 @@
/*
* Copyright (C) Igor Sysoev
* Copyright (C) NGINX, Inc.
*/
#include <nxt_main.h>
#if (NXT_THREADS)
static void nxt_job_thread_trampoline(nxt_thread_t *thr, void *obj, void *data);
static void nxt_job_thread_return_handler(nxt_thread_t *thr, void *obj,
void *data);
#endif
/*
 * Allocates a zeroed job of "size" bytes.  With mp == NULL a dedicated
 * pool is created and owned by the job; otherwise the job comes from the
 * given pool's cache (cache_size records that for nxt_job_destroy()).
 * Returns NULL on allocation failure.
 */
void *
nxt_job_create(nxt_mem_pool_t *mp, size_t size)
{
    size_t     cache_size;
    nxt_job_t  *job;

    if (mp == NULL) {
        mp = nxt_mem_pool_create(256);

        if (nxt_slow_path(mp == NULL)) {
            return NULL;
        }

        job = nxt_mem_zalloc(mp, size);
        cache_size = 0;

    } else {
        job = nxt_mem_cache_zalloc0(mp, size);
        cache_size = size;
    }

    if (nxt_fast_path(job != NULL)) {
        job->cache_size = (uint16_t) cache_size;
        job->mem_pool = mp;
        nxt_job_set_name(job, "job");

        /*
         * Allow safe nxt_queue_remove() in nxt_job_destroy().
         * This must stay inside the NULL check: the previous code ran
         * nxt_queue_self(&job->link) unconditionally and dereferenced a
         * NULL job when the allocation failed.
         */
        nxt_queue_self(&job->link);
    }

    return job;
}
/*
 * Initializes a caller-allocated job in place: zeroes the first "size"
 * bytes first, then links the queue node to itself so a later
 * nxt_job_destroy() can call nxt_queue_remove() safely.
 */
void
nxt_job_init(nxt_job_t *job, size_t size)
{
    nxt_memzero(job, size);

    nxt_job_set_name(job, "job");

    nxt_queue_self(&job->link);
}
/*
 * Destroys a job.  A job with cache_size == 0 owns its pool, which is
 * destroyed along with all job allocations; a cached job is merely
 * returned to its parent pool's cache.
 */
void
nxt_job_destroy(void *data)
{
    nxt_job_t  *job;

    job = data;

    nxt_queue_remove(&job->link);

    if (job->cache_size == 0) {

        /* mem_pool may be NULL for a job set up via nxt_job_init(). */
        if (job->mem_pool != NULL) {
            nxt_mem_pool_destroy(job->mem_pool);
        }

    } else {
        nxt_mem_cache_free0(job->mem_pool, job, job->cache_size);
    }
}
/*
 * Registers nxt_job_destroy(job) as a cleanup handler of the given pool,
 * postponing the job's destruction until the pool is destroyed.
 */
nxt_int_t
nxt_job_cleanup_add(nxt_mem_pool_t *mp, nxt_job_t *job)
{
    nxt_mem_pool_cleanup_t  *cleanup;

    cleanup = nxt_mem_pool_cleanup(mp, 0);

    if (nxt_slow_path(cleanup == NULL)) {
        return NXT_ERROR;
    }

    cleanup->handler = nxt_job_destroy;
    cleanup->data = job;

    return NXT_OK;
}
/*
* The (void *) casts in nxt_thread_pool_post() and nxt_event_engine_post()
* calls and to the "nxt_work_handler_t" are required by Sun C.
*/
/*
 * Starts a job.  If a thread pool is configured the handler runs there
 * via the trampoline; on post failure, or without threads, the handler
 * (or abort_handler after a failed post) runs in the current thread.
 */
void
nxt_job_start(nxt_thread_t *thr, nxt_job_t *job, nxt_work_handler_t handler)
{
    nxt_log_debug(thr->log, "%s start", job->name);

#if (NXT_THREADS)

    if (job->thread_pool != NULL) {
        nxt_int_t  ret;

        /* Remember where to post the result back to. */
        job->engine = thr->engine;
        ret = nxt_thread_pool_post(job->thread_pool, nxt_job_thread_trampoline,
                                   job, (void *) handler, job->log);
        if (ret == NXT_OK) {
            return;
        }

        /* Posting failed: fall through and abort in this thread. */
        handler = job->abort_handler;
    }

#endif

    handler(thr, job, job->data);
}
#if (NXT_THREADS)
/* A trampoline function is called by a thread pool thread. */

static void
nxt_job_thread_trampoline(nxt_thread_t *thr, void *obj, void *data)
{
    nxt_job_t           *job;
    nxt_work_handler_t  handler;

    job = obj;
    /* The real handler is smuggled through the opaque data pointer. */
    handler = (nxt_work_handler_t) data;

    nxt_log_debug(thr->log, "%s thread", job->name);

    if (nxt_slow_path(job->cancel)) {
        /* Canceled before it ran: route back to the initiator thread. */
        nxt_job_return(thr, job, job->abort_handler);
    } else {
        handler(thr, job, job->data);
    }
}
#endif
/*
 * Returns a job's result to its initiator.  From a thread pool thread
 * (job->engine set) the handler is posted to the initiating engine;
 * otherwise it is pushed onto the current thread's work queue.  A
 * canceled job gets its abort_handler instead.
 */
void
nxt_job_return(nxt_thread_t *thr, nxt_job_t *job, nxt_work_handler_t handler)
{
    nxt_log_debug(thr->log, "%s return", job->name);

#if (NXT_THREADS)

    if (job->engine != NULL) {
        /* A return function is called in thread pool thread context. */
        nxt_event_engine_post(job->engine, nxt_job_thread_return_handler,
                              job, (void *) handler, job->log);
        return;
    }

#endif

    if (nxt_slow_path(job->cancel)) {
        nxt_log_debug(thr->log, "%s cancellation", job->name);
        handler = job->abort_handler;
    }

    nxt_thread_work_queue_push(thr, &thr->work_queue.main,
                               handler, job, job->data, thr->log);
}
#if (NXT_THREADS)
/*
 * Runs in the initiating engine's thread after a thread pool job posted
 * its result back; honors cancellation by switching to abort_handler.
 */
static void
nxt_job_thread_return_handler(nxt_thread_t *thr, void *obj, void *data)
{
    nxt_job_t           *job;
    nxt_work_handler_t  handler;

    job = obj;
    handler = (nxt_work_handler_t) data;

    if (nxt_slow_path(job->cancel)) {
        nxt_log_debug(thr->log, "%s cancellation", job->name);
        handler = job->abort_handler;
    }

    handler(thr, job, job->data);
}
#endif

87
src/nxt_job.h Normal file
View File

@@ -0,0 +1,87 @@
/*
* Copyright (C) Igor Sysoev
* Copyright (C) NGINX, Inc.
*/
#ifndef _NXT_JOB_H_INCLUDED_
#define _NXT_JOB_H_INCLUDED_
/*
* A job may run by separate thread, so each job should have its
* its own mem_pool. A job itself is allocated from this mem_pool.
* On job completion a job initiator can destroy the job at once
* with nxt_job_destroy() or can postpone the destruction with
* nxt_job_cleanup_add(), if the initiator uses data from the job's
* mem_pool.
*
* Several child jobs may run in context of another job in the same
* thread. In this case the child job may use a mem_pool of the
* parent job and the child job is allocated using the mem_pool's cache.
* nxt_job_destroy() just returns the job to the cache. All job
* allocations however still remain in the parent mem_pool.
*
* The first thread in job thread pool is created on demand. If this
* operation fails the job abort handler is called. It also is called
* if the job is canceled. To avoid race condition the abort handler
* always runs in context of a thread initiated the job. The abort
* handler may be as simple as nxt_job_destroy().
*/
typedef struct {
    void                *data;          /* opaque argument for job handlers */

    nxt_work_handler_t  abort_handler;  /* run on cancellation or failure */

    uint16_t            cache_size;     /* nonzero: allocated from pool cache */
    uint8_t             cancel;         /* 1 bit */

    nxt_mem_pool_t      *mem_pool;
    nxt_queue_link_t    link;           /* self-linked until queued */

#if (NXT_THREADS)
    nxt_thread_pool_t   *thread_pool;   /* if set, job runs in this pool */
    nxt_event_engine_t  *engine;        /* engine to post the return to */
    nxt_log_t           *log;
#endif

#if (NXT_DEBUG)
    const char          *name;          /* for debug logging only */
#endif
} nxt_job_t;
NXT_EXPORT void *nxt_job_create(nxt_mem_pool_t *mp, size_t size);
NXT_EXPORT void nxt_job_init(nxt_job_t *job, size_t size);
NXT_EXPORT void nxt_job_destroy(void *data);
NXT_EXPORT nxt_int_t nxt_job_cleanup_add(nxt_mem_pool_t *mp, nxt_job_t *job);
NXT_EXPORT void nxt_job_start(nxt_thread_t *thr, nxt_job_t *job,
nxt_work_handler_t handler);
NXT_EXPORT void nxt_job_return(nxt_thread_t *thr, nxt_job_t *job,
nxt_work_handler_t handler);
#define \
nxt_job_cancel(job) \
(job)->cancel = 1
#if (NXT_DEBUG)
#define \
nxt_job_set_name(job, text) \
(job)->name = text
#else
#define \
nxt_job_set_name(job, text)
#endif
#endif /* _NXT_JOB_H_INCLUDED_ */

24
src/nxt_job_cache_file.c Normal file
View File

@@ -0,0 +1,24 @@
/*
* Copyright (C) Igor Sysoev
* Copyright (C) NGINX, Inc.
*/
#include <nxt_main.h>
/*
 * Creates a cache file job with an invalid fd and the default
 * read_required hook.  Returns NULL on allocation failure.
 */
nxt_job_cache_file_t *
nxt_job_cache_file_create(nxt_mem_pool_t *mp)
{
    nxt_job_cache_file_t  *job;

    job = nxt_job_create(mp, sizeof(nxt_job_cache_file_t));

    if (nxt_slow_path(job == NULL)) {
        return NULL;
    }

    job->file.fd = NXT_FILE_INVALID;
    job->read_required = nxt_job_file_read_required;

    return job;
}

303
src/nxt_job_file.c Normal file
View File

@@ -0,0 +1,303 @@
/*
* Copyright (C) Igor Sysoev
* Copyright (C) NGINX, Inc.
*/
#include <nxt_main.h>
static void nxt_job_file_open_and_read(nxt_thread_t *thr, void *obj,
void *data);
static nxt_int_t nxt_job_file_open(nxt_job_file_t *jbf);
static nxt_int_t nxt_job_file_info(nxt_job_file_t *jbf);
static nxt_int_t nxt_job_file_mmap(nxt_job_file_t *jbf, size_t size);
static nxt_int_t nxt_job_file_read_data(nxt_job_file_t *jbf, size_t size);
static nxt_int_t nxt_job_file_read_required(nxt_job_file_t *jbf);
/*
 * Creates a file job with an invalid fd, an "accessed long ago" stamp,
 * and the default read_required hook.  Returns NULL on failure.
 */
nxt_job_file_t *
nxt_job_file_create(nxt_mem_pool_t *mp)
{
    nxt_job_file_t  *job;

    job = nxt_job_create(mp, sizeof(nxt_job_file_t));

    if (nxt_slow_path(job == NULL)) {
        return NULL;
    }

    job->file.fd = NXT_FILE_INVALID;
    job->file.accessed = NXT_FILE_ACCESSED_LONG_AGO;
    job->read_required = nxt_job_file_read_required;

    return job;
}
/*
 * Initializes a caller-allocated file job in place, mirroring the
 * defaults set by nxt_job_file_create().
 */
void
nxt_job_file_init(nxt_job_file_t *jbf)
{
    nxt_job_init(&jbf->job, sizeof(nxt_job_file_t));

    jbf->file.fd = NXT_FILE_INVALID;
    jbf->file.accessed = NXT_FILE_ACCESSED_LONG_AGO;
    jbf->read_required = nxt_job_file_read_required;
}
/*
 * Must be a function but not a macro, because
 * it can be used as function pointer.
 */

/* Starts the open-and-read sequence for a file job. */
void
nxt_job_file_read(nxt_thread_t *thr, nxt_job_t *job)
{
    nxt_job_start(thr, job, nxt_job_file_open_and_read);
}
/*
 * Job handler (possibly in a thread pool thread): opens the file if
 * needed, optionally reads ahead, fills jbf->buffer by mmap or read,
 * and returns the job to its initiator via ready/error handler.
 */
static void
nxt_job_file_open_and_read(nxt_thread_t *thr, void *obj, void *data)
{
    size_t              size;
    nxt_int_t           n;
    nxt_bool_t          read_ahead;
    nxt_file_t          *file;
    nxt_job_file_t      *jbf;
    nxt_work_handler_t  handler;

    jbf = obj;
    file = &jbf->file;

    nxt_log_debug(thr->log, "file job read: \"%FN\"", file->name);

    if (file->fd != NXT_FILE_INVALID && jbf->close_before_open) {
        nxt_file_close(file);
        file->fd = NXT_FILE_INVALID;
    }

    if (file->fd == NXT_FILE_INVALID) {
        switch (nxt_job_file_open(jbf)) {

        case NXT_OK:
            break;

        case NXT_DECLINED:
            /* Not an error (directory, or read_required said no). */
            handler = jbf->ready_handler;
            goto done;

        default: /* NXT_ERROR */
            handler = jbf->error_handler;
            goto done;
        }
    }

    if (file->size > 0) {

        if (jbf->buffer != NULL) {
            /* Clamp to buffer capacity. */
            size = nxt_buf_mem_size(&jbf->buffer->mem);
            size = nxt_min(file->size, (nxt_off_t) size);
            read_ahead = nxt_buf_is_mmap(jbf->buffer);

        } else {
            size = nxt_min(file->size, 1024 * 1024);
            read_ahead = jbf->read_ahead;
        }

        if (read_ahead) {
            nxt_file_read_ahead(&jbf->file, jbf->offset, size);
        }

        if (jbf->buffer != NULL) {

            if (nxt_buf_is_mmap(jbf->buffer)) {
                n = nxt_job_file_mmap(jbf, size);

            } else {
                n = nxt_job_file_read_data(jbf, size);
            }

            if (nxt_slow_path(n != NXT_OK)) {
                handler = jbf->error_handler;
                goto done;
            }
        }
    }

    if (jbf->offset == file->size) {
        /* The whole file has been consumed. */
        jbf->complete = 1;

        if (jbf->close) {
            nxt_file_close(file);
            file->fd = NXT_FILE_INVALID;
        }
    }

    nxt_job_return(thr, &jbf->job, jbf->ready_handler);
    return;

done:

    /* Error/declined paths always release the descriptor. */
    if (file->fd != NXT_FILE_INVALID) {
        nxt_file_close(file);
        file->fd = NXT_FILE_INVALID;
    }

    nxt_job_return(thr, &jbf->job, handler);
}
/*
 * Opens the job's file, optionally stat'ing it first (test_before_open)
 * so directories and read_required rejections are detected without an
 * open().  On open failure, falls back to stat'ing the parent directory
 * (name truncated at directory_end) when the error could mean the file
 * itself is absent.  Returns NXT_OK, NXT_DECLINED, or NXT_ERROR.
 */
static nxt_int_t
nxt_job_file_open(nxt_job_file_t *jbf)
{
    nxt_int_t  n;

    if (jbf->test_before_open) {
        n = nxt_job_file_info(jbf);

        if (n != NXT_OK) {
            goto test_directory;
        }

        if (jbf->file.type == NXT_FILE_DIRECTORY) {
            return NXT_DECLINED;
        }

        if (jbf->read_required(jbf) != NXT_OK) {
            return NXT_DECLINED;
        }
    }

    n = nxt_file_open(&jbf->file, NXT_FILE_RDONLY, NXT_FILE_OPEN, 0);

    if (n == NXT_OK) {
        n = nxt_job_file_info(jbf);

        if (nxt_fast_path(n == NXT_OK)) {

            if (jbf->file.type == NXT_FILE_DIRECTORY) {
                return NXT_DECLINED;
            }

            return jbf->read_required(jbf);
        }

        return n;
    }

test_directory:

    /* These errno values already identify the failure precisely. */
    if (jbf->directory_end != 0
        && jbf->file.error != NXT_ENOTDIR
        && jbf->file.error != NXT_ENAMETOOLONG
        && jbf->file.error != NXT_EACCES)
    {
        /* Truncate the path to its directory component in place. */
        jbf->file.name[jbf->directory_end] = '\0';

        return nxt_job_file_info(jbf);
    }

    return n;
}
/*
 * Stats the job's file and records type, size, and mtime for regular
 * files and directories; other file types leave those fields untouched.
 */
static nxt_int_t
nxt_job_file_info(nxt_job_file_t *jbf)
{
    nxt_file_t       *file;
    nxt_file_info_t  fi;

    file = &jbf->file;

    if (nxt_file_info(file, &fi) != NXT_OK) {
        return NXT_ERROR;
    }

    if (nxt_is_file(&fi)) {
        file->type = NXT_FILE_REGULAR;
        file->size = nxt_file_size(&fi);
        file->mtime = nxt_file_mtime(&fi);

    } else if (nxt_is_dir(&fi)) {
        file->type = NXT_FILE_DIRECTORY;
        file->size = nxt_file_size(&fi);
        file->mtime = nxt_file_mtime(&fi);
    }

    return NXT_OK;
}
/*
 * Maps "size" bytes of the file at jbf->offset into the job's buffer and
 * touches every page to wire the mapping.  Advances jbf->offset.
 */
static nxt_int_t
nxt_job_file_mmap(nxt_job_file_t *jbf, size_t size)
{
    u_char             *p, *end;
    /* File-static accumulator; see the anti-optimization note below. */
    static nxt_uint_t  n;

    p = nxt_mem_map(NULL, &jbf->buffer->mmap, size, NXT_MEM_MAP_READ,
                    (NXT_MEM_MAP_FILE | NXT_MEM_MAP_PREFAULT),
                    jbf->file.fd, jbf->offset);

    if (nxt_fast_path(p != NXT_MEM_MAP_FAILED)) {

        end = p + size;

        jbf->buffer->mem.pos = p;
        jbf->buffer->mem.free = end;
        jbf->buffer->mem.start = p;
        jbf->buffer->mem.end = end;
        jbf->buffer->file_end += size;
        jbf->offset += size;

        /*
         * The mapped pages should be already preloaded in the kernel page
         * cache by nxt_file_read_ahead().  Touching them should wire the pages
         * in user land memory if mmap() did not do this.  Adding to the static
         * variable "n" disables the loop elimination during optimization.
         */
        n += *p;

        for (p = nxt_align_ptr(p, nxt_pagesize); p < end; p += nxt_pagesize) {
            n += *p;
        }

        return NXT_OK;
    }

    return NXT_ERROR;
}
/*
 * Reads "size" bytes of the file at jbf->offset into the job's buffer,
 * advancing the buffer free pointer, the offset, and (for file buffers)
 * file_end by the number of bytes actually read.
 */
static nxt_int_t
nxt_job_file_read_data(nxt_job_file_t *jbf, size_t size)
{
    ssize_t  bytes;

    bytes = nxt_file_read(&jbf->file, jbf->buffer->mem.pos, size,
                          jbf->offset);

    if (nxt_slow_path(bytes <= 0)) {
        return NXT_ERROR;
    }

    jbf->buffer->mem.free += bytes;
    jbf->offset += bytes;

    if (nxt_buf_is_file(jbf->buffer)) {
        jbf->buffer->file_end += bytes;
    }

    return NXT_OK;
}
/* Default read_required hook: always proceed with reading the file. */
static nxt_int_t
nxt_job_file_read_required(nxt_job_file_t *jbf)
{
    return NXT_OK;
}

Some files were not shown because too many files have changed in this diff Show More