diff --git a/.clang-tidy b/.clang-tidy index cac411cce14..1658bf3d379 100644 --- a/.clang-tidy +++ b/.clang-tidy @@ -1 +1 @@ -Checks: -*,readability-braces-around-statements,modernize-use-nullptr +Checks: -*,modernize-loop-convert,modernize-use-bool-literals,modernize-deprecated-headers,performance-unnecessary-value-param,performance-faster-string-find,modernize-raw-string-literal,modernize-redundant-void-arg,modernize-use-nullptr,modernize-use-override diff --git a/.gitignore b/.gitignore index 92388b0aa1d..d14eb96f53b 100644 --- a/.gitignore +++ b/.gitignore @@ -143,6 +143,7 @@ rc/trafficserver.service .libs/ .svn/ +.vscode/ tsxs diff --git a/.gitmodules b/.gitmodules deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/.indent.pro b/.indent.pro deleted file mode 100644 index b0c23ca8a5a..00000000000 --- a/.indent.pro +++ /dev/null @@ -1,29 +0,0 @@ --nut --nbad --bap --nbbo --nbc --br --bls --ce --ci2 --cli0 --cs --d0 --di2 --nfc1 --nfca --hnl --i2 --ip0 --l120 --lp --npcs --nprs --psl --saf --sai --saw --nsc --nsob --nss diff --git a/ci/coverity-model.cpp b/ci/coverity-model.cpp index c24cfde804e..02f2ed777f7 100644 --- a/ci/coverity-model.cpp +++ b/ci/coverity-model.cpp @@ -50,3 +50,13 @@ void _TSReleaseAssert(const char* txt, const char* f, int l) } } /* extern "C" */ + +// Teach Coverity that the my_exit() in logstats.cc exits ... +struct ExitStatus { +}; + +void +my_exit(const ExitStatus &status) +{ + __coverity_panic__(); +} diff --git a/ci/tsqa/nosetests b/ci/jenkins/bin/autest.sh old mode 100755 new mode 100644 similarity index 53% rename from ci/tsqa/nosetests rename to ci/jenkins/bin/autest.sh index 7486f4d3375..698a8426885 --- a/ci/tsqa/nosetests +++ b/ci/jenkins/bin/autest.sh @@ -1,5 +1,5 @@ -#! /usr/bin/env bash - +#!/bin/sh +# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. 
See the NOTICE file # distributed with this work for additional information @@ -16,15 +16,31 @@ # See the License for the specific language governing permissions and # limitations under the License. -set -e # exit on error +cd "${WORKSPACE}/src" +[ -d tests ] || exit 0 + +autoreconf -if + +INSTALL="${WORKSPACE}/${BUILD_NUMBER}/install" +SANDBOX="/tmp/ausb-$$" + +mkdir -p $INSTALL -TSQA=$(cd $(dirname $0); pwd) +./configure --prefix="$INSTALL" \ + --with-user=jenkins \ + --enable-experimental-plugins \ + --enable-example-plugins \ + --enable-ccache \ + --enable-debug \ + --enable-werror -cd $TSQA +# Build and run regressions +${ATS_MAKE} ${ATS_MAKE_FLAGS} V=1 Q= +${ATS_MAKE} check VERBOSE=Y && ${ATS_MAKE} install -. ./virtualenv/bin/activate +/usr/bin/autest -D ./tests/gold_tests --sandbox "$SANDBOX" --ats-bin "${INSTALL}/bin" +status="$?" +[ -d "$SANDBOX" ] && rm -rf "$SANDBOX" -./virtualenv/bin/nosetests \ - --with-xunit \ - --nocapture \ - "$@" +[ "0" != "$status" ] && exit -1 +exit 0 diff --git a/ci/jenkins/bin/tsqa.sh b/ci/jenkins/bin/clang-format.sh old mode 100755 new mode 100644 similarity index 57% rename from ci/jenkins/bin/tsqa.sh rename to ci/jenkins/bin/clang-format.sh index 744bdb50fc4..2d5fbc1dbe1 --- a/ci/jenkins/bin/tsqa.sh +++ b/ci/jenkins/bin/clang-format.sh @@ -16,11 +16,23 @@ # See the License for the specific language governing permissions and # limitations under the License. -# Run all the TSQA tests. -TSQA_LAYOUT_DIR="${WORKSPACE}/${BUILD_NUMBER}" -cd "${WORKSPACE}/src/ci/tsqa" || exit 2 -make test "TSQA_LAYOUT_DIR=${TSQA_LAYOUT_DIR}" -status=$? +cd "${WORKSPACE}/src" +autoreconf -if && ./configure -# Exit with proper status -exit $status +# Test clang-format, copy our version of clang-format if needed +if [ ! -d .git/fmt ]; then + if [ -z "${ghprbTargetBranch}" ]; then + cp -rp /home/jenkins/clang-format/master .git/fmt + else + # This is for Github PR's, to make sure we use the right clang-format for the branch. 
+ # This is not an issue on normal branch builds, since they will have the right .git/fmt. + cp -rp /home/jenkins/clang-format/${ghprbTargetBranch} .git/fmt + fi +fi + +${ATS_MAKE} -j clang-format +git diff --exit-code +[ "0" != "$?" ] && exit -1 + +# Normal exit +exit 0 diff --git a/ci/jenkins/bin/github.sh b/ci/jenkins/bin/github.sh index 74941713c66..37b15b4a12f 100644 --- a/ci/jenkins/bin/github.sh +++ b/ci/jenkins/bin/github.sh @@ -16,13 +16,13 @@ # See the License for the specific language governing permissions and # limitations under the License. -# Setup autoconf +INSTALL="${WORKSPACE}/${BUILD_NUMBER}/install" + +mkdir -p ${INSTALL} cd src autoreconf -if -mkdir -p "${WORKSPACE}/${BUILD_NUMBER}/install" - -./configure --prefix="${WORKSPACE}/${BUILD_NUMBER}/install" \ +./configure --prefix="${INSTALL}" \ --with-user=jenkins \ --enable-experimental-plugins \ --enable-example-plugins \ @@ -30,16 +30,11 @@ mkdir -p "${WORKSPACE}/${BUILD_NUMBER}/install" --enable-debug \ --enable-werror -# Test clang-format (but only where we have the local copy of clang-format, i.e. linux) -if [ -d /usr/local/fmt ]; then - [ ! -d .git/fmt ] && cp -rp /usr/local/fmt .git - make clang-format - git diff --exit-code - [ "0" != "$?" ] && exit -1 -fi - # Build and run regressions -make -j4 -make check VERBOSE=Y && make install +${ATS_MAKE} ${ATS_MAKE_FLAGS} V=1 Q= +${ATS_MAKE} check VERBOSE=Y && ${ATS_MAKE} install + +${INSTALL}/bin/traffic_server -K -k -R 1 +[ "0" != "$?" ] && exit -1 -"${WORKSPACE}/${BUILD_NUMBER}/install/bin/traffic_server" -K -k -R 1 +exit 0 diff --git a/ci/tsqa/Makefile b/ci/tsqa/Makefile deleted file mode 100644 index 205a03b2c07..00000000000 --- a/ci/tsqa/Makefile +++ /dev/null @@ -1,90 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. 
The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - --include /etc/os-release -DISTRIBUTION := $(NAME)$(VERSION_ID) - -# Search for a program in $PATH (direct from the GNU make manual. -pathsearch = $(firstword $(wildcard $(addsuffix /$(1),$(subst :, ,$(PATH))))) - -APT := $(call pathsearch,apt-get) -DNF := $(call pathsearch,dnf) -YUM := $(call pathsearch,yum) - -ifndef DISTRIBUTION -ifneq ($(APT),) -DISTRIBUTION := DebianGeneric -else ifneq ($(DNF),) -DISTRIBUTION := RedHatGeneric -else ifneq ($(YUM),) -DISTRIBUTION := RedHatGeneric -endif -endif - -PACKAGES_Fedora23 := python-pip python-virtualenv python-devel libffi-devel redhat-rpm-config -PACKAGES_RedHatGeneric := python-pip python-virtualenv python-devel libcffi-devel -PACKAGES_DebianGeneric := python-pip python-virtualenv python-dev libffi-dev - -PACKAGES := $(PACKAGES_$(DISTRIBUTION)) - -VIRTUALENV_DIR := virtualenv - -NOSETESTS := ./nosetests - -# Run all tests. -.PHONY: test -test: $(VIRTUALENV_DIR) - @$(NOSETESTS) --with-xunit --nocapture --verbose --logging-level=INFO - -# Scan and list the tests. -.PHONY: list -list: $(VIRTUALENV_DIR) - @$(NOSETESTS) -sv --collect-only - -# Construct the virtualenv. -.PHONY: $(VIRTUALENV_DIR) -$(VIRTUALENV_DIR): $(VIRTUALENV_DIR)/.done - -$(VIRTUALENV_DIR)/.done: - @if [ ! -d $(VIRTUALENV_DIR) ]; then\ - virtualenv $(VIRTUALENV_DIR);\ - fi - $(MAKE) update - @echo "Virtualenv ready!" 
- -# Update the virtualenv with the latest TSQA package and dependencies. -.PHONY: update -update: - @rm -f $(VIRTUALENV_DIR)/.done - @. $(VIRTUALENV_DIR)/bin/activate && \ - $(VIRTUALENV_DIR)/bin/pip install --upgrade pip && \ - $(VIRTUALENV_DIR)/bin/pip install --upgrade -r requirements.txt && \ - touch $(VIRTUALENV_DIR)/.done - -# Install TSQA bootstrap dependencies. -.PHONY: bootstrap -bootstrap: - @echo package list is $(PACKAGES) - @if [ -e /etc/debian_version ]; then \ - $(APT) install -y $(PACKAGES) ; \ - fi - @if [ -e /etc/redhat-release ]; then \ - $(YUM) install -y $(PACKAGES) ; \ - fi - -.PHONY: clean -clean: - rm -rf virtualenv diff --git a/ci/tsqa/README.rst b/ci/tsqa/README.rst deleted file mode 100644 index 0e55c8bed21..00000000000 --- a/ci/tsqa/README.rst +++ /dev/null @@ -1,54 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one - or more contributor license agreements. See the NOTICE file - distributed with this work for additional information - regarding copyright ownership. The ASF licenses this file - to you under the Apache License, Version 2.0 (the - "License"); you may not use this file except in compliance - with the License. You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, - software distributed under the License is distributed on an - "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - KIND, either express or implied. See the License for the - specific language governing permissions and limitations - under the License. - -================== -How do I run TSQA? -================== -TSQA is mostly self contained (using python's virutalenv). There are currently only -two external depencies (below package names are for RHEL/Centos): - - python-virtualenv - - libcffi-devel - -Run ``sudo make bootstrap`` to install the TSQA dependencies. 
- -Once these two packages are available you simply need to run "make test" in this -directory to run all tests. - -If you wish to run single tests you may do so by using nosetests from the -virtualenv directly-- this can be done by running something like: - - ./virtualenv/bin/nosetests tests/test_example.py - - -===================== -How do I write tests? -===================== -There are examples here in the trafficserver source tree (test_example.py), in -trafficserver-qa (https://github.com/apache/trafficserver-qa/tree/master/examples), -and other test cases to read through. If you have any questions please feel free -to send mail to the mailing lists, or pop onto IRC. - - -===================== -Where do I put tests? -===================== -At this point there aren't a lot of tests, so it may be difficult to know *where* -to put your test. The general plan is to group tests by functionality. For example, -if you have a keepalive test it should go with the rest of the keepalive tests. -In general where we put the test is a lot less important than the test itself. -So if you are confused about where to put it please write the test and submit a -patch or pull request, and someone will help you place it. 
diff --git a/ci/tsqa/TODO b/ci/tsqa/TODO deleted file mode 100644 index c01205ba090..00000000000 --- a/ci/tsqa/TODO +++ /dev/null @@ -1,7 +0,0 @@ -# TODO list for tsqa - -- runtests script - - run specific things within the virtualenv - - package up output from specific tests in a relocateable way -- Documentation/Examples of TSQA framework -- pylint for test cases diff --git a/ci/tsqa/files/cert.pem b/ci/tsqa/files/cert.pem deleted file mode 100644 index fcac0917a29..00000000000 --- a/ci/tsqa/files/cert.pem +++ /dev/null @@ -1,20 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDVzCCAj+gAwIBAgIJAOY9Arrh4/IgMA0GCSqGSIb3DQEBBQUAMEIxCzAJBgNV -BAYTAlhYMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkxHDAaBgNVBAoME0RlZmF1bHQg -Q29tcGFueSBMdGQwHhcNMTUwMTIyMDE1MjU3WhcNMTUwMjIxMDE1MjU3WjBCMQsw -CQYDVQQGEwJYWDEVMBMGA1UEBwwMRGVmYXVsdCBDaXR5MRwwGgYDVQQKDBNEZWZh -dWx0IENvbXBhbnkgTHRkMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA -08GU61mR18JO9X20utgemoeeYyKx+LXZYQBc0cKwHzZIiYfokwCkCNekMjZ87DT2 -+++8lBf3PatSgtA8/xanr8+TTDbKPehqdItDAy9e/xYgPBz9RXHuBUeOw+CPxt2e -aGrGwy6ybW3jne/+vm73wn+ZzldpwGGXwIQAS9lFqtmisx/DftL8fhzpfp/uIU/K -Y33iMiPpEHi8CHrOsaREl787ipKoqfxs+d1JNTHu1I+wJKgppOrtyjF1AjYDmrRg -RO8rJqIaUKS+8teV2KazwfdPkgNyaoZO7NCPPEjWkbp2c+2AJQqCSyZmJ63idgkR -msaSjRx45vJPOU/KFVHLuQIDAQABo1AwTjAdBgNVHQ4EFgQUtL1CTVRABxDQVbZy -WwOZWMCs08QwHwYDVR0jBBgwFoAUtL1CTVRABxDQVbZyWwOZWMCs08QwDAYDVR0T -BAUwAwEB/zANBgkqhkiG9w0BAQUFAAOCAQEAComsgXv9V7utk6yY1XV+rtjZmyRV -758Jjzb2sqqVtw8jtEvdiO6rK+Chb49cAcBGJFHZL2/CJ6BWSOf79fLj/IGKC/nX -UBz0dxrlg9x/KR/Jtp0qqQXIw/HT/NvaytYxMIBKqkmjG+kWiPn61dvwFjIERPOb -xM4lHhaO/PKWDDVx6Sf7UzMalmwFjaGQFXCNM5dfqvdqDYYrbZwEWuqmxNy1sZBY -SfY7Tyz6OP9NnlgtWRAITPqBS2kx/MVCGd2TtzJcJDxKK67tr0QFenGtXSZy555Q -bNKjXKVWiHrVCEgttPri22o7Ax1Q6FpLHMXDIiveUl6aXq4ulNzRqXpmaw== ------END CERTIFICATE----- diff --git a/ci/tsqa/files/ec_keys/README.rst b/ci/tsqa/files/ec_keys/README.rst deleted file mode 100644 index c3dd1e1ecaa..00000000000 --- a/ci/tsqa/files/ec_keys/README.rst +++ /dev/null @@ -1,29 +0,0 @@ -All of 
these certificates are self-signed and are *not* secure. They are intended -only for use in testing. - -Try to use existing certs if possible rather than generating your own. - -# generated using (make sure to set "hostname"): -openssl ecparam -name prime256v1 -genkey -out key.pem -openssl req -new -x509 -key key.pem -out cert.pem - - -## Since we want to verify all of the certificate verification, we need to generate -## our own CA and intermediate CA -# Create CA -openssl ecparam -name prime256v1 -genkey -out ca.key -openssl req -new -x509 -nodes -sha1 -days 1825 -key ca.key -out ca.crt - -# Create Intermediate -openssl ecparam -name prime256v1 -genkey -out intermediate.key -openssl req -new -sha1 -key intermediate.key -out intermediate.csr - -# CA signs Intermediate -openssl x509 -req -days 1825 -in intermediate.csr -CA ca.crt -CAkey ca.key -set_serial 01 -out intermediate.crt - -# Create Server -openssl ecparam -name prime256v1 -genkey -out www.example.com.key -openssl req -new -key test.example.com.key -out test.example.com.csr - -# Intermediate signs Server -openssl x509 -req -days 1825 -in test.example.com.csr -CA intermediate.crt -CAkey intermediate.key -set_serial 01 -out test.example.com.crt diff --git a/ci/tsqa/files/ec_keys/ca.crt b/ci/tsqa/files/ec_keys/ca.crt deleted file mode 100644 index a70f990c6d3..00000000000 --- a/ci/tsqa/files/ec_keys/ca.crt +++ /dev/null @@ -1,12 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIByDCCAW6gAwIBAgIJAP0vC/lirtMJMAkGByqGSM49BAEwQTELMAkGA1UEBhMC -WFgxFTATBgNVBAcMDERlZmF1bHQgQ2l0eTEMMAoGA1UECgwDQVRTMQ0wCwYDVQQD -DARyb290MB4XDTE1MDQxMzIwMTEwMloXDTIwMDQxMTIwMTEwMlowQTELMAkGA1UE -BhMCWFgxFTATBgNVBAcMDERlZmF1bHQgQ2l0eTEMMAoGA1UECgwDQVRTMQ0wCwYD -VQQDDARyb290MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEVRCzxLeGp2zzqqz6 -YTHRJ+sTuEzrFNUUQX/sEb4s1uceiqtTgFJ8kglWGMk/3WIC09PF4aRvkXM+xVvx -U9EcaKNQME4wHQYDVR0OBBYEFF9E7e3RCj6b4rQeNVTnNHGgRhzvMB8GA1UdIwQY -MBaAFF9E7e3RCj6b4rQeNVTnNHGgRhzvMAwGA1UdEwQFMAMBAf8wCQYHKoZIzj0E 
-AQNJADBGAiEAtKiG3JParqhQz1N+QOGKJtbgFS/qwNpK9FanbC6MOLQCIQD+heQN -eow8AF4hAUZNYvxyhZDd5FKzF2kRdxJUGkZK8w== ------END CERTIFICATE----- diff --git a/ci/tsqa/files/ec_keys/ca.key b/ci/tsqa/files/ec_keys/ca.key deleted file mode 100644 index 275e3e9406b..00000000000 --- a/ci/tsqa/files/ec_keys/ca.key +++ /dev/null @@ -1,8 +0,0 @@ ------BEGIN EC PARAMETERS----- -BggqhkjOPQMBBw== ------END EC PARAMETERS----- ------BEGIN EC PRIVATE KEY----- -MHcCAQEEIKR1N01PYCnkwa07tTnZ3Ri6dsGxu/OlTmExDWS1JIt6oAoGCCqGSM49 -AwEHoUQDQgAEVRCzxLeGp2zzqqz6YTHRJ+sTuEzrFNUUQX/sEb4s1uceiqtTgFJ8 -kglWGMk/3WIC09PF4aRvkXM+xVvxU9EcaA== ------END EC PRIVATE KEY----- diff --git a/ci/tsqa/files/ec_keys/intermediate.crt b/ci/tsqa/files/ec_keys/intermediate.crt deleted file mode 100644 index 2a2fc1d768b..00000000000 --- a/ci/tsqa/files/ec_keys/intermediate.crt +++ /dev/null @@ -1,10 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIBcTCCARcCAQEwCQYHKoZIzj0EATBBMQswCQYDVQQGEwJYWDEVMBMGA1UEBwwM -RGVmYXVsdCBDaXR5MQwwCgYDVQQKDANBVFMxDTALBgNVBAMMBHJvb3QwHhcNMTUw -NDEzMjAxMTQ4WhcNMjAwNDExMjAxMTQ4WjBJMQswCQYDVQQGEwJYWDEVMBMGA1UE -BwwMRGVmYXVsdCBDaXR5MQwwCgYDVQQKDANBVFMxFTATBgNVBAMMDGludGVybWVk -aWF0ZTBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABCLloHhXc49EwEI94gb6186J -zp5mHmEBD49I3pFuQwkVLu249uCsyEnjhoAlMohC/Oc/ROtvZTnujcdBZ2OBh4cw -CQYHKoZIzj0EAQNJADBGAiEAzevMu2yohbN5dzRp5/TTxKSOrenLh56jtSJrtFai -/wUCIQDV40abfGSiioLyb5PoyJRPa6M+AhWbK9caa2SQei+KnQ== ------END CERTIFICATE----- diff --git a/ci/tsqa/files/ec_keys/intermediate.key b/ci/tsqa/files/ec_keys/intermediate.key deleted file mode 100644 index bb1cdc55b38..00000000000 --- a/ci/tsqa/files/ec_keys/intermediate.key +++ /dev/null @@ -1,8 +0,0 @@ ------BEGIN EC PARAMETERS----- -BggqhkjOPQMBBw== ------END EC PARAMETERS----- ------BEGIN EC PRIVATE KEY----- -MHcCAQEEIMtffsDv9JDl4AFznb1ftzA8IqIVxA344PSpyZU6PfA/oAoGCCqGSM49 -AwEHoUQDQgAEIuWgeFdzj0TAQj3iBvrXzonOnmYeYQEPj0jekW5DCRUu7bj24KzI -SeOGgCUyiEL85z9E629lOe6Nx0FnY4GHhw== ------END EC PRIVATE KEY----- diff --git 
a/ci/tsqa/files/ec_keys/www.example.com.pem b/ci/tsqa/files/ec_keys/www.example.com.pem deleted file mode 100644 index ee31b561f1d..00000000000 --- a/ci/tsqa/files/ec_keys/www.example.com.pem +++ /dev/null @@ -1,15 +0,0 @@ ------BEGIN EC PRIVATE KEY----- -MHcCAQEEIGCAR+s6Sno+AteQgnMBOsS7sD4EbSxGN7anPQaossvkoAoGCCqGSM49 -AwEHoUQDQgAEwNOf/ym+XidKYjQg2WDM3GPK2eMbRz2VmvdB4dbzBxQ4gMYCIl2l -2L7lLqGtmUcuUhDaOxf91hhXAfprU+qRvA== ------END EC PRIVATE KEY----- ------BEGIN CERTIFICATE----- -MIIBfDCCASICAQEwCQYHKoZIzj0EATBJMQswCQYDVQQGEwJYWDEVMBMGA1UEBwwM -RGVmYXVsdCBDaXR5MQwwCgYDVQQKDANBVFMxFTATBgNVBAMMDGludGVybWVkaWF0 -ZTAeFw0xNTA0MTMyMDEzMjlaFw0yMDA0MTEyMDEzMjlaMEwxCzAJBgNVBAYTAlhY -MRUwEwYDVQQHDAxEZWZhdWx0IENpdHkxDDAKBgNVBAoMA0FUUzEYMBYGA1UEAwwP -d3d3LmV4YW1wbGUuY29tMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEwNOf/ym+ -XidKYjQg2WDM3GPK2eMbRz2VmvdB4dbzBxQ4gMYCIl2l2L7lLqGtmUcuUhDaOxf9 -1hhXAfprU+qRvDAJBgcqhkjOPQQBA0kAMEYCIQCU7CxO/zdFc4BDUCHO07wVuFe7 -RyiVVJs4llEZTXoBiAIhAIwrXtE2psZBRx/TE7miPunqa+1E4IxrtWn2fkzJyJ57 ------END CERTIFICATE----- diff --git a/ci/tsqa/files/ec_keys/www.test.com.pem b/ci/tsqa/files/ec_keys/www.test.com.pem deleted file mode 100644 index e5192761f4b..00000000000 --- a/ci/tsqa/files/ec_keys/www.test.com.pem +++ /dev/null @@ -1,15 +0,0 @@ ------BEGIN EC PRIVATE KEY----- -MHcCAQEEILVRI/Y9isXZJKXwb4srPN4hjx+ZUWGmSL3cn8AEhTVQoAoGCCqGSM49 -AwEHoUQDQgAEh4NjyzcxA2B/b281cUsRHaF+yAUV4CnIhUkPQigXw10GO9lQx69w -of7PjZkJRdeBlEMBVUcwTKEuENMZ7a3+Tw== ------END EC PRIVATE KEY----- ------BEGIN CERTIFICATE----- -MIIBdzCCAR8CAQEwCQYHKoZIzj0EATBJMQswCQYDVQQGEwJYWDEVMBMGA1UEBwwM -RGVmYXVsdCBDaXR5MQwwCgYDVQQKDANBVFMxFTATBgNVBAMMDGludGVybWVkaWF0 -ZTAeFw0xNTA0MTMyMDEzMzZaFw0yMDA0MTEyMDEzMzZaMEkxCzAJBgNVBAYTAlhY -MRUwEwYDVQQHDAxEZWZhdWx0IENpdHkxDDAKBgNVBAoMA0FUUzEVMBMGA1UEAwwM -d3d3LnRlc3QuY29tMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEh4NjyzcxA2B/ -b281cUsRHaF+yAUV4CnIhUkPQigXw10GO9lQx69wof7PjZkJRdeBlEMBVUcwTKEu -ENMZ7a3+TzAJBgcqhkjOPQQBA0cAMEQCIH083uGRd7b1crw6TH8paBZNeliJTiFU 
-eg6lrnGEVIKpAiBtCERpWAlJhYBrR5ApPp6jSoM+Zk6YfswUSg2YR7c4Sg== ------END CERTIFICATE----- diff --git a/ci/tsqa/files/header-rewrite.config b/ci/tsqa/files/header-rewrite.config deleted file mode 100644 index 4a06c96f21f..00000000000 --- a/ci/tsqa/files/header-rewrite.config +++ /dev/null @@ -1,13 +0,0 @@ -cond %{READ_REQUEST_PRE_REMAP_HOOK} -cond %{PATH} /^.*addcookie$/ [AND] -add-cookie testkey testaddvalue - -cond %{READ_REQUEST_PRE_REMAP_HOOK} -cond %{PATH} /^.*rmcookie$/ [AND] -rm-cookie testkey - - -cond %{READ_REQUEST_PRE_REMAP_HOOK} -cond %{PATH} /^.*setcookie$/ [AND] -set-cookie testkey testsetvalue - diff --git a/ci/tsqa/files/key.pem b/ci/tsqa/files/key.pem deleted file mode 100644 index fddcacb0512..00000000000 --- a/ci/tsqa/files/key.pem +++ /dev/null @@ -1,28 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDTwZTrWZHXwk71 -fbS62B6ah55jIrH4tdlhAFzRwrAfNkiJh+iTAKQI16QyNnzsNPb777yUF/c9q1KC -0Dz/Fqevz5NMNso96Gp0i0MDL17/FiA8HP1Fce4FR47D4I/G3Z5oasbDLrJtbeOd -7/6+bvfCf5nOV2nAYZfAhABL2UWq2aKzH8N+0vx+HOl+n+4hT8pjfeIyI+kQeLwI -es6xpESXvzuKkqip/Gz53Uk1Me7Uj7AkqCmk6u3KMXUCNgOatGBE7ysmohpQpL7y -15XYprPB90+SA3Jqhk7s0I88SNaRunZz7YAlCoJLJmYnreJ2CRGaxpKNHHjm8k85 -T8oVUcu5AgMBAAECggEBANFqt8kNGtPDIW1c9Vh3FcUDbFtkW5e42BM7VZBItv8X -IyOIWjTPRGpOQN87zc2YD85WaCwZi3TcsswV/szTbeDMK0MLSHVzHZzGgO5scclZ -62Un0j5Uju1/uCv1MJueXuOq/YjX7LOWIq32Q/u3KKWcpdJP1pDgs0A8C0L3zBNK -PjxnCO0FvJdcpqajEhtepYyTQAtWm/igWbuFgUcfZ55HTOBfBiLdACh6anbCdDJ8 -f2COFRrKu9Gn9mVyRirbyCa4B3VSj4R+WlKsc3erR2vNiEdJLd9x5OK7ZvMFHTvG -V4BhWt3ffSBRIaVi0pIpYekWbnXjbqY6zjchiy3ruOUCgYEA6W4yBrbExJmelXCj -dPOp5Ds/uAYaq9TkRLWzX837swPNh1+XJ9xGNgn4d5DbikN1xSdsJO/1dpwk3Uxg -qE/tEvA2gip/DaxIcj3PfoPtFyebgZItvs5k97zGw9n0bgqoRAezzUl4Guz4AQVV -Xz+3gICN1lFhRqxKm7Pt8Kc3D5cCgYEA6DrpLJCzOEd8qlhm6w7UGruBRA+QLACu -zlqzkf4rw1vaXx4cP8ctoCiVWUIsPI0mD2sQvtXAPT8KzZqh3UCu1zyyochyCuVg -C3fBQiSDtUb2Uk6u7fNFrn36oN7W/Q+sarJvIIECR1PjEGuT3eJppQgJB/VGUZqa 
-OQJyTJPXaS8CgYEAskz8o0o51F3u1wEZqbxw+acUDbGD79qGncEYiUZiSqPN+uhW -IhlL+/zzsAiS2PKcY4KwRSqRGQ89zVeIwSeD06JuUFC7iaseDz0NX/rPP49+ZaNN -k+A9GUo1nOW/oco8KvKjMVw8BH0bFlSHmGCn/tyy+pBguEXkGzh9uANRuHMCgYAM -TZKs2b2k7aSdIbHSIib6g5SFlo18x0x7gjKhOWX4I5WeFGpKtrKkGYJQCEFvs8qg -ZnusoIZeuEhKPDb3EcYxgPW1vHjOOvirotyKNIUFxYynL6P01z6J0ALHIwcgwQPR -Y0Kf5jXIsZkF9a0PxD70j0hrM4NWL2qcOpTzmaF/4QKBgFYQVrrI6YBxexKQ4J/m -tG/OKlxef6mzrctu7RJGxzt9ag1IgOi10BMCIKCW7tfvhzzLuBiJ0imEGe+MYrau -yIWCOVpmwcSnww8bV/25oqRxduVwZzmtZdUJbNSdiZ7jq4tsV9a8TZXts45veSFQ -X+HWsoFkRoYLOx96mqN94/ZS ------END PRIVATE KEY----- diff --git a/ci/tsqa/files/rsa_keys/README.rst b/ci/tsqa/files/rsa_keys/README.rst deleted file mode 100644 index 9fb6a8d7560..00000000000 --- a/ci/tsqa/files/rsa_keys/README.rst +++ /dev/null @@ -1,28 +0,0 @@ -All of these certificates are self-signed and are *not* secure. They are intended -only for use in testing. - -Try to use existing certs if possible rather than generating your own. - -# generated using (make sure to set "hostname"): -openssl req -x509 -newkey rsa:2048 -keyout key.pem -out cert.pem -nodes && cat key.pem cert.pem > keypair.pem && rm key.pem cert.pem - - -## Since we want to verify all of the certificate verification, we need to generate -## our own CA and intermediate CA -# Create CA -openssl genrsa -out ca.key 4096 -openssl req -new -x509 -nodes -sha1 -days 1825 -key ca.key -out ca.crt - -# Create Intermediate -openssl genrsa -out intermediate.key 4096 -openssl req -new -sha1 -key intermediate.key -out intermediate.csr - -# CA signs Intermediate -openssl x509 -req -days 1825 -in intermediate.csr -CA ca.crt -CAkey ca.key -set_serial 01 -out intermediate.crt - -# Create Server -openssl genrsa -out test.example.com.key 4096 -openssl req -new -key test.example.com.key -out test.example.com.csr - -# Intermediate signs Server -openssl x509 -req -days 1825 -in test.example.com.csr -CA intermediate.crt -CAkey intermediate.key -set_serial 01 -out test.example.com.crt diff --git 
a/ci/tsqa/files/rsa_keys/ca.crt b/ci/tsqa/files/rsa_keys/ca.crt deleted file mode 100644 index b0ce838e22a..00000000000 --- a/ci/tsqa/files/rsa_keys/ca.crt +++ /dev/null @@ -1,30 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIFNzCCAx+gAwIBAgIJAL8yIx0Q66ZXMA0GCSqGSIb3DQEBBQUAMDIxCzAJBgNV -BAYTAlhYMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkxDDAKBgNVBAoMA0FUUzAeFw0x -NTA0MTMxODQ5NTNaFw0yMDA0MTExODQ5NTNaMDIxCzAJBgNVBAYTAlhYMRUwEwYD -VQQHDAxEZWZhdWx0IENpdHkxDDAKBgNVBAoMA0FUUzCCAiIwDQYJKoZIhvcNAQEB -BQADggIPADCCAgoCggIBANYzN4s+B8KAbPxdpMzoVh+xsgPeauothjQq8tQmViT5 -3bnEf7zb+4Wy/7Y9CLj2CSnf6tv271OVwVQZWlDrmqr6greUePz28j3/sJ/lQ9bR -aZOWQI3d4rgCy6UdJ6rhD2BYrZVFnIZQ5zVZu3rbUBuPwBuRQdTLOWjzcSguyg2R -jiJ/W2/IToRjgX27cPPArhJQ2ibwsbtvqecj1lQfT5yg8WEDeeOyYwzfT4VEGm7f -8Q4qrBKxhdRTF6LhNkVKkOM6Jvvq7ULNpSAh1+zFAfzMpPUt1T+sjObQ0HHMeh0J -ghwOh8OAqFyMAsNdzFwjz4lSrliTMYoq6JdK0In5FUqlCt5RsKrfxsykhvUb66/R -+a9uGboEMlce/sZcxSpnDJYOmxrecQYmEaoKhbIcJBrRqYMgyQL9X+JJfsdyKR2V -CbSV0FjHeybpOwl76QGoZPhRy2e551uqKN0qdQMmfI3ZjZqmN1EuuOcRNKU8r0He -AOTRZ2BssDfvs/YWgZsKoHqoLIQCGgWw1ZhsK8bhFKKBtBqbyAu995XxhJezbSAg -57+1Icp8qmCWvCO0zxm5ckCRNICxGolT/0Mi5Cl8hmfbx9Gv4Na/KEcxpKLPy43t -UUqfCtvGOUE/S6TWleL5YII4mI86fIOHFkAozdeokcF54lhENpcLL3R5ggRiBJ83 -AgMBAAGjUDBOMB0GA1UdDgQWBBTMx2X9hPjXNTOKTSQELEBv4pEHADAfBgNVHSME -GDAWgBTMx2X9hPjXNTOKTSQELEBv4pEHADAMBgNVHRMEBTADAQH/MA0GCSqGSIb3 -DQEBBQUAA4ICAQCZ+B8wevb1DsA0XsGkBIHrAK2zc4u54nIhVEw/6PnJBBpV5Eha -rKtamhyeBjKt1267SaoQIWy/os9QlEYmn2AKn7J0rxqokowr0r8jdh4nDDeCGGkb -g7ieinIpuUr5Unpp+J+9/FXtrX1m5sXuHlMao9eTK86NoXPJgt4z4HQd1ZaEJp5s -H60PVd93TwcIDQjbsGdpFS7LhtYSx5Np/LmrQj0tnt18AUh1SgVwvtAqiSsyhxFa -yPBZKIsdbNQkPoaIKQovCg0cRjlRr1XAk+cfRbf8OUmS1JMs1+/b0zX0kk9xynhj -4CUPxsVy4SnOeg95yPB8BEyvU1uxnflq3QTJsSxcePYte5ni1wx8Vbn7cJusJRYK -LNaEFq/nkFxAlP4PTkv9mGq7ZYLqwpE9s6rPmJZc37ti0OUmLiVpqk6DtN5x/TD2 -vKeZMupGCWF71kueR4QAClEnDHe6/lKqe7CH3OuY8bA+7N0RNrEqUBJ7qnD6Frcu -UfmrkZcIj6DWSnpfwL884WoSCkYuyYP/v+PhR5fSao3l7ZB9UQXdXYhx/Kyd3lPZ -DKSZjOthCm1dblzKLK4VHy0dmAQdIczIXY6ztIKUY8z0poMNiyJEGeYYPf5jjxU4 
-Q2u9W/ReEaza6HshFnoka4IZqlfLinWRoAt92rA+nFIShaBBimvc32kHrQ== ------END CERTIFICATE----- diff --git a/ci/tsqa/files/rsa_keys/ca.key b/ci/tsqa/files/rsa_keys/ca.key deleted file mode 100644 index d8922969535..00000000000 --- a/ci/tsqa/files/rsa_keys/ca.key +++ /dev/null @@ -1,51 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIJKQIBAAKCAgEA1jM3iz4HwoBs/F2kzOhWH7GyA95q6i2GNCry1CZWJPnducR/ -vNv7hbL/tj0IuPYJKd/q2/bvU5XBVBlaUOuaqvqCt5R4/PbyPf+wn+VD1tFpk5ZA -jd3iuALLpR0nquEPYFitlUWchlDnNVm7ettQG4/AG5FB1Ms5aPNxKC7KDZGOIn9b -b8hOhGOBfbtw88CuElDaJvCxu2+p5yPWVB9PnKDxYQN547JjDN9PhUQabt/xDiqs -ErGF1FMXouE2RUqQ4zom++rtQs2lICHX7MUB/Myk9S3VP6yM5tDQccx6HQmCHA6H -w4CoXIwCw13MXCPPiVKuWJMxiirol0rQifkVSqUK3lGwqt/GzKSG9Rvrr9H5r24Z -ugQyVx7+xlzFKmcMlg6bGt5xBiYRqgqFshwkGtGpgyDJAv1f4kl+x3IpHZUJtJXQ -WMd7Juk7CXvpAahk+FHLZ7nnW6oo3Sp1AyZ8jdmNmqY3US645xE0pTyvQd4A5NFn -YGywN++z9haBmwqgeqgshAIaBbDVmGwrxuEUooG0GpvIC733lfGEl7NtICDnv7Uh -ynyqYJa8I7TPGblyQJE0gLEaiVP/QyLkKXyGZ9vH0a/g1r8oRzGkos/Lje1RSp8K -28Y5QT9LpNaV4vlggjiYjzp8g4cWQCjN16iRwXniWEQ2lwsvdHmCBGIEnzcCAwEA -AQKCAgEA0Sxc3mYp77+4Lk5IRC3TBMpjW7HU2HCycLlMPu5dC2jSJLoGP+jW275s -Roudu/nQAwt+b2XaWtaQX88OSp15geR2yE1+RRHmh7LsyYh60gnYPt7olWGMKEPa -mJg7B30WpfCTOoUrEGNrFcdV9Oi8dt3PLVyRW/tFSf8JjsL6X3u3wGp8YVHLky8U -4jKsX+zWUF6SWpqMBc1KrsRpZebZuMvWS/W4DypB3e0o26wg4AtifIIhXXPsi+bE -2zNw6wOhL7S3IyVMowLtcM577OSKR1OU9zTV3YNkaXabR7X1vetuGnX6EGnQf+fe -PuiXS5dQ8PpD9Y1wQEwcrLnLoiESrvK8zYytZDR4KTlWP6eIhnn/6EyEV13pV7AH -LQ4ZarRq0A2i9HAPeZuUYZ8YxT5Yhl+CvyzsCwd08e9YGSLKTCmZKYwmVZVOcwsq -OOB6srHNwSH15lxwf1uHh1zn1QyG29tXlJeEKTqZ64VYxDAHiuXFBrEAFXmwvzw3 -YcuTRkDoxUFNuwdr35lXBpjtYuUDYprODEDnyJC2T+bDo1htczKNlSKLJNZJPsYS -47C7DFj1S2CGXGM3WuEPV76ge92F42U6F0cGvThnB02D09wUilSWiCeJmhUW9LRD -N5QofYra1DdJ4N5bBqMwoUafF7fH7qb1Dwq9tQo0qaIP2YR8JTECggEBAPsKRg+j -IKETzcxlwCIZvAeoMny3srMrRYbqA8UnBDvzd81fOXj/r1PSEfGFKeuRhOqANQ6o -mowIQibIKcUb6Lc51voWGb5VeRwfLLoMkHZLHcKysJtRwMP+ceRUZCTfxetvptsd -laalVLxfXzoy/UKrUPtz2V6LtCtB5u77Xucz9GQoqWdHAmFcjlUqFUEXU3wj/J7k 
-roS7n++pKfxeA0YhiQHjBj2i0d0W+rX1B78AeS7IbG0nUIxj2yb6OojxLTIhsRox -CYbmX+7UmLO79rdJ2YAKMWoEC3erHfyGduxWUWVEy0gw4Xgzd0VnbD78iAn8LVbd -D9dC2bHWmx11UVkCggEBANpunIAKxsU6U6rnMrRm/p3KxdOLPNksxmEg983SG1AW -sDH/ZSycNYphHB8vQmVsH0DDI//djRbdbx4gCSBInZm6x97nsSeq4zk8ymoHiL34 -KVZcCgDfW+n7cl1DVHCQAdqv295cn2NX4VPaGG0CFyMGU48C1Xo1GnAYAHJC4OiA -vn+OWMUoRjDNRNvl03zmWpXMcR0DMuKKlnYh/0sHuNtlKVHIVMpubk6xXYKF32Ku -+nxpYSTyVl/gZWNbxveDELCH9fatGcGidDytEMELvrYOPTAhwGLBfC8b5r1jNF9o -WRDOy3IV+vBM4m+IZQ6NBsjly7HII4SCvFBHHH9NUw8CggEAfC1uzplgsIz9Rdgn -HDcinZ8DqlGr4ZjMAZqMVt+f5o/eeQZbhZDkh0/odFHslIwc9WqJ1EBugkorfseQ -ceIGUWwY/Qgln9Lj35dEJpvEcd9iT0bYuVvNQKYCbHGaP+s5GebW4JhXyGEL80ox -7rX+NRZNLrT1lyD/9E3bD+fj3/YBM/IxKdckREjmVxZEATiqoUOj2G1CLoIo8QH6 -tM1ETVKVVQuZsK9zpCuTxKH6PO7dAtk9WRkTbP6QD03D2q2CL8QeuNf84G/gkGE1 -T+LwfWXYYs+n9cseaQmKAbn55ZolFTTcYU9cmHrMytYH43hDSr0lKEydY1u8F44S -mJXJsQKCAQAUjQ+gZGMrLz+r9rColIAkLG3MZ29EJ5hKjyWBceEr+dZ9/b9xIQlQ -ia5xqKqgjDmsiC+c5SVfH1pTBUSWiv549j/T0wyx5LwYKIYKz8anyO9qtqIiTuGA -9C1gZPNO292wy5buJtzAJgaHMx/VMbsninLWI7geFVz1auyv02cBuQszSRtFAXMJ -KGbwIYFyYOHo8iE6T5C8C7n4R16Tmphj2/K1RRnlKdqrALkBjMLlr/zVM7z2Eu7Z -em1PmdTweJ5bXY9gtAyWUUiKZOYMo6Q/0LslCiflorqiXAerjefhihnHIQ23ICZJ -8ZHkWHrZkZ8vy58MjP36e6poCyi9OXM1AoIBAQC01zUvApHgYUprHmnsO4ynazao -DYSLHTJq/VYjH/TjxuXML2xOF72IL/l/e9xN9OiWHAGIda63N7m7j5rnyWsMm8vR -nUeJlraFDBN4T96Xf3YaNvbZHfuLjBur2q+SIUMdvqZzllRghLaDAf3g9VHtXBWO -d70FveQS2dImpYp5XhhdOEVCOkMqUuOCu2Gfe8a8K+GoHMTrSD5z5amjzHx6t02d -4oU9Rr7TZnrkrxL0JapyqLsZr6ZafK26lqqRANNQIX5crUkprKp8HGv7fmqDsdGI -qTKbYliQ8KjyfSlfj5qBsqhic1RsonNmf/RL5dNevKxxVR7OCzTnN5lHM8Ln ------END RSA PRIVATE KEY----- diff --git a/ci/tsqa/files/rsa_keys/intermediate.crt b/ci/tsqa/files/rsa_keys/intermediate.crt deleted file mode 100644 index eacc2b9d128..00000000000 --- a/ci/tsqa/files/rsa_keys/intermediate.crt +++ /dev/null @@ -1,29 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIE7zCCAtcCAQEwDQYJKoZIhvcNAQEFBQAwMjELMAkGA1UEBhMCWFgxFTATBgNV -BAcMDERlZmF1bHQgQ2l0eTEMMAoGA1UECgwDQVRTMB4XDTE1MDQxMzIwMDI0M1oX 
-DTIwMDQxMTIwMDI0M1owSTELMAkGA1UEBhMCWFgxFTATBgNVBAcMDERlZmF1bHQg -Q2l0eTEMMAoGA1UECgwDQVRTMRUwEwYDVQQDDAxpbnRlcm1lZGlhdGUwggIiMA0G -CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDe2tfo88v44Tl9TATixb5qmBobnAy/ -oYpaX2St28qNuAC47ftkFaE+4zZIN8IeOAKkMC29ZVxOhXoNWovu4aa9PXdnmatQ -M/vezta451JE7lVQK0q6dViGyeevBkumgQ2TD+Kj6R2uH5zHDSSVEWEJIb8rTb5i -1pA4pF4ZLMBLAoZwrQfH9xjNZWSi7UJ0g1eYwRh5ahjl2SBRxSrH6GEUBCuI7S1o -AvUvj0aDVXmPJEuyrkvw1u566g7DyQ242SnqdZnHVP17g1YmqCbq6qxipb6YKOke -LBOL1zMKjdVQ3QAbw7Y7RBFcVUdUvoh4uL+IoWr+A8s99OxUhz2kEXU+gjwJtOOR -keZGzMo9rA9cProwHC414Q++Ct0YhUAO2vTjEhnEvlJFv8cGRYT/Oifwrxny6KTA -kC+4gwgrJgWR9DHeL814aTnotRUGLLsZAeo+O0al3/0QlN09XDFstkmn/mgaCmbF -LN9dqwvg1gwS5Hbd+vdlguhcx5mlaI+jZp29CBwBdkkKVAns/3Tne+KpwJIsdRMp -es+ajkFEZoOc6GVBrW/BjxQ6JBgQA6R4//eWunbMYfhkNY5l9SS1qZak1j9+qWAX -JcSQVbUxgRSe01MzIqScc2btB8WJbi/IJp+hcdFLDzKYIolT7mRm8Xu6j3M6YF7E -UaOuyRqoRiIr/wIDAQABMA0GCSqGSIb3DQEBBQUAA4ICAQCk4HQOq4A9cwmrPVk6 -vqR8CmRYiStM76RmLqX3XeVx1GybfvZzK/WYLcwInmXD4iitQflB9S0Ut0ChuLKo -klj10wdAv6iNrRMUtmY9w8zV8GSLRHZrOo6Rd0affUNjBy79FaNypmdrydc8+M7L -pGWKVN4qN5cOs/XLFT2TFk8davnyFOucP7kxWeNiZ38Kh8cAYYkovMpfcEJSrMGS -musUqvAlPVZXDgBjblVHjZ2dvgRCdYFwrMxo14SALOmM1Hi7oWbX5A8uNdeoq8lB -mwdUBpdWDm6IgJ2aiufOqBV3Mv6AUysLE+qHdP/lTHIHNc0LinuJK10F/jqbf9uY -gzETU6HT1gQ3X3noDRB2vMMPJQOx8uQX04dfUx8WOmgwCx1X5Vc8gLhH+JbZz1PZ -GlWgX5VGiGgLkq+rgQBLOLIe/NAnUCXPG78DttQpWMs8JjxEcgnVjLNZNAaWymAl -U3JzmbX5UZePwEMjKSjKNDraU3Tuq+QTdPr+W2ilJEsLLPbsenc1QTNsPUv5aI8h -LaayTP7aWMVg0QFfV1O7+vVz5ej7CO7Be7w57VWGDwBBVm81vDbeFna4RJPk2qAw -K5wOYak0Z27v+0wBXHaceH0j45fn/lixt7FANv9NhB3krcjrUwUsooMAIsnCKAlK -nQD8ySYdm+OGchPeRVW+MH8FPw== ------END CERTIFICATE----- diff --git a/ci/tsqa/files/rsa_keys/intermediate.key b/ci/tsqa/files/rsa_keys/intermediate.key deleted file mode 100644 index 561765ff58e..00000000000 --- a/ci/tsqa/files/rsa_keys/intermediate.key +++ /dev/null @@ -1,51 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIJKgIBAAKCAgEA3trX6PPL+OE5fUwE4sW+apgaG5wMv6GKWl9krdvKjbgAuO37 
-ZBWhPuM2SDfCHjgCpDAtvWVcToV6DVqL7uGmvT13Z5mrUDP73s7WuOdSRO5VUCtK -unVYhsnnrwZLpoENkw/io+kdrh+cxw0klRFhCSG/K02+YtaQOKReGSzASwKGcK0H -x/cYzWVkou1CdINXmMEYeWoY5dkgUcUqx+hhFAQriO0taAL1L49Gg1V5jyRLsq5L -8NbueuoOw8kNuNkp6nWZx1T9e4NWJqgm6uqsYqW+mCjpHiwTi9czCo3VUN0AG8O2 -O0QRXFVHVL6IeLi/iKFq/gPLPfTsVIc9pBF1PoI8CbTjkZHmRszKPawPXD66MBwu -NeEPvgrdGIVADtr04xIZxL5SRb/HBkWE/zon8K8Z8uikwJAvuIMIKyYFkfQx3i/N -eGk56LUVBiy7GQHqPjtGpd/9EJTdPVwxbLZJp/5oGgpmxSzfXasL4NYMEuR23fr3 -ZYLoXMeZpWiPo2advQgcAXZJClQJ7P9053viqcCSLHUTKXrPmo5BRGaDnOhlQa1v -wY8UOiQYEAOkeP/3lrp2zGH4ZDWOZfUktamWpNY/fqlgFyXEkFW1MYEUntNTMyKk -nHNm7QfFiW4vyCafoXHRSw8ymCKJU+5kZvF7uo9zOmBexFGjrskaqEYiK/8CAwEA -AQKCAgBagZZeTWTxVsb6U/1H+/cxY0R3IhErYnfF7Cf/U9wXYjso373RD9fEqpJJ -EhwMGcM42zg4SwoJ+btv4O4jvhDxmhz8PjSNBg+slWQvBAMta33KaUqYH0AsvaMS -OgRPQuo4Z6Mr3mjnZn9Nd9D7mWtHQiVZeOFxUqKP3nE42CvgSs4+xIb8dyXjhOIy -KRlsKEtTBljiNmyWLHArxV9ygLWsY2Uq3ugp7cmV3yJMBFKyB5OWpaLB1QhVcuk8 -KlMgV1HmnowtoB6yIszCNlhX36bTCW02bqb+Ufg+Os/H4YJYrOh2Xi2MNEC4twmZ -KShTnvRqkOPa9b99EhNI4QPvtgde8XzZ15etuLG5Jg1McjJi6EK5AQx6NAfKWSpd -lEryShnePd6Tx8taBsBG29ibPfAGuH/enhhgHjs2Tw4WY+fHq0laH3H0NI9EpV80 -ygAynoYfCeyUJiTpv9/dq49V+lJ+SJ5Wpc9kGRGxcMlIvCUETgiSHkz9TOmhb15H -qX63B+AfMDwLSFwhnWJydPslXNqNH+ACzRAr0vSFYtfAs1I537JGKAJT3eZOt3lg -lfXrEe5HKSxANx1qUvFIyRBFZYVWCb3gXvfU8fKGLM2xDuRBaNsEtlQzFuAN6lXd -iDe9+upHJHRiAKb3xtM7Gfjp9UzffXZjwxmg+p2w5wHQKVFbgQKCAQEA9hFafDVH -RwchKEAzGA+CxVIIA/1LIq0ZenOZJGX+px2nP5C0L0DV4L66c+r1b8zzt7+dooA/ -f0UQg++a/DzHCaM4SB0GAhzeHndq1P+1/9rFabOJmWpkeTCvkTW6Onq0yjOoYhnI -7DeoKrK8jxZGIRUV0bOLIKro3w764SnjAwNgdfi0MVDdEAgMn/ZM9ZetoEVeTY7K -saq/Gz/N/sTO/qv13ARqCxIzQr578pXP50Z0h9l9ln1DWaVoqa9IhpkMlPzrlmU8 -8ZVJIIofOUcvSq8366LmdFJ2ftIgFkYfAT5iVyoauikeGkWTWbKCxqrHz9R6HvLi -9LguykeBeoml2QKCAQEA59mgyivkaYBolozJkD2C4mJfnQZB6znMzDsccDxqBvDu -L6vAZYaIFsvW5df9G7uqKsQ4NTlfr1Ea2vdFHxs+nSO3v7cHdj0KFQBO2LkIr31z -lfvGm73+eK+7d0tmD2L9GZ0swCxDzt94iT9tUuiPj8f3CEOCuUbHdlszHE3Q8jFD -GSW688v9nxKTR0eQhYY248Var67puHF+CuzOjlgbr+W2+21h0wLAfu9Qqwp+2ry9 
-8S1qJ8Mi2y7SGvjxMpWbMBA7O1/sCcrcuHpsata+RwowctyvqEEfaKyLmlfxJGCI -wPoTHwLuoIMjLQJ+OxAm5URnhbKhsAjcDgi2zveBlwKCAQEAxLaKx9Ev8jBY6Xyp -XArKWESD8+yCLG6Fd8cCHn8LXT3vfnmIEl6anNjc5d5n58DI9ZRRyJ8OJAhqc48R -L6TG2YWKcNwC+Z1qVKDS3wSt0qRqPV4yGltbhybMtCFnh2ihLySs7//9CBpWbgwF -gb4kwj3A+6u1BaCcGfY3ydlaigYhDy6LnwEuOKq2rQqR66QeQYozIX2NvQrrTDVt -0MY1VJLUMde1jrQ2Fp2BKSVq11ETx6avJ6dsODZrvXtLV47y6Ahprgmw9tCJbWH+ -JCQsQmBhLBdGdKeX4zrIPAZCsxPUOiqGw1wrnIUSjxqOQZQ1uIf8ONGjbk/v1/Xi -JLv1wQKCAQEAln5hLFoJTaIYQYDpZUNILgKE5bwmFbXTBc5oy0Gr4Q2Kzk7B+CS3 -OXTe7RqiJNpPvqrXgVTYk0gmEnPm3iYlIoMIxtzbbxh37uHgTDTvOlpIKNbhOD6/ -az9GhEzg1qP+fh3T6nnRGftcllohcGucpEu8QhTwSatz1ZOlPX4VXuAzGaOwEoga -/KJmDyKY4NMZ1gdIsjjrZNnmYtkLysHOVwLZH6MEmJ90q/BTgHKznPBeqqo7+ctk -WLmvj+p/RcJulWgzynqdEE4pr2Vn8oGpyRsID8bIDoMXDC7T/z+OO3qdygWJW9vf -YTk+H/06SsRVOwSH3fTxlv5pOILXhsiJyQKCAQEAvSS36aam59KHJP8MG0Xm1OId -cXSQqC1TUFt9mfIfiNdlmQWqlX/FWQSKo3B/ufJVChSu7Vy4pWd2FKfqhDsVrJ/W -6U1U7QJdqsYMN8yWGHEC9riBFl/Pu6vaCluWm/qANzoSMaLB1oMy4SCxGui+6yxN -YktI6E+fRTzPdj/1hznyqicnmOBarzUKItfkwyh6fyIgfaFD9N79CmjZWLpKf16Y -lp6hmCR6BAErj0jjKZjsC51DfAs5fnnA6csZxWtGF3HJnIqz/Fdjsb2R+RNlWQGX -2pZ8hMV0OvEajK66ApbqQMeJzQwGi91Xr6w1z+EvAr6H6jECE+daY2GDtJ7V5Q== ------END RSA PRIVATE KEY----- diff --git a/ci/tsqa/files/rsa_keys/www.example.com.pem b/ci/tsqa/files/rsa_keys/www.example.com.pem deleted file mode 100644 index 4bc273df694..00000000000 --- a/ci/tsqa/files/rsa_keys/www.example.com.pem +++ /dev/null @@ -1,52 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCrS3J3uL2MNr1Q -9zL7ODukw8UOHN4AwuWsNSDJzaswrgxtFO3VfJbCfo7CUtojoJY39783m3HnLhPA -cFxfsSgQ0Z8TdUZYtq2I1p/eEp/A6kdQffHS4To0ueYQ8r1I6pMJTniaDi8ICD0d -837WwbUUaxx8IhzaFNPrvSdhJb1LGPxj8YvfKvHF9rkwqpJSEAXFCKTa/H0i8wb8 -h3u4cXZmYN6aooai3SY1KGlWZtAfbgTlMMtvB3ZE3JsNNwbL4gfzxYsfXTW9QUZ2 -kT3irmB80koQjzDiW6MpxmLpvDD/NpQWSiR9TDZYCJtACwWyoxqo8gHZVqsF/rkK -yqkg2cbJAgMBAAECggEAf/ZGtsUdZGdoGdnxDda+R6GvzZEnDy6JYJH3womP/zem 
-NL7TxQ3jmbvtbaFzL/ZBAeJjyGipOGglfTby6tFu+tF9oo2TVaZyEK00lDMZgIYD -bFAJnN2AG+9bvQF5AcWqveMPGRbLb5aoAX3rHQdr/KrfhqP9JbU1cv/FMT9+H2B7 -Spty/WJYOL6AhzN4H4YHJzfhn2e0iMfA3usu1hha84FWWAR3+Z4sphSCtY+edumT -ygES/j5TAX7nu5Eyqe2L4natuDLXiLEbKEpqyfAg1SmTDqAiHrMtWkWv/e+tdTs8 -+DE+wPVhRCjVyjejjvsgDV/d9B805bbpq7M4eyIh+QKBgQDhHPcd+b5KZdXU/PiC -GQ/5C9elW35t7D8gaCzLAzoxW7B/PnTmVyK+QPBsxaUNg87BxB0TTssNUulSY2nk -TVemOe57xdJKiX7nOG+9vIZFGxlzqywMz0o6kZe3No8PAcp91K7Xr3RkXdHHwyZY -+NDiUIeMptEnoeEbFZcv+YQ+8wKBgQDCzB/5tD6D8uGKfsiakMs5HGEsYyJNP2b5 -UQ6e1spFg1noMfc5Vj+Av5lF0AE6tELEvJe0J8z2rA9zfHC6JkY/zOGMXrILF0S7 -KSTcxfBUqeE0OUJMkDNy61lRGs5ISul0qcGU473EsBEaxi999GcqzLrB5V4CHZlJ -EUam9SSqUwKBgQCVkcE/UWh11iO1WD9lcXGDb7LgU2I1dvqadZ0NZh+MG/exE7Zo -NQ0Ii+0y2D9KM4F0jPEkmv2e5K/R5eu9nQXXlDY4Vr/adnCzAHR+BHzR/adziw/B -kxkmwQWk3cM/nVkFMgLZm+IhrZRsveUEyI1BUXA+q7fcNVpzvGyvm4GasQKBgEW9 -XMlCLYuB3ht+ToV/xzIYJfYFO9eaFly2F1zomxwN7ZdCpDcD2NJYRiCHWplQxgK3 -Xjyiby/048c9ywHqCAZ5bFqb4HQ2DWZQUaE0wFkfRMA0q7bLfY/sEFsIFMgvAavB -xstuSZdsTYNfZstaP8FD8KzQWDq7rBBLvhax90F/AoGBANNhkbNxWiUJX/+6VtRj -u7msBgrUpYQtLkyY+13Ry8cdf+8w1die0LZ4unYjIcAS1ro+XGOc0GASc6pb5dYG -X+RxTyZzoNazbC6JEsFx9IJLn/L0/8jsg368m1f0Dkptd2LzrLsw2zuY6wm8DrLH -Re4GALck6zlT+rZNLuN13p5Z ------END PRIVATE KEY----- ------BEGIN CERTIFICATE----- -MIIECTCCAfECAQEwDQYJKoZIhvcNAQEFBQAwSTELMAkGA1UEBhMCWFgxFTATBgNV -BAcMDERlZmF1bHQgQ2l0eTEMMAoGA1UECgwDQVRTMRUwEwYDVQQDDAxpbnRlcm1l -ZGlhdGUwHhcNMTUwNDEzMjAwNDA3WhcNMjAwNDExMjAwNDA3WjBMMQswCQYDVQQG -EwJYWDEVMBMGA1UEBwwMRGVmYXVsdCBDaXR5MQwwCgYDVQQKDANBVFMxGDAWBgNV -BAMMD3d3dy5leGFtcGxlLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC -ggEBAKtLcne4vYw2vVD3Mvs4O6TDxQ4c3gDC5aw1IMnNqzCuDG0U7dV8lsJ+jsJS -2iOgljf3vzebcecuE8BwXF+xKBDRnxN1Rli2rYjWn94Sn8DqR1B98dLhOjS55hDy -vUjqkwlOeJoOLwgIPR3zftbBtRRrHHwiHNoU0+u9J2ElvUsY/GPxi98q8cX2uTCq -klIQBcUIpNr8fSLzBvyHe7hxdmZg3pqihqLdJjUoaVZm0B9uBOUwy28HdkTcmw03 -BsviB/PFix9dNb1BRnaRPeKuYHzSShCPMOJboynGYum8MP82lBZKJH1MNlgIm0AL -BbKjGqjyAdlWqwX+uQrKqSDZxskCAwEAATANBgkqhkiG9w0BAQUFAAOCAgEAGrOo 
-IRDUjxt65cBR9OZSXRdnL6szAMuNHWlK0MfVtTATWXDKU9S3KjK6seo+ebyaqt1J -nlyUZ79n6+vU5uSIDANpYQ5Z1DuV5NM2V9o2QiRqExwfgpGUcAXAi0lQ79eA2kzi -cDgDIpbEAJTFP5/uinaRA9H4KqqfMg5m34tu44A01brb2h/czPOWxD89mKKbtS9H -ODPkDkq3wTUG/F0RQvfFC6Na8IWkW0jijDBxuFeSbRV00GH3/wpMBcxDuIcKBJcy -tFrXjCFKzop7djU7OuxEnqQdlgiHgQsszjnLP0k5Lz9CrNG7W+zqmYsvO6s6a94Z -8lHuwl9GAS/IFQS+c+PsPT7uxgSfbdFHWlmOv1+p/PsPaAQ4Hu/ugga8AjMYoyHg -V6LBCTbUK7aA3lnu6+EW3qpGve6Z2H1D+B5V8ZXLuA6ooIS80CN9xw6jFcAXcUv8 -pNw/sJDnErzRbxsOPKeLl2EjJ/1/MsQf5DNp3xPFyhv5l3oOxodICEr4eeudGmAW -OGp21xqo7i3K8YQ6GDMPF6RVX4mOCxY5L9qf+VDLYx8AV9gFZ+VWG8we7jyruqxE -DwetsfCtwXo5gho9p+K8VHA7nWukydBNiekPFD6S6gBu0vMZxIfW/AKolWgxmyPz -zZhR8034yoSR6qxuh+as+JpXmw+3MypZ5G/1S54= ------END CERTIFICATE----- diff --git a/ci/tsqa/files/rsa_keys/www.test.com.pem b/ci/tsqa/files/rsa_keys/www.test.com.pem deleted file mode 100644 index 5a0cfa72f3b..00000000000 --- a/ci/tsqa/files/rsa_keys/www.test.com.pem +++ /dev/null @@ -1,52 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDUiFw0p0JOfm/G -0YHivkd5v5Xu5tkBWETu9NgKLr7MkEBeqb1fjkm8Gd1IsndoYh0R/tkRzdE6O+lN -WjMgEuCtz+5zblBQ9xDMwAKI+1YhudqrwIxqXBzpyvAAZwU5EHftog/WiEDvJPM0 -XEQsx6142kgVR5NfNX6HWHOL8H86j7Yfbdd0Knu0Qv9tulC8aeBbZ+pKU0/cBCVi -ljr8OFdrCZu1EkKIat3oBjdbEF4boh4gNSsxbX+89uW4SZYR/G4xEX4VZJLMsloG -YOkcOLTV90+VDbbdrZsnmgQgpKBlp2NBdSx7MZQRU5De3PMBzo2uoIvmwXS9ZxtT -Lr+/dpxZAgMBAAECggEANh1uVN3NrUzWSypnRwOqEV7t30GaOZRvIOTo6VbTsCR+ -r1vK4zzIm4N+a5c9fi+VNVLNlJHyV0CP++keoWkNGlSaY3vQKX1vIqM1Qgm0+atn -+Vlyp1ZC6miIyaFxnAEMeE5OeBKDbiDbaBaiKUDCc8Yomnp6FMD7MZ0c9qHK027z -DL0kYjxjVgYiDnE1fJ1ZV466IcyDnGDky2ebjjvvTI7c55tbrEc+VRZdC5Cn5Yb/ -desjXGAb5snEbdAiNqCgIG9bLw3hHWsn0wCkUPRcEMoSZ73taak54vC7cPda7/D5 -aetzsNQWCMZC5NZ74JD5kyC3xBr7TAarQ5OXsJ0DMQKBgQD8GTQdc2MHvA+4nCpp -0Xy3J7gOCI63FWETZgw22sboqk1toF/LFQHYY40EdvEkFsqc6zTm09QK8y8G48sC -j8pvskhO2M9EgLegzPQv6NY0pTqltI0Ye7w9I3FTKNZIJW+XokDEFFqu+lXqf80i -BRmoUrltoS8L5XB/z5GoNMdk7QKBgQDX0mZSYdjPgTDWDSTCdEOcGB4bjAZzXDTu 
-ukYhri0vuStZ51giAWUlbBnN/hlvln5bULqZFnB5svFRujTIwwevxzhSl45aiNl1 -vE8zsRS0bNeNyjjF6HX0HOEuAQtre/k+WHvEH3mnFR1Zngwbrt4CJyuadlCmj7yw -jv/DVIyznQKBgAbmobiUqgdSLJP/ImIXK/TPj4hCz7VPToL7biYqQvunfcsccsLa -ZlyIDRosL1mvjghRn/cZoVpTYdwsbCg7y2zXUodmA/Z6F4y9T4noM8TpKPvUP3CG -IpcB215NZeA/thhOhrtXW0wi6isrKHBf913WNeE8Yk9PDo9RHUmfeD3ZAoGBAJGn -t9LFopN4t0LfH/30hWSlijxBJmFYy4iKQqacbHaW28ETNxHMKz00Vb4GTZhX0vNB -6o1C7anUsLTdnJ4ZsehZ5ZMoIbTMQycIbdOPIVAbXOaeoe4/UsvrabWoktJ5mt8O -zIiyTWIMCADhf353Z/HACdd3HjsrKsdl2wsy1rqpAoGBAOaNsxoyw/BIn/wvkQRs -YyYXLb8zECO17ad4M+aFsnf7yLY8i3k80JRfjqxis/yrePNuVKHiZYgEvLNBMdJi -j3YSMgXHYSgYti73+zBcy1uVVlUR5Q2HnihFX5Ho1IAQbC+TXzUCnLOEoR/IIHIz -+qWsoHgJD98ie3cjys0WujpH ------END PRIVATE KEY----- ------BEGIN CERTIFICATE----- -MIIEBjCCAe4CAQEwDQYJKoZIhvcNAQEFBQAwSTELMAkGA1UEBhMCWFgxFTATBgNV -BAcMDERlZmF1bHQgQ2l0eTEMMAoGA1UECgwDQVRTMRUwEwYDVQQDDAxpbnRlcm1l -ZGlhdGUwHhcNMTUwNDEzMjAwNDE0WhcNMjAwNDExMjAwNDE0WjBJMQswCQYDVQQG -EwJYWDEVMBMGA1UEBwwMRGVmYXVsdCBDaXR5MQwwCgYDVQQKDANBVFMxFTATBgNV -BAMMDHd3dy50ZXN0LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB -ANSIXDSnQk5+b8bRgeK+R3m/le7m2QFYRO702AouvsyQQF6pvV+OSbwZ3Uiyd2hi -HRH+2RHN0To76U1aMyAS4K3P7nNuUFD3EMzAAoj7ViG52qvAjGpcHOnK8ABnBTkQ -d+2iD9aIQO8k8zRcRCzHrXjaSBVHk181fodYc4vwfzqPth9t13Qqe7RC/226ULxp -4Ftn6kpTT9wEJWKWOvw4V2sJm7USQohq3egGN1sQXhuiHiA1KzFtf7z25bhJlhH8 -bjERfhVkksyyWgZg6Rw4tNX3T5UNtt2tmyeaBCCkoGWnY0F1LHsxlBFTkN7c8wHO -ja6gi+bBdL1nG1Muv792nFkCAwEAATANBgkqhkiG9w0BAQUFAAOCAgEARQSyOqxM -ecr1SvIu65yZbOPq29343KewRs39ZbjsEbmm5SMdFs7asWHtbD6iiU3mPzqAG0Y3 -i+S98J7xwSvAnWERVNzWhymCu7MQ/mKM9WZWDRGUshFgaYpWDqjw2a/qVC54f/Ye -OaeDqzWc96Ib0khZdE+IyqpLdclxagVHv0cJOQwqhKOkIDQGEeBTc8AA/luOnC8d -tE9s8IwTmGpZPYY+kwGVbaLKPanONGDiZM5IyZbBgb9Dq3wL/5DXvQApid37BzHJ -ceT/gKDQljXOSWacxCPUSNb6aut5ivfbd9w7kXdTZL9UcS+FGPNXQ2z1yu3VaqJV -MTSfSW5KUHKVLwDsp9hvBpid8S3TpQLeGt68wXoHxvh5PMnsgdA4JxsOTTwKP/gE -rYhockBGuftKOaRqlRn4+n/nSfLogYoD+32S6ZMBJpPuMBXODiezsP+eAb+wqrV7 -4tiKJC70YNVL7XMdmsDtxj8lcY+aouREnd2+Iutr+YrCM5ZIOKAq57Ib4qTNV3Is 
-F0KDEHRBpiS9hcGYzhg39Dovc6RHc6QRqKVrfZfE3jKXfCtefcqceIsh3jH51ONI -sudX6SOv76PMjnAj0uqF8Kw6YVzLO/MGbAyGilIXYc18GdRWL4gqj9Z9BiLuNcZU -8AhkHaBWKOHEtVZ8ueEkRBS3I0JZZ8ZKlTM= ------END CERTIFICATE----- diff --git a/ci/tsqa/requirements.txt b/ci/tsqa/requirements.txt deleted file mode 100644 index 0aa7ecf8903..00000000000 --- a/ci/tsqa/requirements.txt +++ /dev/null @@ -1,9 +0,0 @@ -# requirements for the python virtualenv - -# TODO: pin a specific version -https://github.com/apache/trafficserver-qa/archive/master.zip -pyyaml -pyOpenSSL -# TODO: can't do python_version in requirements.txt files -#hyper; python_version >= '2.7' -dnslib diff --git a/ci/tsqa/tests/helpers.py b/ci/tsqa/tests/helpers.py deleted file mode 100644 index 5bc45ce151e..00000000000 --- a/ci/tsqa/tests/helpers.py +++ /dev/null @@ -1,38 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-import os -import tempfile - -import tsqa.environment -import tsqa.test_cases -import tsqa.utils - -unittest = tsqa.utils.import_unittest() - - -# TODO: check that the given path is relative -def tests_file_path(path): - ''' - Return the absolute path to a file with relative path "name" from tsqa/files - ''' - base = os.path.realpath(os.path.join(__file__, '..', '..', 'files')) - return os.path.join(base, path) - - -class EnvironmentCase(tsqa.test_cases.CloneEnvironmentCase): - ''' - This class will get an environment (which is unique) but won't start it - ''' diff --git a/ci/tsqa/tests/test_body_factory.py b/ci/tsqa/tests/test_body_factory.py deleted file mode 100644 index 2842a6f258e..00000000000 --- a/ci/tsqa/tests/test_body_factory.py +++ /dev/null @@ -1,73 +0,0 @@ -''' -Test body_factory -''' - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import os -import requests -import logging -import random -import tsqa.test_cases -import helpers - -log = logging.getLogger(__name__) - - -class TestDomainSpecificBodyFactory(helpers.EnvironmentCase): - ''' - Tests for how body factory works with requests of different domains - ''' - @classmethod - def setUpEnv(cls, env): - cls.configs['records.config']['CONFIG'].update({ - 'proxy.config.body_factory.enable_customizations': 3, # enable domain specific body factory - }) - cls.configs['remap.config'].add_line( - 'map / http://www.linkedin.com/ @action=deny' - ) - cls.body_factory_dir = os.path.join(cls.environment.layout.prefix, cls.configs['records.config']['CONFIG']['proxy.config.body_factory.template_sets_dir']) - cls.domain_directory = ['www.linkedin.com', '127.0.0.1', 'www.foobar.net'] - for directory_item in cls.domain_directory: - current_dir = os.path.join(cls.body_factory_dir, directory_item) - try: - os.mkdir(current_dir) - except: - pass - fname = os.path.join(current_dir, "access#denied") - with open(fname, "w") as f: - f.write(directory_item) - fname = os.path.join(current_dir, ".body_factory_info") - with open(fname, "w") as f: - pass - - def test_domain_specific_body_factory(self): - times = 1000 - no_dir_domain = 'www.nodir.com' - self.domain_directory.append(no_dir_domain) - self.assertEqual(4, len(self.domain_directory)) - url = 'http://127.1.0.1:{0}'.format(self.configs['records.config']['CONFIG']['proxy.config.http.server_ports']) - for i in xrange(times): - domain = random.choice(self.domain_directory) - headers = {'Host': domain} - r = requests.get(url, headers=headers) - domain_in_response = no_dir_domain - for domain_item in self.domain_directory: - if domain_item in r.text: - domain_in_response = domain_item - break - self.assertEqual(domain, domain_in_response) diff --git a/ci/tsqa/tests/test_cache_generation.py b/ci/tsqa/tests/test_cache_generation.py deleted file mode 100644 index 5e9a39ccc81..00000000000 --- 
a/ci/tsqa/tests/test_cache_generation.py +++ /dev/null @@ -1,159 +0,0 @@ -''' -Test the cache generation configuration -''' - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import subprocess -import logging -import requests -import random -import uuid -import time - -import helpers -import tsqa.test_cases -import tsqa.utils - -log = logging.getLogger(__name__) - - -class TestCacheGeneration(helpers.EnvironmentCase): - ''' - Test the cache object generation ID. 
- ''' - - def _fetch(self, path): - url = 'http://127.0.0.1:{}/{}'.format( - self.configs['records.config']['CONFIG']['proxy.config.http.server_ports'], - path - ) - log.debug('get {}'.format(url)) - return requests.get(url, headers={'x-debug': 'x-cache,x-cache-key,via,x-cache-generation'}) - - def _dump(self, response): - log.info('HTTP response {}'.format(response.status_code)) - for k, v in response.headers.items(): - log.info(' {}: {}'.format(k, v)) - - def _ctl(self, *args): - cmd = [os.path.join(self.environment.layout.bindir, 'traffic_ctl')] + list(args) - out, _ = tsqa.utils.run_sync_command( - cmd, - env=self.environment.shell_env, stdout=subprocess.PIPE, stderr=subprocess.STDOUT - ) - return out - - @classmethod - def setUpEnv(cls, env): - - cls.configs['plugin.config'].add_line('xdebug.so') - - cls.configs['remap.config'].add_line( - 'map /default/ http://127.0.0.1/ @plugin=generator.so' - ) - cls.configs['remap.config'].add_line( - 'map /generation1/ http://127.0.0.1/' + - ' @plugin=conf_remap.so @pparam=proxy.config.http.cache.generation=1' + - ' @plugin=generator.so' - ) - cls.configs['remap.config'].add_line( - 'map /generation2/ http://127.0.0.1/' + - ' @plugin=conf_remap.so @pparam=proxy.config.http.cache.generation=2' + - ' @plugin=generator.so' - ) - - # Start with cache generation turned off - cls.configs['records.config']['CONFIG']['proxy.config.http.cache.generation'] = -1 - # Wait for the cache so we don't race client requests against it. - cls.configs['records.config']['CONFIG']['proxy.config.http.wait_for_cache'] = 1 - cls.configs['records.config']['CONFIG']['proxy.config.config_update_interval_ms'] = 1 - - def test_generations_are_disjoint(self): - """Test that the same URL path in different cache generations creates disjoint objects""" - objectid = uuid.uuid4() - - # First touch is a MISS. 
- ret = self._fetch('default/cache/10/{}'.format(objectid)) - self.assertEqual(ret.status_code, 200) - self.assertEqual(ret.headers['x-cache'], 'miss', msg=ret) - self.assertEqual(ret.headers['x-cache-generation'], '-1') - - # Same URL in generation 1 is a MISS. - ret = self._fetch('generation1/cache/10/{}'.format(objectid)) - self.assertEqual(ret.status_code, 200) - self.assertEqual(ret.headers['x-cache'], 'miss') - self.assertEqual(ret.headers['x-cache-generation'], '1') - - # Same URL in generation 2 is still a MISS. - ret = self._fetch('generation2/cache/10/{}'.format(objectid)) - self.assertEqual(ret.status_code, 200) - self.assertEqual(ret.headers['x-cache'], 'miss') - self.assertEqual(ret.headers['x-cache-generation'], '2') - - # Second touch is a HIT. - ret = self._fetch('default/cache/10/{}'.format(objectid)) - self.assertEqual(ret.status_code, 200) - self.assertEqual(ret.headers['x-cache'], 'hit-fresh', msg=ret.headers['x-cache']) - self.assertEqual(ret.headers['x-cache-generation'], '-1') - - def test_online_cache_clear(self): - """Test that incrementing the cache generation acts like a cache clear""" - objectid = uuid.uuid4() - - # First touch is a MISS. - ret = self._fetch('default/cache/10/{}'.format(objectid)) - self.assertEqual(ret.status_code, 200) - self.assertEqual(ret.headers['x-cache'], 'miss') - - # Second touch is a HIT. - ret = self._fetch('default/cache/10/{}'.format(objectid)) - self.assertEqual(ret.status_code, 200) - self.assertEqual(ret.headers['x-cache'], 'hit-fresh') - - # Now update the generation number. 
- timeout = float(self._ctl('config', 'get', 'proxy.config.config_update_interval_ms').split(' ')[-1]) - generation = random.randrange(65000) - gencount = 0 - - self._ctl('config', 'set', 'proxy.config.http.cache.generation', str(generation)) - self._ctl('config', 'reload') - - for _ in xrange(5): - if gencount == 0: - log.debug('waiting {} secs for the config to update'.format(timeout / 1000)) - time.sleep(timeout / 1000) - - ret = self._fetch('default/cache/10/{}'.format(objectid)) - self.assertEqual(ret.status_code, 200) - - if ret.headers['x-cache-generation'] == str(generation): - if gencount == 0: - # First time we see the new generation, it should be a miss. - self.assertEqual(ret.headers['x-cache'], 'miss') - else: - # Now the previous hits should become misses. - self.assertEqual(ret.headers['x-cache'], 'hit-fresh') - else: - # Config has not updated, so it should be a hit. - self.assertEqual(ret.headers['x-cache'], 'hit-fresh') - self.assertEqual(ret.headers['x-cache-generation'], '-1') - - gencount = gencount + 1 - - self.assertNotEqual(gencount, 0, msg='proxy.config.http.cache.generation never updated') diff --git a/ci/tsqa/tests/test_chunked.py b/ci/tsqa/tests/test_chunked.py deleted file mode 100644 index 1a6861441bf..00000000000 --- a/ci/tsqa/tests/test_chunked.py +++ /dev/null @@ -1,199 +0,0 @@ -''' -Test chunked request/responses -''' -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import requests -import time -import logging -import json -import uuid -import socket - -import helpers - -import tsqa.test_cases -import tsqa.utils -import tsqa.endpoint - -log = logging.getLogger(__name__) - -import SocketServer - - -class ChunkedHandler(SocketServer.BaseRequestHandler): - """ - A subclass of RequestHandler which return chunked encoding optionally - - /parts/sleep_time/close - parts: number of parts to send - sleep_time: time between parts - close: bool whether to close properly - """ - - def handle(self): - # Receive the data in small chunks and retransmit it - conn_id = uuid.uuid4().hex - while True: - data = self.request.recv(4096).strip() - if data: - log.info('sending data back to the client') - else: - log.info('Client disconnected') - break - inc_lines = data.splitlines() - try: - uri = inc_lines[0].split()[1] - except IndexError: - break - parts = 5 # how many things to send - sleep_time = 0.2 # how long to sleep between parts - close = True # whether to close properly - if uri[1:]: # if there is something besides / - uri_parts = uri[1:].split('/') - if len(uri_parts) >= 1: - parts = int(uri_parts[0]) - if len(uri_parts) >= 2: - sleep_time = float(uri_parts[1]) - if len(uri_parts) >= 3: - close = json.loads(uri_parts[2]) - resp = ('HTTP/1.1 200 OK\r\n' - 'X-Conn-Id: ' + str(conn_id) + '\r\n' - 'Transfer-Encoding: chunked\r\n' - 'Connection: keep-alive\r\n' - '\r\n') - self.request.sendall(resp) - for x in xrange(0, parts): - self.request.sendall('{0}\r\n{1}\r\n'.format(len(str(x)), x)) - time.sleep(sleep_time) - if close: - 
self.request.sendall('0\r\n\r\n') - else: - self.request.sendall('lkfjasd;lfjas;d') - - time.sleep(2) - - -class TestChunked(helpers.EnvironmentCase): - @classmethod - def setUpEnv(cls, env): - ''' - This function is responsible for setting up the environment for this fixture - This includes everything pre-daemon start - ''' - - # create a socket server - cls.port = tsqa.utils.bind_unused_port()[1] - cls.server = tsqa.endpoint.SocketServerDaemon(ChunkedHandler, port=cls.port) - cls.server.start() - cls.server.ready.wait() - - cls.configs['remap.config'].add_line('map / http://127.0.0.1:{0}/'.format(cls.port)) - - cls.configs['records.config']['CONFIG'].update({ - 'proxy.config.http.connect_attempts_timeout': 5, - 'proxy.config.http.connect_attempts_max_retries': 0, - 'proxy.config.http.keep_alive_enabled_in': 1, - 'proxy.config.http.keep_alive_enabled_out': 1, - 'proxy.config.exec_thread.limit': 1, - 'proxy.config.exec_thread.autoconfig': 0, - 'proxy.config.http.chunking_enabled': 1, - }) - - def test_chunked_origin(self): - ''' - Test that the origin does in fact support keepalive - ''' - self._client_test_chunked_keepalive(self.port) - self._client_test_chunked_keepalive(self.port, num_bytes=2) - self._client_test_chunked_keepalive(self.port, num_bytes=2, sleep=1) - - def _client_test_chunked_keepalive(self, - port=None, - times=3, - num_bytes=None, - sleep=None, - ): - if port is None: - port = int(self.configs['records.config']['CONFIG']['proxy.config.http.server_ports']) - s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - s.connect(('127.0.0.1', port)) - - url = '/' - if num_bytes is not None: - url += str(num_bytes) - if sleep is not None: - if num_bytes is None: - raise Exception() - url += '/' + str(sleep) - - request = ('GET ' + url + ' HTTP/1.1\r\n' - 'Host: 127.0.0.1\r\n' - '\r\n') - uuid = None - # test basic - for x in xrange(1, times): - s.send(request) - resp = '' - while True: - response = s.recv(4096) - for line in response.splitlines(): - 
line = line.strip() - if line.startswith('X-Conn-Id:'): - r_uuid = line.replace('X-Conn-Id:', '') - if uuid is None: - uuid = r_uuid - else: - self.assertEqual(uuid, r_uuid) - resp += response - if resp.endswith('\r\n0\r\n\r\n'): - break - for x in xrange(0, num_bytes or 4): - self.assertIn('1\r\n{0}\r\n'.format(x), resp) - s.close() - - def test_chunked_basic(self): - url = 'http://127.0.0.1:{0}'.format(self.port) - ret = requests.get(url, proxies=self.proxies) - self.assertEqual(ret.status_code, 200) - self.assertEqual(ret.text.strip(), '01234') - - # TODO: fix keepalive with chunked responses - def test_chunked_keepalive_server(self): - url = 'http://127.0.0.1:{0}'.format(self.port) - ret = requests.get(url, proxies=self.proxies) - conn_id = ret.headers['x-conn-id'] - self.assertEqual(ret.status_code, 200) - self.assertEqual(ret.text.strip(), '01234') - - # make sure that a second request works, and since we have keep-alive out - # disabled it should be a new connection - ret = requests.get(url, proxies=self.proxies) - self.assertEqual(ret.status_code, 200) - self.assertEqual(ret.text.strip(), '01234') - self.assertEqual(conn_id, ret.headers['x-conn-id']) - - def test_chunked_keepalive_client(self): - self._client_test_chunked_keepalive() - self._client_test_chunked_keepalive(num_bytes=2) - self._client_test_chunked_keepalive(num_bytes=2, sleep=1) - - def test_chunked_bad_close(self): - url = 'http://127.0.0.1:{0}/5/0.1/false'.format(self.port) - # TODO: better exception catch (seems to be ConnectionError) - with self.assertRaises(Exception): - requests.get(url, proxies=self.proxies, timeout=2) diff --git a/ci/tsqa/tests/test_connect_attempts.py b/ci/tsqa/tests/test_connect_attempts.py deleted file mode 100644 index 5bb41bfdd5f..00000000000 --- a/ci/tsqa/tests/test_connect_attempts.py +++ /dev/null @@ -1,250 +0,0 @@ -''' -Test Origin Server Connect Attempts -''' -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license 
agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import requests -import time -import logging -import socket -import struct -import select -import threading - -import helpers - -log = logging.getLogger(__name__) - - -def thread_die_on_connect(sock): - sock.listen(0) - # poll - select.select([sock], [], []) - # exit - sock.close() - - -def thread_reset_after_accept(sock): - sock.listen(0) - first = True - num_requests = 0 - while True: - connection, addr = sock.accept() - num_requests += 1 - if first: - first = False - connection.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, struct.pack('ii', 1, 0)) - connection.close() - else: - connection.send(( - 'HTTP/1.1 200 OK\r\n' - 'Content-Length: {body_len}\r\n' - 'Content-Type: text/html; charset=UTF-8\r\n' - 'Connection: close\r\n\r\n{body}'.format(body_len=len(str(num_requests)), body=num_requests) - )) - connection.close() - - -def thread_partial_response(sock): - sock.listen(0) - first = True - num_requests = 0 - while True: - connection, addr = sock.accept() - num_requests += 1 - if first: - connection.send('HTTP/1.1 500 Internal Server Error\r\n') - connection.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, struct.pack('ii', 1, 0)) - connection.close() - first = False - else: - connection.send(( - 'HTTP/1.1 200 OK\r\n' - 'Content-Length: {body_len}\r\n' - 'Content-Type: 
text/html; charset=UTF-8\r\n' - 'Connection: close\r\n\r\n{body}'.format(body_len=len(str(num_requests)), body=num_requests) - )) - connection.close() - - -def thread_slow_response(sock): - ''' - Thread to sleep a decreasing amount of time before sending the response - - sleep times: 2 -> 1 -> 0 - ''' - sock.listen(0) - sleep_time = 2 - num_requests = 0 - # poll - while True: - select.select([sock], [], []) - try: - connection, addr = sock.accept() - time.sleep(sleep_time) - connection.send(( - 'HTTP/1.1 200 OK\r\n' - 'Content-Length: {body_len}\r\n' - 'Content-Type: text/html; charset=UTF-8\r\n' - 'Connection: close\r\n\r\n{body}'.format(body_len=len(str(num_requests)), body=num_requests) - )) - connection.close() - num_requests += 1 - except Exception as e: - print 'connection died!', e - pass - if sleep_time > 0: - sleep_time -= 1 - - -def thread_slow_close(sock): - ''' - Thread to sleep a decreasing amount of time after the request, before closing - - sleep times: 2 -> 1 -> 0 - ''' - sock.listen(0) - sleep_time = 2 - num_requests = 0 - # poll - while True: - select.select([sock], [], []) - try: - connection, addr = sock.accept() - connection.send(( - 'HTTP/1.1 200 OK\r\n' - 'Content-Length: {body_len}\r\n' - 'Content-Type: text/html; charset=UTF-8\r\n' - 'Connection: close\r\n\r\n{body}'.format(body_len=len(str(num_requests)), body=num_requests) - )) - time.sleep(sleep_time) - connection.close() - num_requests += 1 - except Exception as e: - print 'connection died!', e - pass - if sleep_time > 0: - sleep_time -= 1 - - -class TestOriginServerConnectAttempts(helpers.EnvironmentCase): - @classmethod - def setUpEnv(cls, env): - ''' - This function is responsible for setting up the environment for this fixture - This includes everything pre-daemon start - ''' - cls.sock_map = {} - - def _add_sock(name): - sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - sock.bind(('127.0.0.1', 0)) - cls.sock_map[name] = sock.getsockname()[1] - 
cls.configs['remap.config'].add_line('map /{0}/ http://127.0.0.1:{1}/'.format(name, cls.sock_map[name])) - return sock - # create a socket where we just bind - _add_sock('bound') - - # create a socket where we bind + listen - sock = _add_sock('listen') - sock.listen(1) - - # create a bunch of special socket servers - sock = _add_sock('die_on_connect') - t = threading.Thread(target=thread_die_on_connect, args=(sock,)) - t.daemon = True - t.start() - - sock = _add_sock('reset_after_accept') - t = threading.Thread(target=thread_reset_after_accept, args=(sock,)) - t.daemon = True - t.start() - - sock = _add_sock('slow_response') - t = threading.Thread(target=thread_slow_response, args=(sock,)) - t.daemon = True - t.start() - - sock = _add_sock('partial_response') - t = threading.Thread(target=thread_partial_response, args=(sock,)) - t.daemon = True - t.start() - - sock = _add_sock('slow_close') - t = threading.Thread(target=thread_slow_close, args=(sock,)) - t.daemon = True - t.start() - - # only add server headers when there weren't any - cls.configs['records.config']['CONFIG']['proxy.config.http.response_server_enabled'] = 2 - - # enable re-connects, timeout of 1s, max retires of 3 - cls.configs['records.config']['CONFIG']['proxy.config.http.connect_attempts_timeout'] = 1 - cls.configs['records.config']['CONFIG']['proxy.config.http.connect_attempts_max_retries'] = 3 - - def test_bound_origin(self): - '''Verify that we get 502s from an origin which just did a bind''' - url = 'http://127.0.0.1:{0}/bound/s'.format(self.configs['records.config']['CONFIG']['proxy.config.http.server_ports']) - ret = requests.get(url, timeout=2) - self.assertEqual(ret.status_code, 502) - - def test_listen_origin(self): - '''Verify that we get 502s from origins that bind + listen''' - url = 'http://127.0.0.1:{0}/listen/s'.format(self.configs['records.config']['CONFIG']['proxy.config.http.server_ports']) - ret = requests.get(url, timeout=2) - self.assertEqual(ret.status_code, 502) - - url = 
'http://127.0.0.1:{0}/listen/s'.format(self.configs['records.config']['CONFIG']['proxy.config.http.server_ports']) - ret = requests.get(url, timeout=2) - self.assertEqual(ret.status_code, 502) - - def test_die_on_connect_origin(self): - '''Verify that we get 504s from origins that die_on_connect''' - url = 'http://127.0.0.1:{0}/die_on_connect/s'.format(self.configs['records.config']['CONFIG']['proxy.config.http.server_ports']) - ret = requests.get(url, timeout=2) - self.assertEqual(ret.status_code, 504) - - def test_partial_response_origin(self): - ''' - Verify that we get 504s from origins that return a partial_response - - We want to bail out-- since the origin already got the request, we can't - gaurantee that the request is re-entrant - ''' - url = 'http://127.0.0.1:{0}/partial_response/s'.format(self.configs['records.config']['CONFIG']['proxy.config.http.server_ports']) - ret = requests.get(url, timeout=2) - self.assertEqual(ret.status_code, 500) - - def test_reset_after_accept_origin(self): - '''Verify that we get 502s from origins that reset_after_accept, once any bytes are sent to origin we assume we cannot re-dispatch''' - url = 'http://127.0.0.1:{0}/reset_after_accept/s'.format(self.configs['records.config']['CONFIG']['proxy.config.http.server_ports']) - ret = requests.get(url, timeout=2) - self.assertEqual(ret.status_code, 502) - - def test_slow_response(self): - '''Verify that we get 5xx from origins that take longer than acceptable, since we will not retry them''' - url = 'http://127.0.0.1:{0}/slow_response/s'.format(self.configs['records.config']['CONFIG']['proxy.config.http.server_ports']) - ret = requests.get(url, timeout=2) - # make sure it worked - self.assertEqual(ret.status_code, 504) - - def test_slow_close(self): - '''Verify that we retry connecting to an origin when there is a connection failure''' - url = 'http://127.0.0.1:{0}/slow_close/s'.format(self.configs['records.config']['CONFIG']['proxy.config.http.server_ports']) - ret = 
requests.get(url, timeout=2) - # make sure it worked - self.assertEqual(ret.status_code, 200) diff --git a/ci/tsqa/tests/test_custom_log.py b/ci/tsqa/tests/test_custom_log.py deleted file mode 100644 index 7f32b524c67..00000000000 --- a/ci/tsqa/tests/test_custom_log.py +++ /dev/null @@ -1,66 +0,0 @@ -''' -Test custom log field -''' - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import os -import requests -import time -import logging -import random -import tsqa.test_cases -import helpers - -log = logging.getLogger(__name__) - - -class TestCustomLogField(helpers.EnvironmentCase): - ''' - Tests for a customed log field called hii - ''' - @classmethod - def setUpEnv(cls, env): - - cls.configs['remap.config'].add_line( - 'map / http://www.linkedin.com/ @action=deny' - ) - cls.log_file_name = 'test_log_field' - - cls.log_file_path = os.path.join(cls.environment.layout.prefix, 'var/log/test_log_field.log') - cls.log_etc_file = os.path.join(cls.environment.layout.prefix, 'etc/trafficserver/logging.config') - cls.configs['logging.config'].add_line('log.ascii(Format = "% %", Filename = "test_log_field"') - - def ip_to_hex(self, ipstr): - num_list = ipstr.split('.') - int_value = (int(num_list[0]) << 24) + (int(num_list[1]) << 16) + (int(num_list[2]) << 8) + (int(num_list[3])) - return hex(int_value).upper()[2:] - - def test_log_field(self): - random.seed() - times = 10 - for i in xrange(times): - request_ip = "127.%d.%d.%d" % (random.randint(1, 255), random.randint(1, 255), random.randint(1, 255)) - url = 'http://%s:%s' % (request_ip, self.configs['records.config']['CONFIG']['proxy.config.http.server_ports']) - requests.get(url) - # get the last line of the log file - time.sleep(10) - with open(self.log_file_path) as f: - for line in f: - pass - expected_line = "%s %s\n" % (request_ip, self.ip_to_hex(request_ip)) - self.assertEqual(line, expected_line) diff --git a/ci/tsqa/tests/test_example.py b/ci/tsqa/tests/test_example.py deleted file mode 100644 index 3ba0cf04cdc..00000000000 --- a/ci/tsqa/tests/test_example.py +++ /dev/null @@ -1,185 +0,0 @@ -''' -Some example tests of the new tsqa -''' - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. 
The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import requests -import time -import subprocess - -import helpers - -import tsqa.test_cases -import tsqa.utils - -# TODO: for some reason subclasses of subclasses of TestCase don't work with the -# decorator -# @helpers.unittest.skip('Not running TestNoOp, as it is a NoOp test') - - -class TestNoOp(helpers.EnvironmentCase): - ''' - This is purely a documentation test - ''' - # you can set dependencies on specific "features" (according to traffic_layout) - # if the requirement isn't met the test is skipped - feature_requirements = {'TS_HAS_WCCP': 0} - - @classmethod - def setUpClass(cls): - ''' - If you'd like to skip an entire test - ''' - # you can also skip (or conditionally skip) tests - raise helpers.unittest.SkipTest('Skip the entire class') - - @classmethod - def setUpEnv(cls, env): - ''' - This funciton is responsible for setting up the environment for this fixture - This includes everything pre-daemon start. 
- - You are passed in cls (which is the instance of this class) and env (which - is an environment object) - ''' - # we can modify any/all configs (note: all pre-daemon start) - cls.configs['remap.config'].add_line('map / http://trafficserver.readthedocs.org/') - - # Some configs have nicer wrapper objects to give you a more pythonic interface - cls.configs['records.config']['CONFIG'].update({ - 'proxy.config.log.squid_log_enabled': 1, - 'proxy.config.log.squid_log_is_ascii': 1, - }) - - def test_something(self): - ''' - All functions beginning with "test_" will be run as tests for the class. - Within these functions your environment is already set up and started-- - you only need to excercise the code that you intend to test - ''' - # for example, you could send a request to ATS and check the response - ret = requests.get('http://127.0.0.1:{0}/'.format(self.configs['records.config']['CONFIG']['proxy.config.http.server_ports'])) - - self.assertEqual(ret.status_code, 404) - self.assertIn('ATS', ret.headers['server']) - - -class TestConfigureFlags(helpers.EnvironmentCase): - feature_requirements = {'TS_HAS_WCCP': 0} - def test_wccp(self): - self.assertTrue(True) - - -class TestBootstrap(helpers.EnvironmentCase): - def test_default_404(self): - ret = requests.get('http://127.0.0.1:{0}/'.format(self.configs['records.config']['CONFIG']['proxy.config.http.server_ports'])) - - self.assertEqual(ret.status_code, 404) - self.assertIn('ATS', ret.headers['server']) - - def test_trafficctl(self): - ''' - Test that traffic_ctl works, and verify that the values for proxy.config - match what we put in records.config - ''' - cmd = [os.path.join(self.environment.layout.bindir, 'traffic_ctl'), - 'config', - 'match', - 'proxy.config', - ] - stdout, _ = tsqa.utils.run_sync_command(cmd, stdout=subprocess.PIPE) - for line in stdout.splitlines(): - if not line.strip(): - continue - k, v = line.split(' ', 1) - if k not in self.configs['records.config']['CONFIG']: - continue - r_val = 
self.configs['records.config']['CONFIG'][k] - self.assertEqual(type(r_val)(v), self.configs['records.config']['CONFIG'][k]) - - -class TestServerIntercept(helpers.EnvironmentCase, tsqa.test_cases.DynamicHTTPEndpointCase): - endpoint_port = 60000 - - @classmethod - def setUpEnv(cls, env): - cls.configs['remap.config'].add_line('map / http://127.0.0.1:{0}'.format(cls.endpoint_port)) - - cls.configs['plugin.config'].add_line('intercept.so') - - def hello(request): - return 'hello' - cls.http_endpoint.add_handler('/', hello) - - def test_basic_intercept(self): - for _ in xrange(0, 10): - ret = requests.get('http://127.0.0.1:{0}/'.format(self.configs['records.config']['CONFIG']['proxy.config.http.server_ports'])) - - self.assertEqual(ret.status_code, 200) - - -class TestLogs(helpers.EnvironmentCase): - @classmethod - def setUpEnv(cls, env): - ''' - This funciton is responsible for setting up the environment for this fixture - This includes everything pre-daemon start - ''' - # only add server headers when there weren't any - cls.configs['records.config']['CONFIG'].update({ - 'proxy.config.diags.debug.tags': 'log-.*', - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.log.hostname': 'test', - }) - - def test_logs_exist(self): - # send some requests - for x in xrange(0, 10): - ret = requests.get('http://127.0.0.1:{0}/'.format(self.configs['records.config']['CONFIG']['proxy.config.http.server_ports'])) - - self.assertEqual(ret.status_code, 404) - self.assertIn('ATS', ret.headers['server']) - - # TODO: some better way to know when the logs where syncd - time.sleep(10) # wait for logs to hit disk - - # verify that the log files exist - for logfile in ('diags.log', 'error.log', 'squid.blog', 'traffic.out', 'manager.log'): - logfile_path = os.path.join(self.environment.layout.logdir, logfile) - self.assertTrue(os.path.isfile(logfile_path), logfile_path) - - -class TestDynamicHTTPEndpointCase(tsqa.test_cases.DynamicHTTPEndpointCase, helpers.EnvironmentCase): - 
@classmethod - def setUpEnv(cls, env): - ''' - This funciton is responsible for setting up the environment for this fixture - This includes everything pre-daemon start - ''' - cls.configs['remap.config'].add_line('map / http://127.0.0.1:{0}/\n'.format(cls.http_endpoint.address[1])) - - # only add server headers when there weren't any - cls.configs['records.config']['CONFIG']['proxy.config.http.response_server_enabled'] = 2 - - def test_basic_proxy(self): - ret = requests.get(self.endpoint_url('/test'), - proxies=self.proxies, - ) - self.assertEqual(ret.status_code, 404) - self.assertIn('WSGIServer', ret.headers['server']) diff --git a/ci/tsqa/tests/test_header_rewrite.py b/ci/tsqa/tests/test_header_rewrite.py deleted file mode 100644 index ed8d3c3a681..00000000000 --- a/ci/tsqa/tests/test_header_rewrite.py +++ /dev/null @@ -1,130 +0,0 @@ -''' -Test cookie rewrite -''' -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-import os -import requests -import time -import logging -import random -import tsqa.test_cases -import helpers -import shutil -import SocketServer -import urllib2 - -log = logging.getLogger(__name__) - -class EchoServerHandler(SocketServer.BaseRequestHandler): - """ - A subclass of RequestHandler which will return all data received back - """ - - def handle(self): - # Receive the data in small chunks and retransmit it - while True: - data = self.request.recv(4096).strip() - if data: - log.debug('Sending data back to the client') - else: - log.debug('Client disconnected') - break - cookie = '' - if 'Cookie' in data: - cookie = data.split('Cookie: ')[1].split('\r\n')[0] - - resp = ('HTTP/1.1 200 OK\r\n' - 'Content-Length: {data_length}\r\n' - 'Content-Type: text/html; charset=UTF-8\r\n' - 'Connection: keep-alive\r\n' - '\r\n{data_string}'.format( - data_length = len(cookie), - data_string = cookie - )) - self.request.sendall(resp) - -class TestHeaderRewrite(helpers.EnvironmentCase): - ''' - Tests for header rewrite - ''' - @classmethod - def setUpEnv(cls, env): - cls.traffic_server_port = int(cls.configs['records.config']['CONFIG']['proxy.config.http.server_ports']) - - # create a socket server - cls.socket_server = tsqa.endpoint.SocketServerDaemon(EchoServerHandler) - cls.socket_server.start() - cls.socket_server.ready.wait() - - cls.configs['remap.config'].add_line( - 'map / http://127.0.0.1:%d' %(cls.socket_server.port) - ) - - # setup the plugin - cls.config_file = 'header-rewrite.config' - cls.test_config_path = helpers.tests_file_path(cls.config_file) - - cls.configs['plugin.config'].add_line('%s/header_rewrite.so %s' % ( - cls.environment.layout.plugindir, - cls.test_config_path - )) - - def test_cookie_rewrite(self): - - cookie_test_add_dict = { - '' : 'testkey=testaddvalue', - 'testkey=somevalue' : 'testkey=somevalue', - 'otherkey=testvalue' : 'otherkey=testvalue;testkey=testaddvalue', - 'testkey = "other=value"; a = a' : 'testkey = "other=value"; a = a', - 
'testkeyx===' : 'testkeyx===;testkey=testaddvalue' - } - for key in cookie_test_add_dict: - opener = urllib2.build_opener() - opener.addheaders.append(('Cookie', key)) - f = opener.open("http://127.0.0.1:%d/addcookie" % (self.traffic_server_port)) - resp = f.read() - self.assertEqual(resp, cookie_test_add_dict[key]) - - cookie_test_rm_dict = { - '' : '', - ' testkey=somevalue' : '', - 'otherkey=testvalue' : 'otherkey=testvalue', - 'testkey = "other=value" ; a = a' : ' a = a', - 'otherkey=othervalue= ; testkey===' : 'otherkey=othervalue= ', - 'firstkey ="firstvalue" ; testkey = =; secondkey=\'\'' : 'firstkey ="firstvalue" ; secondkey=\'\'' - } - for key in cookie_test_rm_dict: - opener = urllib2.build_opener() - opener.addheaders.append(('Cookie', key)) - f = opener.open("http://127.0.0.1:%d/rmcookie" % (self.traffic_server_port)) - resp = f.read() - self.assertEqual(resp, cookie_test_rm_dict[key]) - - cookie_test_set_dict = { - '' : 'testkey=testsetvalue', - 'testkey=somevalue' : 'testkey=testsetvalue', - 'otherkey=testvalue' : 'otherkey=testvalue;testkey=testsetvalue', - 'testkey = "other=value"; a = a' : 'testkey = testsetvalue; a = a', - 'testkeyx===' : 'testkeyx===;testkey=testsetvalue', - 'firstkey ="firstvalue" ; testkey = =; secondkey=\'\'' : 'firstkey ="firstvalue" ; testkey = testsetvalue; secondkey=\'\'' - } - for key in cookie_test_set_dict: - opener = urllib2.build_opener() - opener.addheaders.append(('Cookie', key)) - f = opener.open("http://127.0.0.1:%d/setcookie" % (self.traffic_server_port)) - resp = f.read() - self.assertEqual(resp, cookie_test_set_dict[key]) diff --git a/ci/tsqa/tests/test_headrequest.py b/ci/tsqa/tests/test_headrequest.py deleted file mode 100644 index e19312a0258..00000000000 --- a/ci/tsqa/tests/test_headrequest.py +++ /dev/null @@ -1,115 +0,0 @@ -''' -Test Head Request -''' - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. 
See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import time -import logging -import SocketServer -import tsqa.test_cases -import helpers -import socket - -log = logging.getLogger(__name__) - - -class HeadRequestServerHandler(SocketServer.BaseRequestHandler): - """ - A subclass of RequestHandler which will response to head requests - """ - - def handle(self): - # Receive the data in small chunks and retransmit it - while True: - data = self.request.recv(4096).strip() - if data: - log.debug('Sending data back to the client') - else: - log.debug('Client disconnected') - break - if 'TE' in data: - resp = ('HTTP/1.1 200 OK\r\n' - 'Server: Apache-Coyote/1.1\r\n' - 'Transfer-Encoding: chunked\r\n' - 'Vary: Accept-Encoding\r\n' - '\r\n' - ) - self.request.sendall(resp) - elif 'CL' in data: - resp = ('HTTP/1.1 200 OK\r\n' - 'Server: Apache-Coyote/1.1\r\n' - 'Content-Length: 123\r\n' - 'Vary: Accept-Encoding\r\n' - '\r\n' - ) - self.request.sendall(resp) - else: - resp = ('HTTP/1.1 200 OK\r\n' - 'Server: Apache-Coyote/1.1\r\n' - 'Vary: Accept-Encoding\r\n' - '\r\n' - ) - self.request.sendall(resp) - - -class TestHeadRequestWithoutTimeout(helpers.EnvironmentCase): - ''' - Tests for ATS handling head requests correctly without waiting for the http body - ''' - @classmethod - def setUpEnv(cls, env): - cls.timeout = 5 - 
cls.configs['records.config']['CONFIG'].update({ - 'proxy.config.http.transaction_no_activity_timeout_out': cls.timeout, - }) - cls.socket_server = tsqa.endpoint.SocketServerDaemon(HeadRequestServerHandler) - cls.socket_server.start() - cls.socket_server.ready.wait() - cls.configs['remap.config'].add_line('map / http://127.0.0.1:{0}/'.format(cls.socket_server.port)) - log.info('map / http://127.0.0.1:{0}/'.format(cls.socket_server.port)) - - cls.proxy_host = '127.0.0.1' - cls.proxy_port = int(cls.configs['records.config']['CONFIG']['proxy.config.http.server_ports']) - - def test_head_request_without_timout(cls): - request_cases = ['TE', 'CL', ''] - for request_case in request_cases: - begin_time = time.time() - conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - conn.connect((cls.proxy_host, cls.proxy_port)) - request_content = 'HEAD / HTTP/1.1\r\nConnection: close\r\nHost: 127.0.0.1\r\nContent-Length: %d\r\n\r\n%s' % ( - len(request_case), request_case) - conn.setblocking(1) - conn.send(request_content) - while 1: - try: - resp = conn.recv(4096) - if len(resp) == 0: - break - response_content = resp - log.info(resp) - except: - break - conn.shutdown(socket.SHUT_RDWR) - conn.close() - end_time = time.time() - log.info("head request with case(%s) costs %f seconds while the timout is %f seconds." % ( - request_case, end_time - begin_time, cls.timeout)) - cls.assertGreater(cls.timeout, end_time - begin_time) - if request_case == 'CL': - cls.assertIn('Content-Length', response_content) diff --git a/ci/tsqa/tests/test_hostdb.py b/ci/tsqa/tests/test_hostdb.py deleted file mode 100644 index 124a32e1c96..00000000000 --- a/ci/tsqa/tests/test_hostdb.py +++ /dev/null @@ -1,639 +0,0 @@ -''' -Test hostdb -''' - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. 
The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import requests -import time -import logging -import socket -import SocketServer - -import contextlib -import dnslib -import dnslib.server - -import tsqa.test_cases -import helpers - -log = logging.getLogger(__name__) - - -@contextlib.contextmanager -def kill_dns(dns_server): - ''' Temporarily kill the dns server - ''' - dns_server.stop() - yield - dns_server.start_thread() - - -class StubDNSResolver(object): - '''Resolver to serve defined responses from `response_dict` or return SOA - ''' - def __init__(self, responses): - self.responses = responses - self.resp_headers = {} - - def resolve(self, request, handler): - reply = request.reply() - for q in request.questions: - qname = str(q.get_qname()) - if qname in self.responses: - for resp in self.responses[qname]: - reply.add_answer(resp) - else: - reply.add_answer(dnslib.server.RR( - q.get_qname(), - rtype=dnslib.QTYPE.SOA, - ttl=1, - rdata=dnslib.dns.SOA( - 'nameserver.local', - q.get_qname(), - ), - )) - for k, v in self.resp_headers.iteritems(): - if k == 'rcode': - reply.header.set_rcode(v) - print 'setting rcode' - else: - log.warning('Unsupported header sent to StubDNSResolver %s' % k) - return reply - - -class EchoServerIpHandler(SocketServer.BaseRequestHandler): - """ - A subclass of RequestHandler which will return a connection uuid - """ - - def handle(self): - # Receive the data in small chunks and retransmit it - while 
True: - data = self.request.recv(4096).strip() - if data: - log.debug('Sending data back to the client') - else: - log.debug('Client disconnected') - break - resp = ('HTTP/1.1 200 OK\r\n' - 'Content-Length: 0\r\n' - 'Content-Type: text/html; charset=UTF-8\r\n' - 'Connection: keep-alive\r\n' - 'X-Server-Ip: {server_ip}\r\n' - 'X-Server-Port: {server_port}\r\n' - 'X-Client-Ip: {client_ip}\r\n' - 'X-Client-Port: {client_port}\r\n' - '\r\n'.format( - server_ip=self.request.getsockname()[0], - server_port=self.request.getsockname()[1], - client_ip=self.request.getpeername()[0], - client_port=self.request.getpeername()[1], - )) - self.request.sendall(resp) - - -class TestHostDBBadResolvConf(helpers.EnvironmentCase): - ''' - Test that ATS can handle an empty resolv_conf - ''' - @classmethod - def setUpEnv(cls, env): - cls.configs['records.config']['CONFIG'].update({ - 'proxy.config.http.response_server_enabled': 2, # only add server headers when there weren't any - 'proxy.config.hostdb.lookup_timeout': 2, - 'proxy.config.dns.resolv_conf': '/tmp/non_existant_file', - 'proxy.config.url_remap.remap_required': 0, - - }) - - def test_working(self): - ret = requests.get('http://trafficserver.readthedocs.org', - proxies=self.proxies, - ) - self.assertEqual(ret.status_code, 502) - - -class TestHostDBPartiallyFailedDNS(helpers.EnvironmentCase): - ''' - Tests for how hostdb handles when there is one failed and one working resolver - ''' - @classmethod - def setUpEnv(cls, env): - # TODO: Fix this! 
- # This intermittently fails on Jenkins (such as https://ci.trafficserver.apache.org/job/tsqa-master/387/testReport/test_hostdb/TestHostDBPartiallyFailedDNS/test_working/) - # we aren't sure if this is a failure of ATS or just a race on jenkins (since its slow) - raise helpers.unittest.SkipTest() - - resolv_conf_path = os.path.join(env.layout.prefix, 'resolv.conf') - - cls.configs['records.config']['CONFIG'].update({ - 'proxy.config.http.response_server_enabled': 2, # only add server headers when there weren't any - 'proxy.config.hostdb.lookup_timeout': 2, - 'proxy.config.dns.resolv_conf': resolv_conf_path, - 'proxy.config.url_remap.remap_required': 0, - - }) - - with open(resolv_conf_path, 'w') as fh: - fh.write('nameserver 1.1.1.0\n') # some non-existant nameserver - fh.write('nameserver 8.8.8.8\n') # some REAL nameserver - - def test_working(self): - start = time.time() - ret = requests.get('http://trafficserver.readthedocs.org', - proxies=self.proxies, - ) - self.assertLess(time.time() - start, self.configs['records.config']['CONFIG']['proxy.config.hostdb.lookup_timeout']) - self.assertEqual(ret.status_code, 200) - - -class TestHostDBFailedDNS(helpers.EnvironmentCase): - ''' - Tests for how hostdb handles when there is no reachable resolver - ''' - @classmethod - def setUpEnv(cls, env): - resolv_conf_path = os.path.join(env.layout.prefix, 'resolv.conf') - - cls.configs['records.config']['CONFIG'].update({ - 'proxy.config.http.response_server_enabled': 2, # only add server headers when there weren't any - 'proxy.config.hostdb.lookup_timeout': 2, - 'proxy.config.dns.resolv_conf': resolv_conf_path, - 'proxy.config.url_remap.remap_required': 0, - - }) - - with open(resolv_conf_path, 'w') as fh: - fh.write('nameserver 1.1.1.0\n') # some non-existant nameserver - - def test_lookup_timeout(self): - start = time.time() - ret = requests.get('http://some_nonexistant_domain', - proxies=self.proxies, - ) - self.assertGreater(time.time() - start, 
self.configs['records.config']['CONFIG']['proxy.config.hostdb.lookup_timeout']) - self.assertEqual(ret.status_code, 502) - self.assertIn('ATS', ret.headers['server']) - - -class TestHostDBHostsFile(helpers.EnvironmentCase, tsqa.test_cases.HTTPBinCase): - ''' - Tests for hostdb's host-file implementation - ''' - @classmethod - def setUpEnv(cls, env): - cls.hosts_file_path = os.path.join(env.layout.prefix, 'hosts') - with open(cls.hosts_file_path, 'w') as fh: - fh.write('127.0.0.1 local\n') - fh.write('127.0.0.2 local2\n') - - cls.configs['records.config']['CONFIG'].update({ - 'proxy.config.http.response_server_enabled': 2, # only add server headers when there weren't any - 'proxy.config.hostdb.lookup_timeout': 2, - 'proxy.config.url_remap.remap_required': 1, - 'proxy.config.http.connect_attempts_max_retries': 1, - 'proxy.config.hostdb.host_file.interval': 1, - 'proxy.config.hostdb.host_file.path': cls.hosts_file_path, - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'hostdb', - }) - # create a socket server - cls.socket_server = tsqa.endpoint.SocketServerDaemon(EchoServerIpHandler) - cls.socket_server.start() - cls.socket_server.ready.wait() - cls.configs['remap.config'].add_line('map http://local/ http://local:{0}/'.format(cls.socket_server.port)) - cls.configs['remap.config'].add_line('map http://local2/ http://local2:{0}/'.format(cls.socket_server.port)) - cls.configs['remap.config'].add_line('map http://local3/ http://local3:{0}/'.format(cls.socket_server.port)) - - def test_basic(self): - ''' - Test basic fnctionality of hosts files - ''' - # TODO add stat, then wait for the stat to increment - time.sleep(2) # wait for the continuation to load the hosts file - ret = requests.get( - 'http://local/get', - proxies=self.proxies, - ) - self.assertEqual(ret.status_code, 200) - self.assertEqual('127.0.0.1', ret.headers['X-Server-Ip']) - - ret = requests.get( - 'http://local2/get', - proxies=self.proxies, - ) - 
self.assertEqual(ret.status_code, 200) - self.assertEqual('127.0.0.2', ret.headers['X-Server-Ip']) - - def test_reload(self): - ''' - Test that changes to hosts file get loaded within host_file.interval - ''' - # TODO add stat, then wait for the stat to increment - time.sleep(2) # wait for the continuation to load the hosts file - ret = requests.get( - 'http://local3/get', - proxies=self.proxies, - ) - self.assertEqual(ret.status_code, 502) - - with open(self.hosts_file_path, 'a') as fh: - fh.write('127.0.0.3 local3\n') - - # TODO add stat, then wait for the stat to increment, with a timeout - time.sleep(2) - - ret = requests.get( - 'http://local3/get', - proxies=self.proxies, - ) - self.assertEqual(ret.status_code, 200) - self.assertEqual('127.0.0.3', ret.headers['X-Server-Ip']) - - -class TestHostDB(helpers.EnvironmentCase, tsqa.test_cases.HTTPBinCase): - @classmethod - def setUpEnv(cls, env): - cls.dns_sock = socket.socket (socket.AF_INET, socket.SOCK_DGRAM) - cls.dns_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - cls.dns_sock.bind(('', 0)) # bind to all interfaces on an ephemeral port - dns_port = cls.dns_sock.getsockname()[1] - - # set up dns resolver - cls.responses = { - 'www.foo.com.': dnslib.server.RR.fromZone("foo.com. 1 A 127.0.0.1"), - 'www.stale_for.com.': dnslib.server.RR.fromZone("foo.com. 
1 A 127.0.0.1"), - } - - cls.dns_server = dnslib.server.DNSServer( - StubDNSResolver(cls.responses), - port=dns_port, - address="localhost", - ) - cls.dns_server.start_thread() - - cls.hosts_file_path = os.path.join(env.layout.prefix, 'resolv') - with open(cls.hosts_file_path, 'w') as fh: - fh.write('nameserver 127.0.0.1:{0}\n'.format(dns_port)) - - cls.configs['records.config']['CONFIG'].update({ - 'proxy.config.http.response_server_enabled': 2, # only add server headers when there weren't any - 'proxy.config.hostdb.lookup_timeout': 1, - 'proxy.config.url_remap.remap_required': 0, - 'proxy.config.http.connect_attempts_max_retries': 1, - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'hostdb', - 'proxy.config.dns.resolv_conf': os.path.join(env.layout.prefix, 'resolv'), - 'proxy.config.hostdb.serve_stale_for': 2, - 'proxy.config.hostdb.ttl_mode': 0, - 'proxy.config.http_ui_enabled': 3, - }) - - cls.configs['remap.config'].add_line('map /_hostdb/ http://{hostdb}') - - def _hostdb_entries(self): - # mapping of name -> entries - ret = {} - showall_ret = requests.get('http://127.0.0.1:{0}/_hostdb/showall?format=json'.format( - self.configs['records.config']['CONFIG']['proxy.config.http.server_ports'] - ), timeout=1).json() - - for item in showall_ret: - ret[item['hostname']] = item - - return ret - - def test_dns(self): - '''Test that DNS lookups end up in hostdb as we expect - ''' - # TODO: remove - self.test_basic() - print self._hostdb_entries() - - # test something with a LARGE number of entries - zone_parts = [] - # TODO: fix this, right now there is `#define DNS_MAX_ADDRS 35` which - # controls how many work-- we should make this configurable - # 30 works, once you pass 35 some records are missing, and at some point - # you start getting garbage (50 for example) and at some point (100) it - # seems to crash - NUM_RECORDS = 2 - for x in xrange(0, NUM_RECORDS): - zone_parts.append("www.huge.com. 
1 A 127.0.0.{0}".format(x + 1)) - self.responses['www.huge.com.'] = dnslib.server.RR.fromZone('\n'.join(zone_parts)) - - ret = requests.get( - 'http://www.huge.com:{0}/get'.format(self.http_endpoint.address[1]), - proxies=self.proxies, - ) - #self.assertEqual(ret.status_code, 200) - - for item in self._hostdb_entries()['www.huge.com']['rr_records']: - print item['ip'] - - self.assertEqual(len(self._hostdb_entries()['www.huge.com']['rr_records']), NUM_RECORDS) - - - def test_basic(self): - ''' - Test basic fnctionality of resolver - ''' - - # test one that works - ret = requests.get( - 'http://www.foo.com:{0}/get'.format(self.http_endpoint.address[1]), - proxies=self.proxies, - ) - self.assertEqual(ret.status_code, 200) - - # check one that doesn't exist - ret = requests.get( - 'http://www.bar.com:{0}/get'.format(self.http_endpoint.address[1]), - proxies=self.proxies, - ) - self.assertEqual(ret.status_code, 502) - - def test_serve_stail_for(self): - start = time.time() - ret = requests.get( - 'http://www.stale_for.com:{0}/get'.format(self.http_endpoint.address[1]), - proxies=self.proxies, - ) - self.assertEqual(ret.status_code, 200) - # mark the DNSServer down - with kill_dns(self.dns_server): - timeout_at = time.time() + 10 - end_working = None - end = None - count = 0 - - while time.time() < timeout_at: - ret = requests.get( - 'http://www.stale_for.com:{0}/get'.format(self.http_endpoint.address[1]), - proxies=self.proxies, - ) - count += 1 - if ret.status_code != 200: - end = time.time() - break - else: - end_working = time.time() - time.sleep(0.5) - # ensure that it was for at least 2 seconds - print end_working - start - self.assertTrue(end_working - start >= 2) - # TODO: Fix this! 
- # for whatever reason the failed DNS response is taking ~3.5s to timeout - # even though the hostdb.lookup_timeout is set to 1 (meaning it should be ~1s) - #print end - end_working - #self.assertTrue(end - start >= 2) - - -class TestHostDBSRV(helpers.EnvironmentCase): - '''Tests for SRV records within hostdb - - Tests: - - SRV record - - port overriding - - http/https lookups - - fallback to non SRV - ''' - SS_CONFIG = { - '_http._tcp.www.foo.com.': lambda: tsqa.endpoint.SocketServerDaemon(EchoServerIpHandler), - '_https._tcp.www.foo.com.': lambda: tsqa.endpoint.SSLSocketServerDaemon( - EchoServerIpHandler, - helpers.tests_file_path('cert.pem'), - helpers.tests_file_path('key.pem'), - ), - } - - # TODO: clean up and push into trafficserver-qa - @property - def proxies(self): - ''' - Return a dict of schema -> proxy. This is primarily used for requests - ''' - ret = {} - for item in self.configs['records.config']['CONFIG']['proxy.config.http.server_ports'].split(): - # TODO: better parsing? For now assume first is port - parts = item.split(':') - dst = 'http://127.0.0.1:{0}'.format(parts[0]) - if len(parts) > 1: - if parts[1] not in ret: - ret[parts[1]] = dst - elif 'http' not in ret: - ret['http'] = dst - return ret - - - @classmethod - def setUpEnv(cls, env): - cls.dns_sock = socket.socket (socket.AF_INET, socket.SOCK_DGRAM) - cls.dns_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - cls.dns_sock.bind(('', 0)) # bind to all interfaces on an ephemeral port - dns_port = cls.dns_sock.getsockname()[1] - - # set up dns resolver - cls.responses = { - 'www.foo.com.': dnslib.server.RR.fromZone("foo.com. 1 A 127.0.0.3\nfoo.com. 1 A 127.0.0.2"), - 'www.stale_for.com.': dnslib.server.RR.fromZone("foo.com. 
1 A 127.0.0.1"), - } - - cls.dns_server = dnslib.server.DNSServer( - StubDNSResolver(cls.responses), - port=dns_port, - address="localhost", - ) - cls.dns_server.start_thread() - - cls.ssl_port = tsqa.utils.bind_unused_port()[1] - cls.configs['records.config']['CONFIG']['proxy.config.http.server_ports'] += ' {0}:ssl'.format(cls.ssl_port) - - cls.configs['records.config']['CONFIG'].update({ - 'proxy.config.http.response_server_enabled': 2, # only add server headers when there weren't any - 'proxy.config.hostdb.lookup_timeout': 1, - 'proxy.config.http.connect_attempts_max_retries': 1, - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'hostdb', - 'proxy.config.dns.resolv_conf': os.path.join(env.layout.prefix, 'resolv'), - 'proxy.config.hostdb.serve_stale_for': 2, - 'proxy.config.hostdb.ttl_mode': 0, - 'proxy.config.http_ui_enabled': 3, - 'proxy.config.dns.nameservers': '127.0.0.1:{0}'.format(dns_port), - 'proxy.config.srv_enabled': 1, - }) - - cls.configs['ssl_multicert.config'].add_line('dest_ip=* ssl_cert_name={0}'.format( - helpers.tests_file_path('rsa_keys/www.test.com.pem'), - )) - - y = -1 - for name, factory in cls.SS_CONFIG.iteritems(): - y += 1 - ss_dns_results = [] - for x in xrange(0, 3): - ss = factory() - ss.start() - ss.ready.wait() - ss_dns_results.append(dnslib.server.RR( - name, - dnslib.dns.QTYPE.SRV, - rdata = dnslib.dns.SRV( - priority=10, - weight=10, - port=ss.port, - target='127.0.{0}.{1}.'.format(y, x + 1), # note: NUM_REALS must be < 253 - ), - ttl=1, - )) - cls.responses[name] = ss_dns_results - - cls.configs['remap.config'].add_line('map http://www.foo.com/ http://www.foo.com/') - cls.configs['remap.config'].add_line('map https://www.foo.com/ https://www.foo.com/') - cls.configs['remap.config'].add_line('map /_hostdb/ http://{hostdb}') - - def _hostdb_entries(self): - # mapping of name -> entries - ret = {} - showall_ret = requests.get('http://127.0.0.1:{0}/_hostdb/showall?format=json'.format( - 
self.configs['records.config']['CONFIG']['proxy.config.http.server_ports'] - ), timeout=1) - return showall_ret.text - - for item in showall_ret: - ret[item['hostname']] = item - - return ret - - def test_https(self): - '''Test https SRV lookups - - we expect the SRV lookup to get different hosts, but otherwise act the same - ''' - time.sleep(1) - expected_set = set([d.rdata.port for d in self.responses['_https._tcp.www.foo.com.']]) - - actual_set = set() - for x in xrange(0, 10): - # test one that works - ret = requests.get( - 'https://localhost:{0}/'.format(self.ssl_port), - headers={'Host': 'www.foo.com'}, - verify=False, # self signed certs, don't bother verifying - ) - self.assertEqual(ret.status_code, 200) - actual_set.add(int(ret.headers['X-Server-Port'])) - - self.assertEqual(expected_set, actual_set) - - def test_ports(self): - '''Test port functionality of SRV responses - - SRV responses include ports-- so we want to ensure that we are correctly - overriding the port based on the response - ''' - time.sleep(1) - expected_set = set([d.rdata.port for d in self.responses['_http._tcp.www.foo.com.']]) - - actual_set = set() - for x in xrange(0, 10): - # test one that works - ret = requests.get( - 'http://www.foo.com/', - proxies=self.proxies, - ) - self.assertEqual(ret.status_code, 200) - actual_set.add(int(ret.headers['X-Server-Port'])) - - self.assertEqual(expected_set, actual_set) - - def test_priority(self): - '''Test port functionality of SRV responses - - SRV responses include ports-- so we want to ensure that we are correctly - overriding the port based on the response - ''' - time.sleep(3) # TODO: clear somehow? 
waiting for expiry is lame - - NUM_REQUESTS = 10 - orig_responses = self.responses['_http._tcp.www.foo.com.'] - try: - self.responses['_http._tcp.www.foo.com.'][0].rdata.priority=1 - - request_distribution = {} - for x in xrange(0, NUM_REQUESTS): - # test one that works - ret = requests.get( - 'http://www.foo.com/', - proxies=self.proxies, - ) - self.assertEqual(ret.status_code, 200) - port = int(ret.headers['X-Server-Port']) - if port not in request_distribution: - request_distribution[port] = 0 - request_distribution[port] += 1 - - # since one has a lower priority, we want to ensure that it got all requests - self.assertEqual( - request_distribution[self.responses['_http._tcp.www.foo.com.'][0].rdata.port], - NUM_REQUESTS, - ) - - finally: - self.responses['_http._tcp.www.foo.com.'] = orig_responses - - def test_weight(self): - '''Test port functionality of SRV responses - - SRV responses include ports-- so we want to ensure that we are correctly - overriding the port based on the response - ''' - time.sleep(3) # TODO: clear somehow? 
waiting for expiry is lame - - NUM_REQUESTS = 100 - orig_responses = self.responses['_http._tcp.www.foo.com.'] - try: - self.responses['_http._tcp.www.foo.com.'][0].rdata.weight=100 - - request_distribution = {} - for x in xrange(0, NUM_REQUESTS): - # test one that works - ret = requests.get( - 'http://www.foo.com/', - proxies=self.proxies, - ) - self.assertEqual(ret.status_code, 200) - port = int(ret.headers['X-Server-Port']) - if port not in request_distribution: - request_distribution[port] = 0 - request_distribution[port] += 1 - - # since the first one has a significantly higher weight, we expect it to - # take ~10x the traffic of the other 2 - self.assertTrue( - request_distribution[self.responses['_http._tcp.www.foo.com.'][0].rdata.port] > - (NUM_REQUESTS / len(self.responses['_http._tcp.www.foo.com.'])) * 2, - 'Expected significantly more traffic on {0} than the rest: {1}'.format( - self.responses['_http._tcp.www.foo.com.'][0].rdata.port, - request_distribution, - ), - ) - - finally: - self.responses['_http._tcp.www.foo.com.'] = orig_responses diff --git a/ci/tsqa/tests/test_http2.py b/ci/tsqa/tests/test_http2.py deleted file mode 100644 index 7f8be87ba3a..00000000000 --- a/ci/tsqa/tests/test_http2.py +++ /dev/null @@ -1,97 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -import logging -import os - -import helpers - -import tsqa.endpoint -import tsqa.test_cases -import tsqa.utils - -try: - import hyper -except ImportError: - raise helpers.unittest.SkipTest('Cannot import hyper, skipping tests for HTTP/2') - -log = logging.getLogger(__name__) - - -class TestHTTP2(helpers.EnvironmentCase, tsqa.test_cases.HTTPBinCase): - @classmethod - def setUpEnv(cls, env): - ''' - Setting up environment for testing of HTTP2 - ''' - # get HTTP/2 server ports - cls.http2_port = tsqa.utils.bind_unused_port()[1] - - # HTTP2 configs - cls.configs['records.config']['CONFIG']['proxy.config.http.server_ports'] += ' {0}:ssl'.format(cls.http2_port) - cls.configs['records.config']['CONFIG']['proxy.config.ssl.server.cert.path'] = helpers.tests_file_path('rsa_keys') - cls.configs['records.config']['CONFIG']['proxy.config.diags.debug.enabled'] = 1 - cls.configs['records.config']['CONFIG']['proxy.config.diags.debug.tags'] = 'http2.*|ssl.*' - - # configure SSL multicert - cls.configs['ssl_multicert.config'].add_line( - 'dest_ip=* ssl_cert_name={0}\n'.format(helpers.tests_file_path('rsa_keys/www.example.com.pem')) - ) - - # remap configs - cls.configs['remap.config'].add_line( - 'map / http://127.0.0.1:{0}/'.format(cls.http_endpoint.address[1]) - ) - - # Turn off certificate verification for the tests. 
- # hyper-0.4.0 verify certs in default and can't turn it off without below hack:( - hyper.tls._context = hyper.tls.init_context() - hyper.tls._context.check_hostname = False - hyper.tls._context.verify_mode = hyper.compat.ssl.CERT_NONE - - def __cat(self, target_file_path): - ''' - Cat given file - ''' - for line in open(target_file_path).readlines(): - log.debug(line[:-1]) - - def __traffic_out(self): - ''' - Cat traffic.out - ''' - self.__cat(os.path.join(self.environment.layout.logdir, 'traffic.out')) - - def __diags_log(self): - ''' - Cat diags.log - ''' - self.__cat(os.path.join(self.environment.layout.logdir, 'diags.log')) - - def test_http2_request_hyper(self): - ''' - Test HTTP/2 w/ hyper (Normal Scenario) - ''' - try: - conn = hyper.HTTPConnection('127.0.0.1', self.http2_port, secure=True) - stream_id = conn.request('GET', '/') - ret = conn.get_response() - - self.assertNotEqual(stream_id, None) - self.assertEqual(ret.status, 200) - except Exception as e: - log.error(e) - self.__traffic_out() - self.__diags_log() diff --git a/ci/tsqa/tests/test_http2_spec.py b/ci/tsqa/tests/test_http2_spec.py deleted file mode 100644 index 915ca1a8180..00000000000 --- a/ci/tsqa/tests/test_http2_spec.py +++ /dev/null @@ -1,115 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -# -# Note: This test case uses h2spec. Please install it yourself. -# https://github.com/summerwind/h2spec -# - -import logging -import os -import subprocess - -import helpers - -import tsqa.endpoint -import tsqa.test_cases -import tsqa.utils - -log = logging.getLogger(__name__) - - -# helper function to get h2spec path -def which(program): - def is_exe(fpath): - return os.path.isfile(fpath) and os.access(fpath, os.X_OK) - fpath, fname = os.path.split(program) - if fpath: - if is_exe(program): - return program - else: - for path in os.environ["PATH"].split(os.pathsep): - path = path.strip('"') - exe_file = os.path.join(path, program) - if is_exe(exe_file): - return exe_file - return None - - -class TestH2Spec(helpers.EnvironmentCase, tsqa.test_cases.HTTPBinCase): - @classmethod - def setUpEnv(cls, env): - ''' - Setting up environment for testing of HTTP2 - ''' - # get path to h2spec - cls.h2spec = which('h2spec') - if cls.h2spec is None: - raise helpers.unittest.SkipTest('Cannot find h2spec. 
skipping test.') - - # get HTTP/2 server ports - cls.http2_port = tsqa.utils.bind_unused_port()[1] - - # HTTP2 configs - cls.configs['records.config']['CONFIG']['proxy.config.http.server_ports'] += ' {0}:ssl'.format(cls.http2_port) - cls.configs['records.config']['CONFIG']['proxy.config.ssl.server.cert.path'] = helpers.tests_file_path('rsa_keys') - cls.configs['records.config']['CONFIG']['proxy.config.diags.debug.enabled'] = 1 - cls.configs['records.config']['CONFIG']['proxy.config.diags.debug.tags'] = 'http2.*|ssl.*' - - # configure SSL multicert - cls.configs['ssl_multicert.config'].add_line( - 'dest_ip=* ssl_cert_name={0}\n'.format(helpers.tests_file_path('rsa_keys/www.example.com.pem')) - ) - - # remap configs - cls.configs['remap.config'].add_line( - 'map / http://127.0.0.1:{0}/'.format(cls.http_endpoint.address[1]) - ) - - def __callH2Spec(self, section=None): - ''' - Call h2spec - ''' - args = [self.h2spec, '-h', 'localhost', '-p', str(self.http2_port), '-t', '-k'] - if section is not None: - args.extend(['-s', section]) - - log.info('full args = {0}'.format(args)) - p = subprocess.Popen( - args, - stdout=subprocess.PIPE, - stdin=subprocess.PIPE, - ) - self.stdout, self.stderr = p.communicate() - log.info('\n' + self.stdout) - - return p.returncode - - def test_http2_spec_section(self): - ''' - Test HTTP/2 w/ h2spec (Exceptional Scenario) - ''' - sections = ['3.5', '4.2', '5.1', '5.3.1', '5.4.1', '5.5', '6.1', '6.2', '6.3', '6.4', '6.5', '6.5.2', '6.7', '6.8', - '6.9', '6.9.1', '6.10', '8.1', '8.1.2', '8.1.2.2', '8.1.2.3', '8.1.2.6', '8.2'] - for section in sections: - self.__callH2Spec(section) - self.assertIn('All tests passed', self.stdout, 'Failed at section %s of RFC7540' % section) - - # TODO these tests cannot pass currently. 
move to above after ATS can pass them - failing_sections = ['4.3'] - for section in failing_sections: - self.__callH2Spec(section) - self.assertNotIn('All tests passed', self.stdout, 'Failed at section %s of RFC7540' % section) diff --git a/ci/tsqa/tests/test_https.py b/ci/tsqa/tests/test_https.py deleted file mode 100644 index 8e45d6ed76e..00000000000 --- a/ci/tsqa/tests/test_https.py +++ /dev/null @@ -1,345 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from OpenSSL import SSL -import socket -import time -import helpers -import tsqa.utils -import os -import logging -unittest = tsqa.utils.import_unittest() - -log = logging.getLogger(__name__) -# some ciphers to test with -CIPHER_MAP = { - 'rsa': 'ECDHE-RSA-AES256-GCM-SHA384', - 'ecdsa': 'ECDHE-ECDSA-AES256-GCM-SHA384', -} - - -class CertSelectionMixin(object): - def _get_cert(self, addr, sni_name=None, ciphers=None): - ''' - Return the certificate for addr. 
Optionally sending sni_name - ''' - ctx = SSL.Context(SSL.TLSv1_2_METHOD) - # Set up client - sock = SSL.Connection(ctx, socket.socket(socket.AF_INET, socket.SOCK_STREAM)) - sock.connect(addr) - if sni_name is not None: - sock.set_tlsext_host_name(sni_name) - if ciphers is not None: - ctx.set_cipher_list(ciphers) - sock.do_handshake() - return sock.get_peer_certificate() - - def _get_cert_chain(self, addr, sni_name=None, ciphers=None): - ''' - Return the certificate chain for addr. Optionally sending sni_name - ''' - ctx = SSL.Context(SSL.TLSv1_2_METHOD) - # Set up client - sock = SSL.Connection(ctx, socket.socket(socket.AF_INET, socket.SOCK_STREAM)) - sock.connect(addr) - if sni_name is not None: - sock.set_tlsext_host_name(sni_name) - if ciphers is not None: - ctx.set_cipher_list(ciphers) - sock.do_handshake() - return sock.get_peer_cert_chain() - - def test_star_ordering(self): - ''' - We should be served the first match, since we aren't sending SNI headers - ''' - addr = ('127.0.0.1', self.ssl_port) - cert = self._get_cert(addr) - self.assertEqual(cert.get_subject().commonName.decode(), 'www.example.com') - - def test_star_sni(self): - ''' - Make sure we get the certificate we asked for if we pass in SNI headers - ''' - addr = ('127.0.0.1', self.ssl_port) - cert = self._get_cert(addr, sni_name='www.test.com') - self.assertEqual(cert.get_subject().commonName.decode(), 'www.test.com') - - cert = self._get_cert(addr, sni_name='www.example.com') - self.assertEqual(cert.get_subject().commonName.decode(), 'www.example.com') - - def test_ip_ordering(self): - ''' - We should be served the first match, since we aren't sending SNI headers - ''' - addr = ('127.0.0.2', self.ssl_port) - cert = self._get_cert(addr) - self.assertEqual(cert.get_subject().commonName.decode(), 'www.example.com') - - def test_ip_sni(self): - ''' - Make sure we get the certificate we asked for if we pass in SNI headers - ''' - addr = ('127.0.0.2', self.ssl_port) - cert = self._get_cert(addr, 
sni_name='www.test.com') - self.assertEqual(cert.get_subject().commonName.decode(), 'www.test.com') - - cert = self._get_cert(addr, sni_name='www.example.com') - self.assertEqual(cert.get_subject().commonName.decode(), 'www.example.com') - - def _intermediate_ca_t(self, cipher): - ''' - Method for testing intermediate CAs. We assume that www.example.com should - return a certificate chaing of len 2 which includes intermediate. - We also assume that www.test.com returns a single cert in the chain which - is *not* intermediate - ''' - # send a request that *should* get an intermediate CA - addr = ('127.0.0.1', self.ssl_port) - cert_chain = self._get_cert_chain(addr, ciphers=CIPHER_MAP[cipher]) - self.assertEqual(len(cert_chain), 2) - self.assertEqual(cert_chain[0].get_subject().commonName.decode(), 'www.example.com') - self.assertEqual(cert_chain[1].get_subject().commonName.decode(), 'intermediate') - - # send a request that shouldn't get an intermediate CA - addr = ('127.0.0.1', self.ssl_port) - cert_chain = self._get_cert_chain(addr, ciphers=CIPHER_MAP[cipher], sni_name='www.test.com') - self.assertEqual(len(cert_chain), 1) - self.assertEqual(cert_chain[0].get_subject().commonName.decode(), 'www.test.com') - - -class TestRSA(helpers.EnvironmentCase, CertSelectionMixin): - ''' - Tests for https for ATS configured with RSA certificates - ''' - @classmethod - def setUpEnv(cls, env): - # add an SSL port to ATS - cls.ssl_port = tsqa.utils.bind_unused_port()[1] - cls.configs['records.config']['CONFIG']['proxy.config.http.server_ports'] += ' {0}:ssl'.format(cls.ssl_port) - cls.configs['records.config']['CONFIG'].update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'ssl', - 'proxy.config.ssl.server.cipher_suite': CIPHER_MAP['rsa'], - }) - - # configure SSL multicert - cls.configs['ssl_multicert.config'].add_line('dest_ip=127.0.0.2 ssl_cert_name={0} ssl_ca_name={1}'.format( - helpers.tests_file_path('rsa_keys/www.example.com.pem'), - 
helpers.tests_file_path('rsa_keys/intermediate.crt'), - )) - cls.configs['ssl_multicert.config'].add_line('dest_ip=127.0.0.2 ssl_cert_name={0}'.format( - helpers.tests_file_path('rsa_keys/www.test.com.pem'), - )) - - cls.configs['ssl_multicert.config'].add_line('dest_ip=* ssl_cert_name={0} ssl_ca_name={1}'.format( - helpers.tests_file_path('rsa_keys/www.example.com.pem'), - helpers.tests_file_path('rsa_keys/intermediate.crt'), - )) - cls.configs['ssl_multicert.config'].add_line('dest_ip=* ssl_cert_name={0}'.format( - helpers.tests_file_path('rsa_keys/www.test.com.pem'), - )) - - def test_rsa(self): - addr = ('127.0.0.1', self.ssl_port) - cert = self._get_cert(addr, ciphers=CIPHER_MAP['rsa']) - self.assertEqual(cert.get_subject().commonName.decode(), 'www.example.com') - - def test_ecdsa(self): - addr = ('127.0.0.1', self.ssl_port) - with self.assertRaises(Exception): - cert = self._get_cert(addr, ciphers=CIPHER_MAP['ecdsa']) - self.assertEqual(cert.get_subject().commonName.decode(), 'www.example.com') - - def test_intermediate_ca_rsa(self): - self._intermediate_ca_t('rsa') - - def test_intermediate_ca_ecdsa(self): - with self.assertRaises(Exception): - self._intermediate_ca_t('ecdsa') - - -class TestECDSA(helpers.EnvironmentCase, CertSelectionMixin): - ''' - Tests for https for ATS configured with ECDSA certificates - ''' - @classmethod - def setUpEnv(cls, env): - # add an SSL port to ATS - cls.ssl_port = tsqa.utils.bind_unused_port()[1] - cls.configs['records.config']['CONFIG']['proxy.config.http.server_ports'] += ' {0}:ssl'.format(cls.ssl_port) - cls.configs['records.config']['CONFIG'].update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'ssl', - 'proxy.config.ssl.server.cipher_suite': CIPHER_MAP['ecdsa'], - }) - - # configure SSL multicert - cls.configs['ssl_multicert.config'].add_line('dest_ip=127.0.0.2 ssl_cert_name={0} ssl_ca_name={1}'.format( - helpers.tests_file_path('ec_keys/www.example.com.pem'), - 
helpers.tests_file_path('ec_keys/intermediate.crt'), - )) - cls.configs['ssl_multicert.config'].add_line('dest_ip=127.0.0.2 ssl_cert_name={0}'.format( - helpers.tests_file_path('ec_keys/www.test.com.pem'), - )) - - cls.configs['ssl_multicert.config'].add_line('dest_ip=* ssl_cert_name={0} ssl_ca_name={1}'.format( - helpers.tests_file_path('ec_keys/www.example.com.pem'), - helpers.tests_file_path('ec_keys/intermediate.crt'), - )) - cls.configs['ssl_multicert.config'].add_line('dest_ip=* ssl_cert_name={0}'.format( - helpers.tests_file_path('ec_keys/www.test.com.pem'), - )) - - def test_rsa(self): - addr = ('127.0.0.1', self.ssl_port) - with self.assertRaises(Exception): - cert = self._get_cert(addr, ciphers=CIPHER_MAP['rsa']) - self.assertEqual(cert.get_subject().commonName.decode(), 'www.example.com') - - def test_ecdsa(self): - addr = ('127.0.0.1', self.ssl_port) - cert = self._get_cert(addr, ciphers=CIPHER_MAP['ecdsa']) - self.assertEqual(cert.get_subject().commonName.decode(), 'www.example.com') - - def test_intermediate_ca_rsa(self): - with self.assertRaises(Exception): - self._intermediate_ca_t('rsa') - - def test_intermediate_ca_ecdsa(self): - self._intermediate_ca_t('ecdsa') - - -class TestMix(helpers.EnvironmentCase, CertSelectionMixin): - ''' - Tests for https for ATS configured with both ECDSA and RSA certificates - ''' - @classmethod - def setUpEnv(cls, env): - # Temporarily skipping TestMix until we can figure out how to specify underlying open ssl versions - # The behaviour of the intermediate cert chains depends on openssl version - raise helpers.unittest.SkipTest('Skip TestMix until we figure out openssl version tracking'); - # add an SSL port to ATS - cls.ssl_port = tsqa.utils.bind_unused_port()[1] - cls.configs['records.config']['CONFIG']['proxy.config.http.server_ports'] += ' {0}:ssl'.format(cls.ssl_port) - cls.configs['records.config']['CONFIG'].update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'ssl', - 
'proxy.config.ssl.server.cipher_suite': '{0}:{1}'.format(CIPHER_MAP['ecdsa'], CIPHER_MAP['rsa']), - }) - - # configure SSL multicert - cls.configs['ssl_multicert.config'].add_line('dest_ip=127.0.0.2 ssl_cert_name={0},{1} ssl_ca_name={2},{3}'.format( - helpers.tests_file_path('rsa_keys/www.example.com.pem'), - helpers.tests_file_path('ec_keys/www.example.com.pem'), - helpers.tests_file_path('rsa_keys/intermediate.crt'), - helpers.tests_file_path('ec_keys/intermediate.crt'), - )) - cls.configs['ssl_multicert.config'].add_line('dest_ip=127.0.0.2 ssl_cert_name={0},{1}'.format( - helpers.tests_file_path('rsa_keys/www.test.com.pem'), - helpers.tests_file_path('ec_keys/www.test.com.pem'), - )) - - cls.configs['ssl_multicert.config'].add_line('dest_ip=* ssl_cert_name={0},{1} ssl_ca_name={2},{3}'.format( - helpers.tests_file_path('rsa_keys/www.example.com.pem'), - helpers.tests_file_path('ec_keys/www.example.com.pem'), - helpers.tests_file_path('rsa_keys/intermediate.crt'), - helpers.tests_file_path('ec_keys/intermediate.crt'), - )) - cls.configs['ssl_multicert.config'].add_line('dest_ip=* ssl_cert_name={0},{1}'.format( - helpers.tests_file_path('rsa_keys/www.test.com.pem'), - helpers.tests_file_path('ec_keys/www.test.com.pem'), - )) - - def test_rsa(self): - addr = ('127.0.0.1', self.ssl_port) - cert = self._get_cert(addr, ciphers=CIPHER_MAP['rsa']) - self.assertEqual(cert.get_subject().commonName.decode(), 'www.example.com') - - def test_ecdsa(self): - addr = ('127.0.0.1', self.ssl_port) - cert = self._get_cert(addr, ciphers=CIPHER_MAP['ecdsa']) - self.assertEqual(cert.get_subject().commonName.decode(), 'www.example.com') - - def test_intermediate_ca_rsa(self): - self._intermediate_ca_t('rsa') - - def test_intermediate_ca_ecdsa(self): - self._intermediate_ca_t('ecdsa') - - -class TestConfigFileGroup(helpers.EnvironmentCase, CertSelectionMixin): - ''' - Tests for config file group with https - The config file group includes a parent file ssl_multicert.config and some 
children files. - when the content of a child file is updated but the file name hasn't been changed. - The behavior is the same as the parent file in the group has been changed. - In the test, a child file named www.unknown.com.pem, which is rsa_keys/www.test.com.pem at first, - is updated to ec_keys/www.test.com.pem. - The difference can be told by different results from calling get_cert() with different ciphers as paramters - ''' - @classmethod - def setUpEnv(cls, env): - # add an SSL port to ATS - cls.ssl_port = tsqa.utils.bind_unused_port()[1] - cls.configs['records.config']['CONFIG']['proxy.config.http.server_ports'] += ' {0}:ssl'.format(cls.ssl_port) - cls.configs['records.config']['CONFIG'].update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'ssl', - 'proxy.config.ssl.server.cipher_suite': '{0}:{1}'.format(CIPHER_MAP['ecdsa'], CIPHER_MAP['rsa']), - }) - cls.configs['ssl_multicert.config'].add_line('dest_ip=* ssl_cert_name={0},{1} ssl_ca_name={2},{3}'.format( - helpers.tests_file_path('rsa_keys/www.example.com.pem'), - helpers.tests_file_path('ec_keys/www.example.com.pem'), - helpers.tests_file_path('rsa_keys/intermediate.crt'), - helpers.tests_file_path('ec_keys/intermediate.crt'), - )) - cls.configs['ssl_multicert.config'].add_line('dest_ip=127.0.0.3 ssl_cert_name={0}'.format( - helpers.tests_file_path('www.unknown.com.pem'), - )) - os.system('cp %s %s' % (helpers.tests_file_path('rsa_keys/www.test.com.pem'), helpers.tests_file_path('www.unknown.com.pem'))) - log.info('cp %s %s' % (helpers.tests_file_path('rsa_keys/www.test.com.pem'), helpers.tests_file_path('www.unknown.com.pem'))) - - def test_config_file_group(self): - traffic_ctl = os.path.join(self.environment.layout.bindir, 'traffic_ctl') - signal_cmd = [traffic_ctl, 'config', 'reload'] - addr = ('127.0.0.3', self.ssl_port) - cert = self._get_cert(addr, ciphers=CIPHER_MAP['rsa']) - self.assertEqual(cert.get_subject().commonName.decode(), 'www.test.com') - with 
self.assertRaises(Exception): - self._get_cert(addr, ciphers=CIPHER_MAP['ecdsa']) - time.sleep(5) - os.system('cp %s %s' % (helpers.tests_file_path('ec_keys/www.test.com.pem'), helpers.tests_file_path('www.unknown.com.pem'))) - log.info('cp %s %s' % (helpers.tests_file_path('ec_keys/www.test.com.pem'), helpers.tests_file_path('www.unknown.com.pem'))) - os.system(' '.join(signal_cmd)) - log.info(signal_cmd) - # waiting for the reconfiguration completed - sec = 0 - while True: - time.sleep(5) - sec += 5 - log.info("reloading: %d seconds" % (sec)) - self.assertLess(sec, 30) - try: - self._get_cert(addr, ciphers=CIPHER_MAP['ecdsa']) - break - except: - continue - cert = self._get_cert(addr, ciphers=CIPHER_MAP['ecdsa']) - self.assertEqual(cert.get_subject().commonName.decode(), 'www.test.com') - with self.assertRaises(Exception): - self._get_cert(addr, ciphers=CIPHER_MAP['rsa']) - os.system('rm %s' %(helpers.tests_file_path('www.unknown.com.pem'))) diff --git a/ci/tsqa/tests/test_keepalive.py b/ci/tsqa/tests/test_keepalive.py deleted file mode 100644 index 9a675078fca..00000000000 --- a/ci/tsqa/tests/test_keepalive.py +++ /dev/null @@ -1,481 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import uuid -import requests -import time -import logging -import socket - -import helpers - -import tsqa.test_cases -import tsqa.utils -import tsqa.endpoint - -log = logging.getLogger(__name__) - -import SocketServer - - -class KeepaliveTCPHandler(SocketServer.BaseRequestHandler): - """ - A subclass of RequestHandler which will return a connection uuid - """ - - def handle(self): - # Receive the data in small chunks and retransmit it - start = time.time() - conn_id = uuid.uuid4().hex - while True: - now = time.time() - start - data = self.request.recv(4096).strip() - if data: - log.debug('Sending data back to the client: {uid}'.format(uid=conn_id)) - else: - log.debug('Client disconnected: {timeout}seconds'.format(timeout=now)) - break - body = conn_id - resp = ('HTTP/1.1 200 OK\r\n' - 'Content-Length: {content_length}\r\n' - 'Content-Type: text/html; charset=UTF-8\r\n' - 'Connection: keep-alive\r\n' - '\r\n' - '{body}'.format(content_length=len(body), body=body)) - self.request.sendall(resp) - - -class KeepAliveInMixin(object): - """Mixin for keep alive in. 
- - TODO: Allow protocol to be specified for ssl traffic - """ - def _get_socket(self): - s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - s.connect(('127.0.0.1', int(self.configs['records.config']['CONFIG']['proxy.config.http.server_ports']))) - return s - - def _headers_to_str(self, headers): - if headers is None: - headers = {} - request = '' - for k, v in headers.iteritems(): - request += '{0}: {1}\r\n'.format(k, v) - return request - - def _aux_KA_working_path_connid(self, protocol, headers=None): - if headers is None: - headers = {} - with requests.Session() as s: - url = '{0}://127.0.0.1:{1}/'.format(protocol, int(self.configs['records.config']['CONFIG']['proxy.config.http.server_ports'])) - conn_id = None - for x in xrange(1, 10): - ret = s.get(url, headers=headers) - self.assertEqual(ret.status_code, 200) - if conn_id is None: - conn_id = ret.text - else: - self.assertEqual(ret.text, conn_id) - - def _aux_working_path(self, protocol, headers=None): - # connect tcp - s = self._get_socket() - - request = ('GET /exists/ HTTP/1.1\r\n' - 'Host: foobar.com\r\n') - request += self._headers_to_str(headers) - request += '\r\n' - - for x in xrange(1, 10): - s.send(request) - response = s.recv(4096) - # cheat, since we know what the body should have - if not response.endswith('hello'): - response += s.recv(4096) - self.assertIn('HTTP/1.1 200 OK', response) - self.assertIn('hello', response) - - def _aux_error_path(self, protocol, headers=None): - # connect tcp - s = self._get_socket() - - request = ('GET / HTTP/1.1\r\n' - 'Host: foobar.com\r\n') - request += self._headers_to_str(headers) - request += '\r\n' - for x in xrange(1, 10): - s.send(request) - response = s.recv(4096) - self.assertIn('HTTP/1.1 404 Not Found on Accelerator', response) - - def _aux_error_path_post(self, protocol, headers=None): - ''' - Ensure that sending a request with a body doesn't break the keepalive session - ''' - # connect tcp - s = self._get_socket() - - request = ('POST / 
HTTP/1.1\r\n' - 'Host: foobar.com\r\n' - 'Content-Length: 10\r\n') - request += self._headers_to_str(headers) - request += '\r\n' - request += '1234567890' - - for x in xrange(1, 10): - try: - s.send(request) - except IOError: - s = self._get_socket() - s.send(request) - - response = s.recv(4096) - # Check if client disconnected - if response: - self.assertIn('HTTP/1.1 404 Not Found on Accelerator', response) - - -class BasicTestsOutMixin(object): - - def _aux_KA_origin(self, protocol, headers=None): - ''' - Test that the origin does in fact support keepalive - ''' - conn_id = None - with requests.Session() as s: - url = '{0}://127.0.0.1:{1}/'.format(protocol, self.socket_server.port) - for x in xrange(1, 10): - ret = s.get(url, verify=False, headers=headers) - if not conn_id: - conn_id = ret.text.strip() - self.assertEqual(ret.status_code, 200) - self.assertEqual(ret.text.strip(), conn_id, "Client reports server closed connection") - - def _aux_KA_proxy(self, protocol, headers=None): - ''' - Test that keepalive works through ATS to that origin - ''' - url = '{0}://127.0.0.1:{1}'.format( - protocol, - self.configs['records.config']['CONFIG']['proxy.config.http.server_ports'], - ) - conn_id = None - for x in xrange(1, 10): - ret = requests.get(url, verify=False, headers=headers) - if not conn_id: - conn_id = ret.text.strip() - self.assertEqual(ret.status_code, 200) - self.assertEqual(ret.text.strip(), conn_id, "Client reports server closed connection") - - -class TimeoutOutMixin(object): - - def _aux_KA_timeout_direct(self, protocol): - '''Tests that origin does not timeout using keepalive.''' - with requests.Session() as s: - url = '{0}://127.0.0.1:{1}/'.format(protocol, self.socket_server.port) - conn_id = None - for x in xrange(0, 3): - ret = s.get(url, verify=False) - if not conn_id: - conn_id = ret.text.strip() - self.assertEqual(ret.text.strip(), conn_id, "Client reports server closed connection") - time.sleep(3) - - def _aux_KA_timeout_proxy(self, protocol): 
- '''Tests that keepalive timeout is honored through ATS to origin.''' - url = '{0}://127.0.0.1:{1}'.format( - protocol, - self.configs['records.config']['CONFIG']['proxy.config.http.server_ports'], - ) - conn_id = None - for x in xrange(0, 3): - ret = requests.get(url, verify=False) - if not conn_id: - conn_id = ret.text.strip() - self.assertEqual(ret.text.strip(), conn_id, "Client reports server closed connection") - time.sleep(3) - - -class OriginMinMaxMixin(object): - - def _aux_KA_min_origin(self, protocol): - '''Tests that origin_min_keep_alive_connections is honored.''' - url = '{0}://127.0.0.1:{1}'.format( - protocol, - self.configs['records.config']['CONFIG']['proxy.config.http.server_ports'], - ) - ret = requests.get(url, verify=False) - conn_id = ret.text.strip() - time.sleep(3) - ret = requests.get(url, verify=False) - self.assertEqual(ret.text.strip(), conn_id, "Client reports server closed connection") - - -class TestKeepAliveInHTTP(tsqa.test_cases.DynamicHTTPEndpointCase, helpers.EnvironmentCase, KeepAliveInMixin): - @classmethod - def setUpEnv(cls, env): - - def hello(request): - return 'hello' - cls.http_endpoint.add_handler('/exists/', hello) - - cls.configs['remap.config'].add_line('map /exists/ http://127.0.0.1:{0}/exists/'.format(cls.http_endpoint.address[1])) - - # only add server headers when there weren't any - cls.configs['records.config']['CONFIG']['proxy.config.http.response_server_enabled'] = 2 - cls.configs['records.config']['CONFIG']['proxy.config.http.keep_alive_enabled_in'] = 1 - cls.configs['records.config']['CONFIG']['share_server_session'] = 2 - - # set only one ET_NET thread (so we don't have to worry about the per-thread pools causing issues) - cls.configs['records.config']['CONFIG']['proxy.config.exec_thread.limit'] = 1 - cls.configs['records.config']['CONFIG']['proxy.config.exec_thread.autoconfig'] = 0 - - def test_working_path(self): - self._aux_working_path("http") - - def test_error_path(self): - 
self._aux_error_path("http") - - def test_error_path_post(self): - ''' - Ensure that sending a request with a body doesn't break the keepalive session - ''' - self._aux_error_path_post("http") - - -class TestKeepAliveOriginConnOutHTTP(helpers.EnvironmentCase, OriginMinMaxMixin): - @classmethod - def setUpEnv(cls, env): - ''' - This function is responsible for setting up the environment for this fixture - This includes everything pre-daemon start - ''' - # create a socket server - cls.socket_server = tsqa.endpoint.SocketServerDaemon(KeepaliveTCPHandler) - cls.socket_server.start() - cls.socket_server.ready.wait() - cls.configs['remap.config'].add_line('map / http://127.0.0.1:{0}/'.format(cls.socket_server.port)) - - # only add server headers when there weren't any - cls.configs['records.config']['CONFIG']['proxy.config.http.response_server_enabled'] = 2 - cls.configs['records.config']['CONFIG']['proxy.config.http.keep_alive_enabled_out'] = 1 - cls.configs['records.config']['CONFIG']['share_server_session'] = 2 - - # set only one ET_NET thread (so we don't have to worry about the per-thread pools causing issues) - cls.configs['records.config']['CONFIG']['proxy.config.exec_thread.limit'] = 1 - cls.configs['records.config']['CONFIG']['proxy.config.exec_thread.autoconfig'] = 0 - - # Timeouts - cls.configs['records.config']['CONFIG']['proxy.config.http.keep_alive_no_activity_timeout_out'] = 1 - cls.configs['records.config']['CONFIG']['proxy.config.http.transaction_no_activity_timeout_out'] = 1 - - cls.configs['records.config']['CONFIG']['proxy.config.http.origin_min_keep_alive_connections'] = 1 - - def test_KA_min_origin(self): - '''Tests that origin_min_keep_alive_connections is honored via http.''' - self._aux_KA_min_origin("http") - - -class TestKeepAliveOriginConnOutHTTPS(helpers.EnvironmentCase, OriginMinMaxMixin): - @classmethod - def setUpEnv(cls, env): - ''' - This function is responsible for setting up the environment for this fixture - This includes everything 
pre-daemon start - ''' - # create a socket server - cls.socket_server = tsqa.endpoint.SSLSocketServerDaemon( - KeepaliveTCPHandler, - helpers.tests_file_path('cert.pem'), - helpers.tests_file_path('key.pem'), - ) - cls.socket_server.start() - cls.socket_server.ready.wait() - cls.configs['remap.config'].add_line('map / https://127.0.0.1:{0}/\n'.format(cls.socket_server.port)) - - # only add server headers when there weren't any - cls.configs['records.config']['CONFIG']['proxy.config.http.response_server_enabled'] = 2 - cls.configs['records.config']['CONFIG']['proxy.config.http.keep_alive_enabled_out'] = 1 - cls.configs['records.config']['CONFIG']['share_server_session'] = 2 - - # set only one ET_NET thread (so we don't have to worry about the per-thread pools causing issues) - cls.configs['records.config']['CONFIG']['proxy.config.exec_thread.limit'] = 1 - cls.configs['records.config']['CONFIG']['proxy.config.exec_thread.autoconfig'] = 0 - - # Timeouts - cls.configs['records.config']['CONFIG']['proxy.config.http.keep_alive_no_activity_timeout_out'] = 1 - cls.configs['records.config']['CONFIG']['proxy.config.http.transaction_no_activity_timeout_out'] = 1 - - cls.configs['records.config']['CONFIG']['proxy.config.http.origin_min_keep_alive_connections'] = 1 - - def test_KA_min_origin(self): - '''Tests that origin_min_keep_alive_connections is honored via https.''' - self._aux_KA_min_origin("http") - - -class TestKeepAliveOutHTTP(helpers.EnvironmentCase, BasicTestsOutMixin, TimeoutOutMixin): - @classmethod - def setUpEnv(cls, env): - ''' - This function is responsible for setting up the environment for this fixture - This includes everything pre-daemon start - ''' - # create a socket server - cls.socket_server = tsqa.endpoint.SocketServerDaemon(KeepaliveTCPHandler) - cls.socket_server.start() - cls.socket_server.ready.wait() - cls.configs['remap.config'].add_line('map / http://127.0.0.1:{0}/'.format(cls.socket_server.port)) - - # only add server headers when there 
weren't any - cls.configs['records.config']['CONFIG']['proxy.config.http.response_server_enabled'] = 2 - cls.configs['records.config']['CONFIG']['proxy.config.http.keep_alive_enabled_out'] = 1 - cls.configs['records.config']['CONFIG']['share_server_session'] = 2 - - # set only one ET_NET thread (so we don't have to worry about the per-thread pools causing issues) - cls.configs['records.config']['CONFIG']['proxy.config.exec_thread.limit'] = 1 - cls.configs['records.config']['CONFIG']['proxy.config.exec_thread.autoconfig'] = 0 - - # Timeouts - cls.configs['records.config']['CONFIG']['proxy.config.http.keep_alive_no_activity_timeout_out'] = 10 - cls.configs['records.config']['CONFIG']['proxy.config.http.transaction_no_activity_timeout_out'] = 2 - - def test_KA_origin(self): - '''Test that the origin does in fact support keepalive via http.''' - self._aux_KA_origin("http") - - def test_KA_proxy(self): - '''Tests that keepalive works through ATS to origin via http.''' - self._aux_KA_proxy("http") - - def test_KA_timeout_direct(self): - '''Tests that origin does not timeout using keepalive via http.''' - self._aux_KA_timeout_direct("http") - - def test_KA_timeout_proxy(self): - '''Tests that keepalive timeout is honored through ATS to origin via http.''' - self._aux_KA_timeout_proxy("http") - - -class TestKeepAliveOutHTTPS(helpers.EnvironmentCase, BasicTestsOutMixin, TimeoutOutMixin): - @classmethod - def setUpEnv(cls, env): - ''' - This function is responsible for setting up the environment for this fixture - This includes everything pre-daemon start - ''' - # create a socket server - cls.socket_server = tsqa.endpoint.SSLSocketServerDaemon( - KeepaliveTCPHandler, - helpers.tests_file_path('cert.pem'), - helpers.tests_file_path('key.pem'), - ) - cls.socket_server.start() - cls.socket_server.ready.wait() - cls.configs['remap.config'].add_line('map / https://127.0.0.1:{0}/\n'.format(cls.socket_server.port)) - - # only add server headers when there weren't any - 
cls.configs['records.config']['CONFIG']['proxy.config.http.response_server_enabled'] = 2 - cls.configs['records.config']['CONFIG']['proxy.config.http.keep_alive_enabled_out'] = 1 - cls.configs['records.config']['CONFIG']['share_server_session'] = 2 - - # set only one ET_NET thread (so we don't have to worry about the per-thread pools causing issues) - cls.configs['records.config']['CONFIG']['proxy.config.exec_thread.limit'] = 1 - cls.configs['records.config']['CONFIG']['proxy.config.exec_thread.autoconfig'] = 0 - - # Timeouts - cls.configs['records.config']['CONFIG']['proxy.config.http.keep_alive_no_activity_timeout_out'] = 10 - cls.configs['records.config']['CONFIG']['proxy.config.http.transaction_no_activity_timeout_out'] = 2 - - def test_KA_origin(self): - '''Test that the origin does in fact support keepalive via https.''' - self._aux_KA_origin("https") - - def test_KA_proxy(self): - '''Tests that keepalive works through ATS to origin via https.''' - self._aux_KA_proxy("http") - - def test_KA_timeout_direct(self): - '''Tests that origin does not timeout using keepalive via https.''' - self._aux_KA_timeout_direct("https") - - def test_KA_timeout_proxy(self): - '''Tests that keepalive timeout is honored through ATS to origin via https.''' - self._aux_KA_timeout_proxy("http") - - -# TODO: refactor these tests, these are *very* similar, we should paramatarize them -# Some basic tests for auth_sever_session_private -class TestKeepAlive_Authorization_private(helpers.EnvironmentCase, BasicTestsOutMixin, KeepAliveInMixin): - @classmethod - def setUpEnv(cls, env): - - cls.socket_server = tsqa.endpoint.SocketServerDaemon(KeepaliveTCPHandler) - cls.socket_server.start() - cls.socket_server.ready.wait() - cls.configs['remap.config'].add_line('map / http://127.0.0.1:{0}/exists/'.format(cls.socket_server.port)) - - # only add server headers when there weren't any - cls.configs['records.config']['CONFIG']['proxy.config.http.response_server_enabled'] = 2 - 
cls.configs['records.config']['CONFIG']['proxy.config.http.keep_alive_enabled_in'] = 1 - cls.configs['records.config']['CONFIG']['share_server_session'] = 2 - - # set only one ET_NET thread (so we don't have to worry about the per-thread pools causing issues) - cls.configs['records.config']['CONFIG']['proxy.config.exec_thread.limit'] = 1 - cls.configs['records.config']['CONFIG']['proxy.config.exec_thread.autoconfig'] = 0 - - # make auth sessions private - cls.configs['records.config']['CONFIG']['proxy.config.auth_server_session_private'] = 1 - - def test_KA_server(self): - '''Tests that keepalive works through ATS to origin via https.''' - with self.assertRaises(AssertionError): - self._aux_KA_proxy("http", headers={'Authorization': 'Foo'}) - - def test_KA_client(self): - '''Tests that keepalive works through ATS to origin via https.''' - with self.assertRaises(AssertionError): - self._aux_KA_working_path_connid("http", headers={'Authorization': 'Foo'}) - - -class TestKeepAlive_Authorization_no_private(helpers.EnvironmentCase, BasicTestsOutMixin, KeepAliveInMixin): - @classmethod - def setUpEnv(cls, env): - - cls.socket_server = tsqa.endpoint.SocketServerDaemon(KeepaliveTCPHandler) - cls.socket_server.start() - cls.socket_server.ready.wait() - cls.configs['remap.config'].add_line('map / http://127.0.0.1:{0}/exists/'.format(cls.socket_server.port)) - - # only add server headers when there weren't any - cls.configs['records.config']['CONFIG']['proxy.config.http.response_server_enabled'] = 2 - cls.configs['records.config']['CONFIG']['proxy.config.http.keep_alive_enabled_in'] = 1 - cls.configs['records.config']['CONFIG']['share_server_session'] = 2 - - # set only one ET_NET thread (so we don't have to worry about the per-thread pools causing issues) - cls.configs['records.config']['CONFIG']['proxy.config.exec_thread.limit'] = 1 - cls.configs['records.config']['CONFIG']['proxy.config.exec_thread.autoconfig'] = 0 - - # make auth sessions private - 
cls.configs['records.config']['CONFIG']['proxy.config.http.auth_server_session_private'] = 0 - - def test_KA_server(self): - '''Tests that keepalive works through ATS to origin via https.''' - self._aux_KA_proxy("http", headers={'Authorization': 'Foo'}) - - def test_KA_client(self): - '''Tests that keepalive works through ATS to origin via https.''' - self._aux_KA_working_path_connid("http", headers={'Authorization': 'Foo'}) diff --git a/ci/tsqa/tests/test_origin_max_connections.py b/ci/tsqa/tests/test_origin_max_connections.py deleted file mode 100644 index c5bf41a3c37..00000000000 --- a/ci/tsqa/tests/test_origin_max_connections.py +++ /dev/null @@ -1,213 +0,0 @@ -''' -Test the configure entry : proxy.config.http.origin_max_connections -''' - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import time -import logging -import uuid -import socket -import requests -import tsqa.test_cases -import helpers -import thread -from multiprocessing import Pool -import SocketServer -import os - -log = logging.getLogger(__name__) - - -# TODO: seems like a useful shared class- either add to httpbin or some shared lib -class KAHandler(SocketServer.BaseRequestHandler): - '''SocketServer that returns the connection-id as the body - ''' - # class variable to set number of active sessions - alive_sessions = 0 - - def handle(self): - KAHandler.alive_sessions += 1 - # Receive the data in small chunks and retransmit it - conn_id = uuid.uuid4().hex - start = time.time() - while True: - data = self.request.recv(4096).strip() - if data: - log.info('Sending data back to the client: {uid}'.format(uid=conn_id)) - else: - log.info('Client disconnected: {timeout}seconds'.format(timeout=time.time() - start)) - break - body = conn_id - if 'timeout' in data: - print 'sleep for a long time!' - time.sleep(4) - else: - time.sleep(2) - resp = ('HTTP/1.1 200 OK\r\n' - 'Content-Length: {content_length}\r\n' - 'Content-Type: text/html; charset=UTF-8\r\n' - 'Connection: keep-alive\r\n' - 'X-Current-Sessions: {alive_sessions}\r\n' - '\r\n' - '{body}'.format(content_length=len(body), alive_sessions=KAHandler.alive_sessions, body=body)) - self.request.sendall(resp) - KAHandler.alive_sessions -= 1 - - -class TestKeepAlive_Origin_Max_connections(helpers.EnvironmentCase): - @classmethod - def setUpEnv(cls, env): - cls.traffic_server_host = '127.0.0.1' - cls.traffic_server_port = int(cls.configs['records.config']['CONFIG']['proxy.config.http.server_ports']) - cls.socket_server_port = int(tsqa.utils.bind_unused_port()[1]) - - log.info("socket_server_port = %d" % (cls.socket_server_port)) - cls.server = tsqa.endpoint.SocketServerDaemon(KAHandler, port=cls.socket_server_port) - cls.server.start() - cls.server.ready.wait() - - cls.socket_server_port2 = int(tsqa.utils.bind_unused_port()[1]) - 
cls.server2 = tsqa.endpoint.SocketServerDaemon(KAHandler, port=cls.socket_server_port2) - cls.server2.start() - cls.server2.ready.wait() - - queue_path = os.path.join(cls.environment.layout.sysconfdir, 'queue.conf') - with open(queue_path, 'w') as fh: - fh.write('CONFIG proxy.config.http.origin_max_connections_queue INT 2') - - noqueue_path = os.path.join(cls.environment.layout.sysconfdir, 'noqueue.conf') - with open(noqueue_path, 'w') as fh: - fh.write('CONFIG proxy.config.http.origin_max_connections_queue INT 0') - - cls.configs['remap.config'].add_line('map /other/queue/ http://127.0.0.1:{0} @plugin=conf_remap.so @pparam={1}'.format(cls.socket_server_port2, queue_path)) - cls.configs['remap.config'].add_line('map /other/noqueue/ http://127.0.0.1:{0} @plugin=conf_remap.so @pparam={1}'.format(cls.socket_server_port2, noqueue_path)) - cls.configs['remap.config'].add_line('map /other/ http://127.0.0.1:{0}'.format(cls.socket_server_port2)) - cls.configs['remap.config'].add_line('map /queue/ http://127.0.0.1:{0} @plugin=conf_remap.so @pparam={1}'.format(cls.socket_server_port, queue_path)) - cls.configs['remap.config'].add_line('map /noqueue/ http://127.0.0.1:{0} @plugin=conf_remap.so @pparam={1}'.format(cls.socket_server_port, noqueue_path)) - cls.configs['remap.config'].add_line('map / http://127.0.0.1:{0}'.format(cls.socket_server_port)) - - cls.configs['records.config']['CONFIG'].update({ - 'proxy.config.http.origin_max_connections': 1, - 'proxy.config.http.keep_alive_enabled_out': 1, - 'proxy.config.http.keep_alive_no_activity_timeout_out': 1, - 'proxy.config.http.transaction_active_timeout_out': 2, - 'proxy.config.http.connect_attempts_timeout': 2, - 'proxy.config.http.connect_attempts_rr_retries': 0, - 'proxy.config.exec_thread.limit': 1, - 'proxy.config.exec_thread.autoconfig': 0, - }) - - def _send_requests(self, total_requests, path='', other=False): - url = 'http://{0}:{1}/{2}'.format(self.traffic_server_host, self.traffic_server_port, path) - url2 = 
'http://{0}:{1}/other/{2}'.format(self.traffic_server_host, self.traffic_server_port, path) - jobs = [] - jobs2 = [] - pool = Pool(processes=4) - for _ in xrange(0, total_requests): - jobs.append(pool.apply_async(requests.get, (url,))) - if other: - jobs2.append(pool.apply_async(requests.get, (url2,))) - - results = [] - results2 = [] - for j in jobs: - try: - results.append(j.get()) - except Exception as e: - results.append(e) - - for j in jobs2: - try: - results2.append(j.get()) - except Exception as e: - results2.append(e) - - return results, results2 - - - # TODO: enable after TS-4340 is merged - # and re-enable `other` for the remaining queueing tests - def tesst_origin_scoping(self): - '''Send 2 requests to loopback (on separate ports) and ensure that they run in parallel - ''' - results, results2 = self._send_requests(1, other=True) - - # TS-4340 - # ensure that the 2 origins (2 different ports on loopback) were running in parallel - for i in xrange(0, REQUEST_COUNT): - self.assertEqual(int(results[i].get().headers['X-Current-Sessions']), 2) - self.assertEqual(int(results2[i].get().headers['X-Current-Sessions']), 2) - - def test_origin_default_queueing(self): - '''By default we have no queue limit - ''' - REQUEST_COUNT = 4 - results, results2 = self._send_requests(REQUEST_COUNT) - - for x in xrange(0, REQUEST_COUNT): - self.assertEqual(results[x].status_code, 200) - #self.assertEqual(results2[x].status_code, 200) - - def test_origin_queueing(self): - '''If a queue is set, N requests are queued and the rest immediately fail - ''' - REQUEST_COUNT = 4 - results, results2 = self._send_requests(REQUEST_COUNT, path='queue/') - - success = 0 - fail = 0 - for x in xrange(0, REQUEST_COUNT): - if results[x].status_code == 200: - success += 1 - else: - fail += 1 - self.assertEqual(success, 3) - - def test_origin_queueing_timeouts(self): - '''Lets have some requests timeout and ensure that the queue is freed up - ''' - REQUEST_COUNT = 4 - results, results2 = 
self._send_requests(REQUEST_COUNT, path='queue/timeout') - - success = 0 - fail = 0 - for x in xrange(0, REQUEST_COUNT): - if results[x].status_code == 200: - success += 1 - print 'success', x - else: - fail += 1 - self.assertEqual(fail, 4) - - self.test_origin_queueing() - - def test_origin_no_queueing(self): - '''If the queue is set to 0, all requests past the max immediately fail - ''' - REQUEST_COUNT = 4 - results, results2 = self._send_requests(REQUEST_COUNT, path='noqueue/') - - success = 0 - fail = 0 - for x in xrange(0, REQUEST_COUNT): - if results[x].status_code == 200: - success += 1 - else: - fail += 1 - print 'results:', success, fail - self.assertEqual(success, 1) diff --git a/ci/tsqa/tests/test_origin_min_keep_alive_connection.py b/ci/tsqa/tests/test_origin_min_keep_alive_connection.py deleted file mode 100644 index 36625f597ec..00000000000 --- a/ci/tsqa/tests/test_origin_min_keep_alive_connection.py +++ /dev/null @@ -1,99 +0,0 @@ -''' -Test the configure entry : proxy.config.http.origin_min_keep_alive_connections -''' - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import time -import logging -import uuid -import socket -import requests -import tsqa.test_cases -import helpers -import SocketServer - -log = logging.getLogger(__name__) - - -class KAHandler(SocketServer.BaseRequestHandler): - """ - A subclass of RequestHandler which return chunked encoding optionally - - /parts/sleep_time/close - parts: number of parts to send - sleep_time: time between parts - close: bool whether to close properly - """ - - def handle(self): - # Receive the data in small chunks and retransmit it - conn_id = uuid.uuid4().hex - start = time.time() - while True: - data = self.request.recv(4096).strip() - if data: - log.info('Sending data back to the client: {uid}'.format(uid=conn_id)) - else: - log.info('Client disconnected: {timeout}seconds'.format(timeout=time.time() - start)) - break - body = conn_id - time.sleep(1) - resp = ('HTTP/1.1 200 OK\r\n' - 'Content-Length: {content_length}\r\n' - 'Content-Type: text/html; charset=UTF-8\r\n' - 'Connection: keep-alive\r\n' - '\r\n' - '{body}'.format(content_length=len(body), body=body)) - self.request.sendall(resp) - - -class TestKeepAlive_Origin_Min_connections(helpers.EnvironmentCase): - @classmethod - def setUpEnv(cls, env): - cls.traffic_server_host = '127.0.0.1' - cls.traffic_server_port = int(cls.configs['records.config']['CONFIG']['proxy.config.http.server_ports']) - cls.socket_server_port = int(tsqa.utils.bind_unused_port()[1]) - log.info("socket_server_port = %d" % (cls.socket_server_port)) - cls.server = tsqa.endpoint.SocketServerDaemon(KAHandler, port=cls.socket_server_port) - cls.server.start() - cls.server.ready.wait() - - cls.configs['remap.config'].add_line('map / http://127.0.0.1:{0}'.format(cls.socket_server_port)) - cls.origin_keep_alive_timeout = 1 - - cls.configs['records.config']['CONFIG'].update({ - 'proxy.config.http.origin_min_keep_alive_connections': 1, - 'proxy.config.http.keep_alive_enabled_out': 1, - 'proxy.config.http.keep_alive_no_activity_timeout_out': 
cls.origin_keep_alive_timeout, - 'proxy.config.exec_thread.limit': 1, - 'proxy.config.exec_thread.autoconfig': 0, - }) - - def test_origin_min_connection(self): - response_uuids = [] - # make the request N times, ensure that they are on the same connection - for _ in xrange(0, 3): - ret = requests.get('http://{0}:{1}/'.format(self.traffic_server_host, self.traffic_server_port)) - response_uuids.append(ret.text) - - self.assertEqual(1, len(set(response_uuids))) - - # sleep for a time greater than the keepalive timeout and ensure its the same connection - time.sleep(self.origin_keep_alive_timeout * 2) - ret = requests.get('http://{0}:{1}/'.format(self.traffic_server_host, self.traffic_server_port)) - self.assertEqual(ret.text, response_uuids[0]) diff --git a/ci/tsqa/tests/test_redirection.py b/ci/tsqa/tests/test_redirection.py deleted file mode 100644 index 5a0b4ce960c..00000000000 --- a/ci/tsqa/tests/test_redirection.py +++ /dev/null @@ -1,42 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import requests -import helpers -import tsqa.test_cases -import tsqa.utils -import tsqa.endpoint - - -class TestRedirection(helpers.EnvironmentCase, tsqa.test_cases.HTTPBinCase): - @classmethod - def setUpEnv(cls, env): - cls.configs['records.config']['CONFIG'].update({ - 'proxy.config.http.redirection_enabled': 1, - 'proxy.config.http.number_of_redirections': 10 - }) - cls.configs['remap.config'].add_line('map / http://127.0.0.1:{0}'.format(cls.http_endpoint.address[1])) - - def test_redirection(self): - server_ports = self.configs['records.config']['CONFIG']['proxy.config.http.server_ports'] - - # By default Requests will perform location redirection - # Disable redirection handling with the allow_redirects parameter - r = requests.get('http://127.0.0.1:{0}/redirect/9'.format(server_ports), allow_redirects=False) - self.assertEqual(r.status_code, 200) - - r = requests.get('http://127.0.0.1:{0}/redirect/10'.format(server_ports), allow_redirects=False) - self.assertEqual(r.status_code, 302) diff --git a/ci/tsqa/tests/test_remap.py b/ci/tsqa/tests/test_remap.py deleted file mode 100644 index 9b42f103a3b..00000000000 --- a/ci/tsqa/tests/test_remap.py +++ /dev/null @@ -1,130 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import requests -import logging - -import helpers - -import tsqa.test_cases -import tsqa.utils -import tsqa.endpoint - -log = logging.getLogger(__name__) - - -class TestRemapHTTP(tsqa.test_cases.DynamicHTTPEndpointCase, helpers.EnvironmentCase): - @classmethod - def setUpEnv(cls, env): - cls.configs['records.config']['CONFIG'].update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'url.*', - }) - - cls.configs['remap.config'].add_line( - 'map http://www.example.com http://127.0.0.1:{0}'.format(cls.http_endpoint.address[1]) - ) - cls.configs['remap.config'].add_line( - 'map http://www.example.com:8080 http://127.0.0.1:{0}'.format(cls.http_endpoint.address[1]) - ) - - def hello(request): - return 'hello' - cls.http_endpoint.add_handler('/', hello) - - def test_remap_http(self): - s = requests.Session() - http_port = self.configs['records.config']['CONFIG']['proxy.config.http.server_ports'] - url = 'http://127.0.0.1:{0}/'.format(http_port) - - ret = s.get(url) - self.assertEqual(ret.status_code, 404) - - s.headers.update({'Host': 'www.example.com'}) - ret = s.get(url) - self.assertEqual(ret.status_code, 200) - - s.headers.update({'Host': 'www.example.com:80'}) - ret = s.get(url) - self.assertEqual(ret.status_code, 200) - - s.headers.update({'Host': 'www.example.com:8080'}) - ret = s.get(url) - self.assertEqual(ret.status_code, 200) - - s.headers.update({'Host': 'www.test.com'}) - ret = s.get(url) - self.assertEqual(ret.status_code, 404) - - s.headers.update({'Host': 'www.example.com:1234'}) - ret = s.get(url) - self.assertEqual(ret.status_code, 404) - - -class TestRemapHTTPS(tsqa.test_cases.DynamicHTTPEndpointCase, helpers.EnvironmentCase): - @classmethod - def setUpEnv(cls, env): - # set an SSL port to ATS - cls.ssl_port = tsqa.utils.bind_unused_port()[1] - cls.configs['records.config']['CONFIG']['proxy.config.http.server_ports'] += ' {0}:ssl'.format(cls.ssl_port) - cls.configs['records.config']['CONFIG'].update({ - 
'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'url.*' - }) - - cls.configs['remap.config'].add_line( - 'map https://www.example.com http://127.0.0.1:{0}'.format(cls.http_endpoint.address[1]) - ) - cls.configs['remap.config'].add_line( - 'map https://www.example.com:4443 http://127.0.0.1:{0}'.format(cls.http_endpoint.address[1]) - ) - # configure SSL multicert - cls.configs['ssl_multicert.config'].add_line( - 'dest_ip=* ssl_cert_name={0}'.format(helpers.tests_file_path('rsa_keys/www.example.com.pem')) - ) - - def hello(request): - return 'hello' - cls.http_endpoint.add_handler('/', hello) - - def test_remap_https(self): - s = requests.Session() - url = 'https://127.0.0.1:{0}/'.format(self.ssl_port) - - # We lack of SNI support in requests module, so we do not verify SSL certificate here. - # ret = s.get(url, verify=(helpers.tests_file_path('certs/ca.crt'))) - ret = s.get(url, verify=False) - self.assertEqual(ret.status_code, 404) - - s.headers.update({'Host': 'www.example.com'}) - ret = s.get(url, verify=False) - self.assertEqual(ret.status_code, 200) - - s.headers.update({'Host': 'www.example.com:443'}) - ret = s.get(url) - self.assertEqual(ret.status_code, 200) - - s.headers.update({'Host': 'www.example.com:4443'}) - ret = s.get(url) - self.assertEqual(ret.status_code, 200) - - s.headers.update({'Host': 'www.test.com'}) - ret = s.get(url) - self.assertEqual(ret.status_code, 404) - - s.headers.update({'Host': 'www.example.com:1234'}) - ret = s.get(url) - self.assertEqual(ret.status_code, 404) diff --git a/ci/tsqa/tests/test_tls_ticket_key_rotation.py b/ci/tsqa/tests/test_tls_ticket_key_rotation.py deleted file mode 100644 index 7af883a529e..00000000000 --- a/ci/tsqa/tests/test_tls_ticket_key_rotation.py +++ /dev/null @@ -1,175 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. 
See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from OpenSSL import SSL -import socket -import subprocess - -import helpers -import tsqa.utils - -import os - - -# helper function to get the path of a program. -def which(program): - def is_exe(fpath): - return os.path.isfile(fpath) and os.access(fpath, os.X_OK) - fpath, fname = os.path.split(program) - if fpath: - if is_exe(program): - return program - else: - for path in os.environ["PATH"].split(os.pathsep): - path = path.strip('"') - exe_file = os.path.join(path, program) - if is_exe(exe_file): - return exe_file - return None - - -class TestTLSTicketKeyRotation(helpers.EnvironmentCase): - """ - Test TLS session resumption through session tickets and TLS ticket key rotation. 
- """ - @classmethod - def setUpEnv(cls, env): - ''' - This function is responsible for setting up the environment for this fixture - This includes everything pre-daemon start - ''' - - # add an SSL port to ATS - cls.ssl_port = tsqa.utils.bind_unused_port()[1] - cls.configs['records.config']['CONFIG']['proxy.config.http.server_ports'] += ' {0}:ssl'.format(cls.ssl_port) - cls.configs['records.config']['CONFIG']['proxy.config.diags.debug.enabled'] = 1 - cls.configs['records.config']['CONFIG']['proxy.config.diags.debug.tags'] = 'ssl' - - # configure SSL multicert - - cls.configs['ssl_multicert.config'].add_line('dest_ip=* ssl_cert_name={0} ssl_key_name={1} ticket_key_name={2}'.format(helpers.tests_file_path('rsa_keys/ca.crt'), helpers.tests_file_path('rsa_keys/ca.key'), helpers.tests_file_path('rsa_keys/ssl_ticket.key'))) - - def start_connection(self, addr): - ''' - Return the certificate for addr. - ''' - ctx = SSL.Context(SSL.SSLv23_METHOD) - # Set up client - sock = SSL.Connection(ctx, socket.socket(socket.AF_INET, socket.SOCK_STREAM)) - sock.connect(addr) - sock.do_handshake() - - def test_tls_ticket_resumption(self): - ''' - Make sure the new ticket key is loaded - ''' - addr = ('127.0.0.1', self.ssl_port) - self.start_connection(addr) - - # openssl s_client -connect 127.0.0.1:443 -tls1 < /dev/null - sess = os.path.join(self.environment.layout.logdir, 'sess') - ticket_cmd = 'echo | openssl s_client -connect {0}:{1} -sess_out {2}'.format(addr[0], addr[1], sess) - - # check whether TLS session tickets are received by s_client. 
- stdout, _ = tsqa.utils.run_sync_command(ticket_cmd, stdout=subprocess.PIPE, shell=True) - ticket_exists = False - for line in stdout.splitlines(): - text = line.strip() - if text.startswith("TLS session ticket:"): - ticket_exists = True - break - self.assertTrue(ticket_exists, "Sesssion tickets are not received") - - # check whether the session has been reused - reused = False - ticket_cmd = 'echo | openssl s_client -connect {0}:{1} -sess_in {2}'.format(addr[0], addr[1], sess) - stdout, _ = tsqa.utils.run_sync_command(ticket_cmd, stdout=subprocess.PIPE, shell=True) - for line in stdout.splitlines(): - text = line.strip() - if text.startswith("Reused, TLSv1/SSLv3,"): - reused = True - break - self.assertTrue(reused, "TLS session was not reused!") - - # negative test case. The session is not reused. - reused = False - ticket_cmd = 'echo | openssl s_client -connect {0}:{1}'.format(addr[0], addr[1]) - stdout, _ = tsqa.utils.run_sync_command(ticket_cmd, stdout=subprocess.PIPE, shell=True) - for line in stdout.splitlines(): - text = line.strip() - if text.startswith("Reused, TLSv1/SSLv3,"): - reused = True - break - self.assertFalse(reused, "TLS session has been reused!") - - def test_tls_ticket_rotation(self): - ''' - Make sure the new ticket key is loaded - ''' - traffic_ctl = os.path.join(self.environment.layout.bindir, 'traffic_ctl') - addr = ('127.0.0.1', self.ssl_port) - self.start_connection(addr) - - ''' - openssl s_client -connect server_ip:ssl_port -tls1 < /dev/null - ''' - - # Generate and push a new ticket key - rotate_cmd = 'openssl rand 48 -base64 > {0}'.format(helpers.tests_file_path('rsa_keys/ssl_ticket.key')) - stdout, _ = tsqa.utils.run_sync_command(rotate_cmd, stdout=subprocess.PIPE, shell=True) - - # touch the ssl_multicert.config file - ssl_multicert = os.path.join(self.environment.layout.sysconfdir, 'ssl_multicert.config') - - read_renewed_cmd = [ - traffic_ctl, 'config', 'get', 'proxy.process.ssl.total_ticket_keys_renewed' - ] - - # Check whether 
the config file exists. - self.assertTrue(os.path.isfile(ssl_multicert), ssl_multicert) - touch_cmd = which('touch') + ' ' + ssl_multicert - tsqa.utils.run_sync_command(touch_cmd, stdout=subprocess.PIPE, shell=True) - - count = 0 - while True: - try: - stdout, _ = tsqa.utils.run_sync_command(read_renewed_cmd, stdout=subprocess.PIPE, shell=True) - old_renewed = stdout - break - except Exception: - count += 1 - # If we have tried 30 times and the command still failed, quit here. - if count > 30: - self.assertTrue(False, "Failed to get the number of renewed keys!") - - signal_cmd = [traffic_ctl, 'config', 'reload'] - tsqa.utils.run_sync_command(signal_cmd, stdout=subprocess.PIPE, shell=True) - - # wait for the ticket keys to be sucked in by traffic_server. - count = 0 - while True: - try: - stdout, _ = tsqa.utils.run_sync_command(read_renewed_cmd, stdout=subprocess.PIPE, shell=True) - cur_renewed = stdout - if old_renewed != cur_renewed: - break - except Exception: - ++count - if count > 30: - self.assertTrue(False, "Failed to get the number of renewed keys!") - - # the number of ticket keys renewed has been increased. 
- self.assertNotEqual(old_renewed, cur_renewed) diff --git a/cmd/traffic_manager/metrics.cc b/cmd/traffic_manager/metrics.cc index 988cce7574b..f06d5a5b37a 100644 --- a/cmd/traffic_manager/metrics.cc +++ b/cmd/traffic_manager/metrics.cc @@ -46,6 +46,7 @@ struct Evaluator { bool bind(lua_State *L, const char *metric, const char *expression) { + const char *err = nullptr; if (RecGetRecordDataType(metric, &this->data_type) != REC_ERR_OKAY) { return false; } @@ -55,8 +56,9 @@ struct Evaluator { switch (luaL_loadstring(L, expression)) { case LUA_ERRSYNTAX: case LUA_ERRMEM: - Debug("lua", "loadstring failed for %s", metric); - luaL_error(L, "invalid expression for %s", metric); + err = luaL_checkstring(L, -1); + Debug("lua", "loadstring failed for %s: %s", metric, err); + luaL_error(L, "invalid expression for %s: %s", metric, err); return false; case 0: break; // success diff --git a/configure.ac b/configure.ac index 7401e1ef5cc..f067b0df3df 100644 --- a/configure.ac +++ b/configure.ac @@ -1857,6 +1857,11 @@ AS_IF([test "x$enable_experimental_plugins" = "xyes"], AM_CONDITIONAL([BUILD_REMAP_STATS_PLUGIN], [ test "x$enable_experimental_plugins" = "xyes" -a "x$ac_cv_header_search_h" = "xyes" -a "x$ac_cv_type_struct_hsearch_data" = "xyes" -a "x$ac_cv_func_hcreate_r" = "xyes" -a "x$ac_cv_func_hsearch_r" = "xyes" ]) +# This needed to be promoted to be more global. +# For now just use the same hardwired value, but this could now be set up to be configured if useful. +default_stack_size=1048576 +AC_SUBST([default_stack_size], [$default_stack_size]) + # # use modular IOCORE # diff --git a/doc/admin-guide/files/records.config.en.rst b/doc/admin-guide/files/records.config.en.rst index b1dfed9cf1a..6fea6437f43 100644 --- a/doc/admin-guide/files/records.config.en.rst +++ b/doc/admin-guide/files/records.config.en.rst @@ -1091,19 +1091,6 @@ ip-resolve according to this setting then it will be used, otherwise it will be released to the pool and a different session selected or created. 
-.. ts:cv:: CONFIG proxy.config.http.safe_requests_retryable INT 1 - :overridable: - - This setting, on by default, allows requests which are considered safe to be retried on an error. - See https://tools.ietf.org/html/rfc7231#section-4.2.1 to RFC for details on which request methods are considered safe. - - If this setting is ``0`` then ATS retries a failed origin server request only if the bytes sent by ATS - are not acknowledged by the origin server. - - If this setting is ``1`` then ATS retries all the safe methods to a failed origin server irrespective of - previous connection failure status. - - .. ts:cv:: CONFIG proxy.config.http.record_heartbeat INT 0 :reloadable: @@ -2928,6 +2915,7 @@ Diagnostic Logging Configuration for Debug() messages only. Set to 2 to enable for all messages. .. ts:cv:: CONFIG proxy.config.diags.debug.enabled INT 0 + :reloadable: Enables logging for diagnostic messages whose log level is `diag` or `debug`. diff --git a/doc/admin-guide/files/ssl_multicert.config.en.rst b/doc/admin-guide/files/ssl_multicert.config.en.rst index 25bab6efcb7..e8112a78cb7 100644 --- a/doc/admin-guide/files/ssl_multicert.config.en.rst +++ b/doc/admin-guide/files/ssl_multicert.config.en.rst @@ -98,20 +98,8 @@ ssl_ticket_enabled=1|0 (optional) OpenSSL should be upgraded to version 0.9.8f or higher. This option must be set to `0` to disable session ticket support. -ticket_key_name=FILENAME (optional) - The name of session ticket key file which contains a secret for - encrypting and decrypting TLS session tickets. If *FILENAME* is - not an absolute path, it is resolved relative to the - :ts:cv:`proxy.config.ssl.server.cert.path` configuration variable. - This option has no effect if session tickets are disabled by the - ``ssl_ticket_enabled`` option. The contents of the key file should - be 48 random (ASCII) bytes. One way to generate this would be to run - ``head -c48 /dev/urandom | openssl enc -base64 | head -c48 > file.ticket``. 
- - Session ticket support is enabled by default. If neither of the - ``ssl_ticket_enabled`` and ``ticket_key_name`` options are - specified, and internal session ticket key is generated. This - key will be different each time Traffic Server is started. +ticket_key_name=FILENAME (optional) [**REMOVED in 7.1.x and 8.0**] + Ticket key should be set in records.config via :ts:cv:`proxy.config.ssl.server.ticket_key.filename` ssl_key_dialog=builtin|"exec:/path/to/program [args]" (optional) Method used to provide a pass phrase for encrypted private keys. If the diff --git a/doc/developer-guide/api/functions/TSHttpOverridableConfig.en.rst b/doc/developer-guide/api/functions/TSHttpOverridableConfig.en.rst index 0f16897b27f..0f9e0a8fb12 100644 --- a/doc/developer-guide/api/functions/TSHttpOverridableConfig.en.rst +++ b/doc/developer-guide/api/functions/TSHttpOverridableConfig.en.rst @@ -73,7 +73,6 @@ c:member:`TS_CONFIG_HTTP_ANONYMIZE_REMOVE_FROM` :ts:cv:`prox c:member:`TS_CONFIG_HTTP_ANONYMIZE_REMOVE_REFERER` :ts:cv:`proxy.config.http.anonymize_remove_referer` c:member:`TS_CONFIG_HTTP_ANONYMIZE_REMOVE_USER_AGENT` :ts:cv:`proxy.config.http.anonymize_remove_user_agent` c:member:`TS_CONFIG_HTTP_ATTACH_SERVER_SESSION_TO_CLIENT` :ts:cv:`proxy.config.http.attach_server_session_to_client` -c:member:`TS_CONFIG_HTTP_SAFE_REQUESTS_RETRYABLE` :ts:cv:`proxy.config.http.safe_requests_retryable` c:member:`TS_CONFIG_HTTP_AUTH_SERVER_SESSION_PRIVATE` :ts:cv:`proxy.config.http.auth_server_session_private` c:member:`TS_CONFIG_HTTP_BACKGROUND_FILL_ACTIVE_TIMEOUT` :ts:cv:`proxy.config.http.background_fill_active_timeout` c:member:`TS_CONFIG_HTTP_BACKGROUND_FILL_COMPLETED_THRESHOLD` :ts:cv:`proxy.config.http.background_fill_completed_threshold` diff --git a/doc/developer-guide/api/functions/TSHttpTxnMilestoneGet.en.rst b/doc/developer-guide/api/functions/TSHttpTxnMilestoneGet.en.rst index c806321626c..e0cd0ac6d14 100644 --- a/doc/developer-guide/api/functions/TSHttpTxnMilestoneGet.en.rst +++ 
b/doc/developer-guide/api/functions/TSHttpTxnMilestoneGet.en.rst @@ -42,31 +42,104 @@ is successful. .. type:: TSMilestonesType -=============================================== ========== -Value Milestone -=============================================== ========== -:const:`TS_MILESTONE_SM_START` Transaction state machine is initialized. -:const:`TS_MILESTONE_UA_BEGIN` The client connection is accepted. -:const:`TS_MILESTONE_PLUGIN_ACTIVE` Amount of time plugins were active plus start time. -:const:`TS_MILESTONE_PLUGIN_TOTAL` Wall time while plugins were active plus start time. -:const:`TS_MILESTONE_UA_READ_HEADER_DONE` The request header from the client has been read and parsed. -:const:`TS_MILESTONE_CACHE_OPEN_READ_BEGIN` Initiate read of the cache. -:const:`TS_MILESTONE_CACHE_OPEN_READ_END` Initial cache read has resolved. -:const:`TS_MILESTONE_CACHE_OPEN_WRITE_BEGIN` Start open for cache write. -:const:`TS_MILESTONE_CACHE_OPEN_WRITE_END` Cache has been opened for write. -:const:`TS_MILESTONE_DNS_LOOKUP_BEGIN` Initiate host resolution in HostDB -:const:`TS_MILESTONE_DNS_LOOKUP_END` Host resolution resolves. -:const:`TS_MILESTONE_SERVER_FIRST_CONNECT` First time origin server connect attempted or shared shared session attached. -:const:`TS_MILESTONE_SERVER_CONNECT` Most recent time origin server connect attempted or shared session attached. -:const:`TS_MILESTONE_SERVER_CONNECT_END` More recent time a connection attempt was resolved. -:const:`TS_MILESTONE_SERVER_BEGIN_WRITE` First byte is written to the origin server connection. -:const:`TS_MILESTONE_SERVER_FIRST_READ` First byte is read from connection to origin server. -:const:`TS_MILESTONE_SERVER_READ_HEADER_DONE` Origin server response has been read and parsed. -:const:`TS_MILESTONE_UA_BEGIN_WRITE` The response header write to the client starts. -:const:`TS_MILESTONE_SERVER_CLOSE` Last I/O activity on origin server connection. 
-:const:`TS_MILESTONE_UA_CLOSE` Last I/O activity on the client socket, or connection abort. -:const:`TS_MILESTONE_SM_FINISH` Transaction has finished, state machine final logging has started. -=============================================== ========== + An enumeration of the valid indices of transaction milestone data. + + .. macro:: TS_MILESTONE_SM_START + + Transaction state machine is initialized. + + .. macro:: TS_MILESTONE_UA_BEGIN + + The client connection is accepted. + + .. macro:: TS_MILESTONE_PLUGIN_ACTIVE + + Amount of time plugins were active plus start time. + + .. macro:: TS_MILESTONE_PLUGIN_TOTAL + + Wall time while plugins were active plus start time. + + .. macro:: TS_MILESTONE_UA_READ_HEADER_DONE + + The request header from the client has been read and parsed. + + .. macro:: TS_MILESTONE_CACHE_OPEN_READ_BEGIN + + Initiate read of the cache. + + .. macro:: TS_MILESTONE_CACHE_OPEN_READ_END + + Initial cache read has resolved. + + .. macro:: TS_MILESTONE_CACHE_OPEN_WRITE_BEGIN + + Start open for cache write. + + .. macro:: TS_MILESTONE_CACHE_OPEN_WRITE_END + + Cache has been opened for write. + + .. macro:: TS_MILESTONE_DNS_LOOKUP_BEGIN + + Initiate host resolution in HostDB + + .. macro:: TS_MILESTONE_DNS_LOOKUP_END + + Host resolution resolves. + + .. macro:: TS_MILESTONE_SERVER_FIRST_CONNECT + + First time origin server connect attempted or shared shared session attached. + + .. macro:: TS_MILESTONE_SERVER_CONNECT + + Most recent time origin server connect attempted or shared session attached. + + .. macro:: TS_MILESTONE_SERVER_CONNECT_END + + More recent time a connection attempt was resolved. + + .. macro:: TS_MILESTONE_SERVER_BEGIN_WRITE + + First byte is written to the origin server connection. + + .. macro:: TS_MILESTONE_SERVER_FIRST_READ + + First byte is read from connection to origin server. + + .. macro:: TS_MILESTONE_SERVER_READ_HEADER_DONE + + Origin server response has been read and parsed. + + .. 
macro:: TS_MILESTONE_UA_BEGIN_WRITE + + The response header write to the client starts. + + .. macro:: TS_MILESTONE_SERVER_CLOSE + + Last I/O activity on origin server connection. + + .. macro:: TS_MILESTONE_UA_CLOSE + + Last I/O activity on the client socket, or connection abort. + + .. macro:: TS_MILESTONE_SM_FINISH + + Transaction has finished, state machine final logging has started. + + .. macro:: TS_MILESTONE_PLUGIN_ACTIVE + + Amount of time plugins were active (running plugin code). + + .. macro:: TS_MILESTONE_PLUGIN_TOTAL + + Amount of time spent in or waiting for plugins. + + .. macro:: TS_MILESTONE_LAST_ENTRY + + A psuedo index which is set to be one more than the last valid index. This is useful for looping over the data. + * The server connect times predate the transmission of the :literal:`SYN` packet. That is, before a connection to the origin server is completed. @@ -74,7 +147,7 @@ Value Milestone * A connection attempt is resolved when no more connection related activity remains to be done, and the connection is either established or has failed. -* :const:`TS_MILESTONE_UA_CLOSE` and :const:`TS_MILESTONE_SERVER_CLOSE` are +* :macro:`TS_MILESTONE_UA_CLOSE` and :macro:`TS_MILESTONE_SERVER_CLOSE` are updated continuously during the life of the transaction, every time there is I/O activity. The updating stops when the corresponding connection is closed, leaving the last I/O time as the final value. @@ -82,21 +155,21 @@ Value Milestone * The cache :literal:`OPEN` milestones time only the initial setup, the *open*, not the full read or write. -* :const:`TS_MILESTONE_PLUGIN_ACTIVE` and :const:`TS_MILESTONE_PLUGIN_TOTAL` are different from the other milestones as - they measure elapsed time, not event time. The value is the elapsed time *plus* :const:`TS_MILESTONE_SM_START`. This +* :macro:`TS_MILESTONE_PLUGIN_ACTIVE` and :macro:`TS_MILESTONE_PLUGIN_TOTAL` are different from the other milestones as + they measure elapsed time, not event time. 
The value is the elapsed time *plus* :macro:`TS_MILESTONE_SM_START`. This was decided to be more convenient because then these milestones can be handled / displayed in the same way as the - other milestones, as offsets from :const:`TS_MILESTONE_SM_START`. + other milestones, as offsets from :macro:`TS_MILESTONE_SM_START`. - :const:`TS_MILESTONE_PLUGIN_ACTIVE` value is the amount of time the plugin was active, that is performing - computation. :const:`TS_MILESTONE_PLUGIN_TOTAL` is the wall time which includes any time the transaction was blocked + :macro:`TS_MILESTONE_PLUGIN_ACTIVE` value is the amount of time the plugin was active, that is performing + computation. :macro:`TS_MILESTONE_PLUGIN_TOTAL` is the wall time which includes any time the transaction was blocked while a plugin was active. For instance if a plugin waits on an external event, that waiting time will be in - :const:`TS_MILESTONE_PLUGIN_TOTAL` but not in :const:`TS_MILESTONE_PLUGIN_ACTIVE`. + :macro:`TS_MILESTONE_PLUGIN_TOTAL` but not in :macro:`TS_MILESTONE_PLUGIN_ACTIVE`. Return Values ============= -:const:`TS_SUCCESS` if successful and :arg:`time` was updated, otherwise -:const:`TS_ERROR`. +:macro:`TS_SUCCESS` if successful and :arg:`time` was updated, otherwise +:macro:`TS_ERROR`. 
See Also ======== diff --git a/doc/developer-guide/api/functions/TSPluginInit.en.rst b/doc/developer-guide/api/functions/TSPluginInit.en.rst index 94ac6442c12..232bb79fbe3 100644 --- a/doc/developer-guide/api/functions/TSPluginInit.en.rst +++ b/doc/developer-guide/api/functions/TSPluginInit.en.rst @@ -71,7 +71,7 @@ Examples info.support_email = "ts-api-support@MyCompany.com"; if (TSPluginRegister(&info) != TS_SUCCESS) { - TSError("[%s] Plugin registration failed.", PLUGIN_NAME); + TSError("[%s] Plugin registration failed", PLUGIN_NAME); } } diff --git a/doc/developer-guide/api/functions/TSSslServerContextCreate.en.rst b/doc/developer-guide/api/functions/TSSslServerContextCreate.en.rst index 74e32812014..c9e1429f5d1 100644 --- a/doc/developer-guide/api/functions/TSSslServerContextCreate.en.rst +++ b/doc/developer-guide/api/functions/TSSslServerContextCreate.en.rst @@ -47,8 +47,8 @@ Type .. type:: TSSslContext -The SSL context object. This is an opaque type that can be cast to -the underlying SSL library type (:code:`SSL_CTX *` for the OpenSSL library). + The SSL context object. This is an opaque type that can be cast to + the underlying SSL library type (:code:`SSL_CTX *` for the OpenSSL library). 
See also ======== diff --git a/doc/developer-guide/api/functions/TSTrafficServerVersionGet.en.rst b/doc/developer-guide/api/functions/TSTrafficServerVersionGet.en.rst index a0d20a61308..076ab1e7294 100644 --- a/doc/developer-guide/api/functions/TSTrafficServerVersionGet.en.rst +++ b/doc/developer-guide/api/functions/TSTrafficServerVersionGet.en.rst @@ -88,7 +88,7 @@ Example info.support_email = "ts-api-support@MyCompany.com"; if (TSPluginRegister(&info) != TS_SUCCESS) { - TSError("[%s] Plugin registration failed.", PLUGIN_NAME); + TSError("[%s] Plugin registration failed", PLUGIN_NAME); } if (!check_ts_version()) { diff --git a/doc/developer-guide/api/functions/TSVConnSslConnectionGet.en.rst b/doc/developer-guide/api/functions/TSVConnSslConnectionGet.en.rst index ba722c8a2e8..e4c1c490350 100644 --- a/doc/developer-guide/api/functions/TSVConnSslConnectionGet.en.rst +++ b/doc/developer-guide/api/functions/TSVConnSslConnectionGet.en.rst @@ -26,7 +26,7 @@ Synopsis `#include ` -.. function:: TSSslVConnection TSVConnSslConnectionGet(TSVConn svc) +.. function:: TSSslConnection TSVConnSslConnectionGet(TSVConn svc) Description =========== @@ -36,8 +36,7 @@ Get the SSL (per connection) object from the SSl connection :arg:`svc`. Types ===== -.. type:: TSSslVConnection - -The SSL (per connection) object. This is an opaque type that can be cast to the -appropriate type (SSL * for the OpenSSL library). +.. type:: TSSslConnection + The SSL (per connection) object. This is an opaque type that can be cast to the + appropriate type (:code:`SSL *` for the OpenSSL library). diff --git a/doc/developer-guide/api/types/TSMilestonesType.en.rst b/doc/developer-guide/api/types/TSMilestonesType.en.rst deleted file mode 100644 index ff5aaaf8f26..00000000000 --- a/doc/developer-guide/api/types/TSMilestonesType.en.rst +++ /dev/null @@ -1,78 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one or more - contributor license agreements. 
See the NOTICE file distributed - with this work for additional information regarding copyright - ownership. The ASF licenses this file to you under the Apache - License, Version 2.0 (the "License"); you may not use this file - except in compliance with the License. You may obtain a copy of - the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied. See the License for the specific language governing - permissions and limitations under the License. - -.. include:: ../../../common.defs - -TSMiletonesType -*************** - -Synopsis -======== - -`#include ` - -.. c:type:: TSMiletonesType - -Enum typedef. - -Enumeration Members -=================== - -.. c:member:: TSMiletonesType TS_MILESTONE_NULL - -.. c:member:: TSMiletonesType TS_MILESTONE_UA_BEGIN - -.. c:member:: TSMiletonesType TS_MILESTONE_UA_READ_HEADER_DONE - -.. c:member:: TSMiletonesType TS_MILESTONE_UA_BEGIN_WRITE - -.. c:member:: TSMiletonesType TS_MILESTONE_UA_CLOSE - -.. c:member:: TSMiletonesType TS_MILESTONE_SERVER_FIRST_CONNECT - -.. c:member:: TSMiletonesType TS_MILESTONE_SERVER_CONNECT - -.. c:member:: TSMiletonesType TS_MILESTONE_SERVER_CONNECT_END - -.. c:member:: TSMiletonesType TS_MILESTONE_SERVER_BEGIN_WRITE - -.. c:member:: TSMiletonesType TS_MILESTONE_SERVER_FIRST_READ - -.. c:member:: TSMiletonesType TS_MILESTONE_SERVER_READ_HEADER_DONE - -.. c:member:: TSMiletonesType TS_MILESTONE_SERVER_CLOSE - -.. c:member:: TSMiletonesType TS_MILESTONE_CACHE_OPEN_READ_BEGIN - -.. c:member:: TSMiletonesType TS_MILESTONE_CACHE_OPEN_READ_END - -.. c:member:: TSMiletonesType TS_MILESTONE_CACHE_OPEN_WRITE_BEGIN - -.. c:member:: TSMiletonesType TS_MILESTONE_CACHE_OPEN_WRITE_END - -.. c:member:: TSMiletonesType TS_MILESTONE_DNS_LOOKUP_BEGIN - -.. 
c:member:: TSMiletonesType TS_MILESTONE_DNS_LOOKUP_END - -.. c:member:: TSMiletonesType TS_MILESTONE_SM_START - -.. c:member:: TSMiletonesType TS_MILESTONE_SM_FINISH - -.. c:member:: TSMiletonesType TS_MILESTONE_LAST_ENTRY - -Description -=========== - diff --git a/doc/developer-guide/architecture/threads-and-events.en.rst b/doc/developer-guide/architecture/threads-and-events.en.rst new file mode 100644 index 00000000000..e1dcf4fde76 --- /dev/null +++ b/doc/developer-guide/architecture/threads-and-events.en.rst @@ -0,0 +1,135 @@ +.. Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, + software distributed under the License is distributed on an + "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + KIND, either express or implied. See the License for the + specific language governing permissions and limitations + under the License. + +.. include:: ../../common.defs + +.. default-domain:: cpp + +.. highlight:: cpp + +.. _threads-and-events: + +Threads and Event Processing +**************************** + +Interally |TS| is a cooperative multi-threaded environment. There are a fixed number of threads for core operations, determined at process start time. All core operations take place on one of these existing threads. Plugins may spawn additional threads but these are outside the scope of this document. + +Threads +======= + +|TS| has a taxonomy of thread types. At the most basic are threads as the operating system / system libraries provide. 
On top of this |TS| has a thing layer that is the :class:`Thread` which contains a mutex and a thread identifier. The logic for starting the thread at the system level is embedded in this class. All threads started by |TS| have an instance of this class (or subclass). Plugins can directly start their own threads via system calls and those are not tracked. :class:`Thread` sets up thread local storage via :code:`pthread_setspecific`. Threads can be started via an explicit function provided to :method:`Thread::start` or by subclassing :class:`Thread` and overriding :method:`Thread::execute`. + +:class:`EThread` is a subclass of :class:`Thread` which provides support for |TS| core operations. It is this class that provides support for using :class:`Continuation` instances. Each :class:`EThread` instance executes a single continuation at thread start. If the thread is :enumerator:`ThreadType::DEDICATED` it exits after invoking the start continuation. A :enumerator:`ThreadType::REGULAR` thread will process its event queue until explicitly stopped after executing the start continuation. + +Despite the name :class:`EventProcessor` is primarily a thread management class. It enables the creation and management of thread groups which are then used by the |TS| core for different types of computation. The set of groups is determined at run time via the :func:`EventProcessor::registerEventType`. Threads managed by :class:`EventProcessor` have the :class:`EThread` start continuation controlled by :class:`EventProcessor`. Instead each thread group (event type) has a list of continuations to run when a thread of that type starts. Continuations are added to the list with :func:`EventProcessor::schedule_spawn`. There are two variants of this method, one for continuations and one for just a function. The latter creates a continuation to call the function and then schedules that using the former. 
+ +:class:`EventProcessor` is intended to be a singleton and the global instance is :var:`eventProcessor`. + +In general if a subsystem in the |TS| core is setting up a thread group, it should use code of the form :: + + int ET_GROUP; // global variable, where "GROUP" is repalced by the actual group / type name. + int n_group_threads = 3; // Want 3 of these threads by default, possibly changed by configuration options. + constexpr size_t GROUP_STACK_SIZE = 1 << 20; // stack size for each thread. + void Group_Thread_Init(EThread*); // function to perform per thread local initialization. + // + ET_GROUP = eventProcessor::registerEventType("Group"); + eventProcessor.schedule_spawn(&Group_Per_Thread_Init, ET_GROUP); + eventProcessor.spawn_event_threads(ET_GROUP, n_group_threads, GROUP_STACK_SIZE); + + +The function :code:`Group_Thread_Init` can be replaced with a continuation if that's more convenient. One advantage of a continuation is additional data (via :arg:`cookie`) can be provide during thread initialization. + +If there is no thread initializatoin needed, this can be compressed in to a single call :: + + ET_GROUP = eventProcessor.spawn_event_threads("Group", n_group_threads, GROUP_STACK_SIZE); + +This registers the group name and type, starts the threads, and returns the event type. + +TS-4265 Changes +=============== + +The essential change for TS-4625 is to provide a mechanism to arrange for code to be executed on :class:`EventProcessor` threads before starting event processing. This enables robust initialization of thread local data / state. Previously either the initialization would be done after thread start from a different thread by iterating over the threads, or "foreign" code being embdedded in the low level thread / event processing logic. + +Types +===== + +.. type:: EventType + + A thread classification value that represents the type of events the thread is expected to process. + +.. 
var:: EventType ET_CALL + + A predefined :type:`EventType` which always exists. This is deprecated, use :var:`ET_NET` instead. + +.. var:: EventType ET_NET + + A synonymn for :var:`ET_CALL`. + +.. var:: EventProcessor eventProcessor + + The global single instance of :class:`EventProcessor`. + +.. class:: Thread + + Wrapper for system level thread. + +.. class:: EThread + + Event processing thread. + + .. function:: EventType registerEventType(const char* name) + + Register an event type by name. This reserves an event type index which is returned as :type:`EventType`. + +.. enum:: ThreadType + + .. enumerator:: DEDICATED + + A thread which executes only the start contiuation and then exits. + + .. enumerator:: REGULAR + + A thread which executes the start continuation and then processes its event queue. + + .. enumerator:: MONITOR + + Unknown - does not appear to be used, should be removed entirely. + +.. class:: Continuation + + A future computation. A continuation has a :term:`handler` which is a class method with a + specific signature. A continuation is invoked by calling its handler. A future computation can be + referenced by an :class:`Action` instance. This is used primarily to allow the future work to be + canceled. + +.. class:: Action + + Reference to a future computation for a :class:`Continuation`. + +.. class:: Event : public Action + + Reference to code to dispatch. Note that an :class:`Event` is a type of :class:`Action`. This class combines the future computational reference of :class:`Action` + +.. class:: EventProcessor + + .. function:: Event * schedule_spawn(Continuation * c, EventType ev_type, int event = EVENT_IMMEDIATE, void * cookie = NULL) + + When the :class:`EventProcessor` starts a thread of type :arg:`ev_type`, :arg:`c` will be called before any events are dispatched by the thread. The handler for :arg:`c` will be called with an event code of :arg:`event` and data pointer of :arg:`cookie`. + + .. 
function:: Event * schedule_spawn(void (* f)(EThread * ), EventType ev_type); + + When the :class:`EventProcessor` starts a thread of type :arg:`ev_type` the function :arg:`f` will be called with a pointer to the :class:`EThread` instance which is starting. diff --git a/doc/getting-started/index.en.rst b/doc/getting-started/index.en.rst index 2ec3d17ff83..b9e7efcb2a9 100644 --- a/doc/getting-started/index.en.rst +++ b/doc/getting-started/index.en.rst @@ -122,7 +122,7 @@ RHEL / CentOS |TS| is available through the EPEL repositories. If you do not have those configured on your machine yet, you must install them first with the following:: - wget https://dl.fedoraproject.org/pub/epel/7/x86_64/e/epel-release-7-5.noarch.rpm + wget https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm sudo rpm -Uvh epel-release-7*.rpm Ensuring that you replace the release number with a value that is appropriate diff --git a/doc/locale/ja/LC_MESSAGES/developer-guide/api/types/TSMilestonesType.en.po b/doc/locale/ja/LC_MESSAGES/developer-guide/api/types/TSMilestonesType.en.po index b71cd979237..a7f157af159 100644 --- a/doc/locale/ja/LC_MESSAGES/developer-guide/api/types/TSMilestonesType.en.po +++ b/doc/locale/ja/LC_MESSAGES/developer-guide/api/types/TSMilestonesType.en.po @@ -46,7 +46,7 @@ msgid "Synopsis" msgstr "概要" #: ../../developer-guide/api/types/TSMilestonesType.en.rst:20 -msgid "TSMiletonesType" +msgid "TSMilestonesType" msgstr "" #: ../../developer-guide/api/types/TSMilestonesType.en.rst:25 diff --git a/example/add_header/add_header.c b/example/add_header/add_header.c index a114c796daf..ebd6a24ce77 100644 --- a/example/add_header/add_header.c +++ b/example/add_header/add_header.c @@ -133,7 +133,7 @@ TSPluginInit(int argc, const char *argv[]) info.support_email = "dev@trafficserver.apache.org"; if (TSPluginRegister(&info) != TS_SUCCESS) { - TSError("[%s] Plugin registration failed.", PLUGIN_NAME); + TSError("[%s] Plugin registration failed", PLUGIN_NAME); goto error; 
} diff --git a/example/append_transform/append_transform.c b/example/append_transform/append_transform.c index 7b244c65ffb..67922f246ab 100644 --- a/example/append_transform/append_transform.c +++ b/example/append_transform/append_transform.c @@ -356,7 +356,7 @@ TSPluginInit(int argc, const char *argv[]) info.support_email = "dev@trafficserver.apache.org"; if (TSPluginRegister(&info) != TS_SUCCESS) { - TSError("[%s] Plugin registration failed.", PLUGIN_NAME); + TSError("[%s] Plugin registration failed", PLUGIN_NAME); goto Lerror; } diff --git a/example/basic_auth/basic_auth.c b/example/basic_auth/basic_auth.c index 8c16ac3e2d3..1743465c2ea 100644 --- a/example/basic_auth/basic_auth.c +++ b/example/basic_auth/basic_auth.c @@ -220,7 +220,7 @@ TSPluginInit(int argc ATS_UNUSED, const char *argv[] ATS_UNUSED) info.support_email = "dev@trafficserver.apache.org"; if (TSPluginRegister(&info) != TS_SUCCESS) { - TSError("[%s] Plugin registration failed.", PLUGIN_NAME); + TSError("[%s] Plugin registration failed", PLUGIN_NAME); } /* Build translation table */ diff --git a/example/blacklist_0/blacklist_0.c b/example/blacklist_0/blacklist_0.c index 432bdfd5809..aa86c255142 100644 --- a/example/blacklist_0/blacklist_0.c +++ b/example/blacklist_0/blacklist_0.c @@ -158,7 +158,7 @@ TSPluginInit(int argc, const char *argv[]) info.support_email = "dev@trafficserver.apache.org"; if (TSPluginRegister(&info) != TS_SUCCESS) { - TSError("[%s] Plugin registration failed.", PLUGIN_NAME); + TSError("[%s] Plugin registration failed", PLUGIN_NAME); } nsites = argc - 1; diff --git a/example/blacklist_1/blacklist_1.c b/example/blacklist_1/blacklist_1.c index a6641738241..b6c7efd978e 100644 --- a/example/blacklist_1/blacklist_1.c +++ b/example/blacklist_1/blacklist_1.c @@ -316,7 +316,7 @@ TSPluginInit(int argc ATS_UNUSED, const char *argv[] ATS_UNUSED) info.support_email = "dev@trafficserver.apache.org"; if (TSPluginRegister(&info) != TS_SUCCESS) { - TSError("[%s] Plugin registration failed.", 
PLUGIN_NAME); + TSError("[%s] Plugin registration failed", PLUGIN_NAME); } /* create an TSTextLogObject to log blacklisted requests to */ diff --git a/example/bnull_transform/bnull_transform.c b/example/bnull_transform/bnull_transform.c index 589aa55e77b..97ed3baf674 100644 --- a/example/bnull_transform/bnull_transform.c +++ b/example/bnull_transform/bnull_transform.c @@ -310,7 +310,7 @@ TSPluginInit(int argc ATS_UNUSED, const char *argv[] ATS_UNUSED) info.support_email = "dev@trafficserver.apache.org"; if (TSPluginRegister(&info) != TS_SUCCESS) { - TSError("[%s] Plugin registration failed.", PLUGIN_NAME); + TSError("[%s] Plugin registration failed", PLUGIN_NAME); goto Lerror; } diff --git a/example/cache_scan/cache_scan.cc b/example/cache_scan/cache_scan.cc index 22f457ac65d..cd73dd5ff49 100644 --- a/example/cache_scan/cache_scan.cc +++ b/example/cache_scan/cache_scan.cc @@ -504,6 +504,6 @@ TSPluginInit(int /* argc ATS_UNUSED */, const char * /* argv ATS_UNUSED */ []) global_contp = TSContCreate(cache_print_plugin, TSMutexCreate()); TSHttpHookAdd(TS_HTTP_READ_REQUEST_HDR_HOOK, global_contp); } else { - TSError("[%s] Plugin registration failed.", PLUGIN_NAME); + TSError("[%s] Plugin registration failed", PLUGIN_NAME); } } diff --git a/example/disable_http2/disable_http2.cc b/example/disable_http2/disable_http2.cc index 36a94d0ed76..18b2637156d 100644 --- a/example/disable_http2/disable_http2.cc +++ b/example/disable_http2/disable_http2.cc @@ -95,13 +95,13 @@ TSPluginInit(int argc, char const *argv[]) ret = TSPluginRegister(&info); if (ret != TS_SUCCESS) { - TSError("[%s] registration failed.", PLUGIN_NAME); + TSError("[%s] registration failed", PLUGIN_NAME); return; } else if (argc < 2) { TSError("[%s] Usage %s.so servername1 servername2 ... 
", PLUGIN_NAME, PLUGIN_NAME); return; } else { - TSDebug(PLUGIN_NAME, "registration succeeded."); + TSDebug(PLUGIN_NAME, "registration succeeded"); } for (int i = 1; i < argc; i++) { diff --git a/example/file_1/file_1.c b/example/file_1/file_1.c index 30eff6f80b7..21ee9400720 100644 --- a/example/file_1/file_1.c +++ b/example/file_1/file_1.c @@ -51,7 +51,7 @@ TSPluginInit(int argc, const char *argv[]) info.support_email = "dev@trafficserver.apache.org"; if (TSPluginRegister(&info) != TS_SUCCESS) { - TSError("[%s] Plugin registration failed.", PLUGIN_NAME); + TSError("[%s] Plugin registration failed", PLUGIN_NAME); } for (i = 1; i < argc; i++) { diff --git a/example/hello/hello.c b/example/hello/hello.c index 34df0f7b6b9..9f4c6cbbe11 100644 --- a/example/hello/hello.c +++ b/example/hello/hello.c @@ -37,7 +37,7 @@ TSPluginInit(int argc, const char *argv[]) info.support_email = "dev@trafficserver.apache.org"; if (TSPluginRegister(&info) != TS_SUCCESS) { - TSError("[%s] Plugin registration failed.", PLUGIN_NAME); + TSError("[%s] Plugin registration failed", PLUGIN_NAME); } TSDebug(PLUGIN_NAME, "Hello World!"); diff --git a/example/lifecycle_plugin/lifecycle_plugin.c b/example/lifecycle_plugin/lifecycle_plugin.c index a84e2132e38..19879c847ad 100644 --- a/example/lifecycle_plugin/lifecycle_plugin.c +++ b/example/lifecycle_plugin/lifecycle_plugin.c @@ -69,7 +69,7 @@ TSPluginInit(int argc, const char *argv[]) info.support_email = "dev@trafficserver.apache.org"; if (TSPluginRegister(&info) != TS_SUCCESS) { - TSError("[%s] Plugin registration failed.", PLUGIN_NAME); + TSError("[%s] Plugin registration failed", PLUGIN_NAME); goto Lerror; } @@ -86,5 +86,5 @@ TSPluginInit(int argc, const char *argv[]) return; Lerror: - TSError("[%s] Unable to initialize plugin (disabled).", PLUGIN_NAME); + TSError("[%s] Unable to initialize plugin (disabled)", PLUGIN_NAME); } diff --git a/example/null_transform/null_transform.c b/example/null_transform/null_transform.c index 
cf604241a4a..cc941cd9e68 100644 --- a/example/null_transform/null_transform.c +++ b/example/null_transform/null_transform.c @@ -317,7 +317,7 @@ TSPluginInit(int argc ATS_UNUSED, const char *argv[] ATS_UNUSED) info.support_email = "dev@trafficserver.apache.org"; if (TSPluginRegister(&info) != TS_SUCCESS) { - TSError("[%s] Plugin registration failed.", PLUGIN_NAME); + TSError("[%s] Plugin registration failed", PLUGIN_NAME); goto Lerror; } @@ -326,5 +326,5 @@ TSPluginInit(int argc ATS_UNUSED, const char *argv[] ATS_UNUSED) return; Lerror: - TSError("[%s] Unable to initialize plugin (disabled).", PLUGIN_NAME); + TSError("[%s] Unable to initialize plugin (disabled)", PLUGIN_NAME); } diff --git a/example/output_header/output_header.c b/example/output_header/output_header.c index 978c7a48eb0..11bf84d2dfb 100644 --- a/example/output_header/output_header.c +++ b/example/output_header/output_header.c @@ -158,7 +158,7 @@ TSPluginInit(int argc ATS_UNUSED, const char *argv[] ATS_UNUSED) info.support_email = "dev@trafficserver.apache.org"; if (TSPluginRegister(&info) != TS_SUCCESS) { - TSError("[%s] Plugin registration failed.", PLUGIN_NAME); + TSError("[%s] Plugin registration failed", PLUGIN_NAME); goto error; } diff --git a/example/protocol/Protocol.c b/example/protocol/Protocol.c index e6f4cd08246..3e85e86e058 100644 --- a/example/protocol/Protocol.c +++ b/example/protocol/Protocol.c @@ -112,7 +112,7 @@ TSPluginInit(int argc, const char *argv[]) info.support_email = "dev@trafficserver.apache.org"; if (TSPluginRegister(&info) != TS_SUCCESS) { - TSError("[%s] Plugin registration failed.", PLUGIN_NAME); + TSError("[%s] Plugin registration failed", PLUGIN_NAME); goto error; } @@ -130,7 +130,7 @@ TSPluginInit(int argc, const char *argv[]) accept_port = tmp; TSDebug(PLUGIN_NAME, "using accept_port %d", accept_port); } else { - TSError("[%s] Wrong argument for accept_port, using default port %d.", PLUGIN_NAME, accept_port); + TSError("[%s] Wrong argument for accept_port, using 
default port %d", PLUGIN_NAME, accept_port); } tmp = strtol(argv[2], &end, 10); @@ -138,7 +138,7 @@ TSPluginInit(int argc, const char *argv[]) server_port = tmp; TSDebug(PLUGIN_NAME, "using server_port %d", server_port); } else { - TSError("[%s] Wrong argument for server_port, using default port %d.", PLUGIN_NAME, server_port); + TSError("[%s] Wrong argument for server_port, using default port %d", PLUGIN_NAME, server_port); } } diff --git a/example/protocol_stack/protocol_stack.cc b/example/protocol_stack/protocol_stack.cc index eb2b427b0ec..24fa6868f2a 100644 --- a/example/protocol_stack/protocol_stack.cc +++ b/example/protocol_stack/protocol_stack.cc @@ -55,7 +55,7 @@ TSPluginInit(int argc ATS_UNUSED, const char *argv[] ATS_UNUSED) info.support_email = "dev@trafficserver.apache.org"; if (TSPluginRegister(&info) != TS_SUCCESS) { - TSError("[%s] Plugin registration failed.", PLUGIN_NAME); + TSError("[%s] Plugin registration failed", PLUGIN_NAME); } TSHttpHookAdd(TS_HTTP_READ_REQUEST_HDR_HOOK, TSContCreate(proto_stack_cb, nullptr)); diff --git a/example/redirect_1/redirect_1.c b/example/redirect_1/redirect_1.c index c7b44500033..494e3974645 100644 --- a/example/redirect_1/redirect_1.c +++ b/example/redirect_1/redirect_1.c @@ -294,7 +294,7 @@ TSPluginInit(int argc, const char *argv[]) info.support_email = "dev@trafficserver.apache.org"; if (TSPluginRegister(&info) != TS_SUCCESS) { - TSError("[%s] Plugin registration failed.", PLUGIN_NAME); + TSError("[%s] Plugin registration failed", PLUGIN_NAME); } if (argc == 3) { @@ -317,7 +317,7 @@ TSPluginInit(int argc, const char *argv[]) ip_deny = inet_addr(block_ip); - TSDebug(PLUGIN_NAME, "initializing stats..."); + TSDebug(PLUGIN_NAME, "initializing stats"); init_stats(); TSHttpHookAdd(TS_HTTP_READ_REQUEST_HDR_HOOK, TSContCreate(redirect_plugin, NULL)); diff --git a/example/remap_header_add/remap_header_add.cc b/example/remap_header_add/remap_header_add.cc index 417fcfa961c..d1152914423 100644 --- 
a/example/remap_header_add/remap_header_add.cc +++ b/example/remap_header_add/remap_header_add.cc @@ -92,7 +92,7 @@ TSRemapNewInstance(int argc, char *argv[], void **ih, char *, int) TSDebug(PLUGIN_NAME, "TSRemapNewInstance()"); if (!argv || !ih) { - TSError("[%s] Unable to load plugin because missing argv or ih.", PLUGIN_NAME); + TSError("[%s] Unable to load plugin because missing argv or ih", PLUGIN_NAME); return TS_ERROR; } @@ -141,7 +141,7 @@ TSRemapDoRemap(void *ih, TSHttpTxn txn, TSRemapRequestInfo *rri) remap_line *rl = static_cast(ih); if (!rl || !rri) { - TSError("[%s] rl or rri is null.", PLUGIN_NAME); + TSError("[%s] rl or rri is null", PLUGIN_NAME); return TSREMAP_NO_REMAP; } diff --git a/example/replace_header/replace_header.c b/example/replace_header/replace_header.c index e3353c802bb..f69116b2f16 100644 --- a/example/replace_header/replace_header.c +++ b/example/replace_header/replace_header.c @@ -49,7 +49,7 @@ replace_header(TSHttpTxn txnp) TSMLoc field_loc; if (TSHttpTxnServerRespGet(txnp, &resp_bufp, &resp_loc) != TS_SUCCESS) { - TSError("[%s] Couldn't retrieve server response header.", PLUGIN_NAME); + TSError("[%s] Couldn't retrieve server response header", PLUGIN_NAME); goto done; } @@ -105,7 +105,7 @@ TSPluginInit(int argc ATS_UNUSED, const char *argv[] ATS_UNUSED) info.support_email = "dev@trafficserver.apache.org"; if (TSPluginRegister(&info) != TS_SUCCESS) { - TSError("[%s] Plugin registration failed.", PLUGIN_NAME); + TSError("[%s] Plugin registration failed", PLUGIN_NAME); } TSHttpHookAdd(TS_HTTP_READ_RESPONSE_HDR_HOOK, TSContCreate(replace_header_plugin, NULL)); diff --git a/example/response_header_1/response_header_1.c b/example/response_header_1/response_header_1.c index 583a7cd6322..c5066fb8b78 100644 --- a/example/response_header_1/response_header_1.c +++ b/example/response_header_1/response_header_1.c @@ -228,7 +228,7 @@ TSPluginInit(int argc, const char *argv[]) info.support_email = "dev@trafficserver.apache.org"; if 
(TSPluginRegister(&info) != TS_SUCCESS) { - TSError("[%s] Plugin registration failed.", PLUGIN_NAME); + TSError("[%s] Plugin registration failed", PLUGIN_NAME); } init_buffer_status = 0; diff --git a/example/server-push/server-push.c b/example/server-push/server-push.c index da256e13032..724599d137d 100644 --- a/example/server-push/server-push.c +++ b/example/server-push/server-push.c @@ -105,7 +105,7 @@ TSPluginInit(int argc ATS_UNUSED, const char *argv[] ATS_UNUSED) info.support_email = "ts-api-support@MyCompany.com"; if (TSPluginRegister(&info) != TS_SUCCESS) { - TSError("[%s] Plugin registration failed.", PLUGIN_NAME); + TSError("[%s] Plugin registration failed", PLUGIN_NAME); } TSstrlcpy(url, argv[1], sizeof(url)); diff --git a/example/server-transform/server-transform.c b/example/server-transform/server-transform.c index 7a7af008c8b..ac917872422 100644 --- a/example/server-transform/server-transform.c +++ b/example/server-transform/server-transform.c @@ -193,7 +193,7 @@ transform_connect(TSCont contp, TransformData *data) ip_addr.sin_family = AF_INET; ip_addr.sin_addr.s_addr = server_ip; /* Should be in network byte order */ ip_addr.sin_port = server_port; - TSDebug("strans", "net connect.."); + TSDebug("strans", "net connect."); action = TSNetConnect(contp, (struct sockaddr const *)&ip_addr); if (!TSActionDone(action)) { @@ -519,7 +519,7 @@ transform_handler(TSCont contp, TSEvent event, void *edata) data = (TransformData *)TSContDataGet(contp); if (data == NULL) { - TSError("[server_transform] Didn't get Continuation's Data. 
Ignoring Event.."); + TSError("[server_transform] Didn't get Continuation's Data, ignoring event"); return 0; } TSDebug("strans", "transform handler event [%d], data->state = [%d]", event, data->state); @@ -647,7 +647,7 @@ TSPluginInit(int argc ATS_UNUSED, const char *argv[] ATS_UNUSED) info.support_email = "ts-api-support@MyCompany.com"; if (TSPluginRegister(&info) != TS_SUCCESS) { - TSError("[server_transform] Plugin registration failed."); + TSError("[server_transform] Plugin registration failed"); } /* connect to the echo port on localhost */ diff --git a/example/ssl-preaccept/ssl-preaccept.cc b/example/ssl-preaccept/ssl-preaccept.cc index 91435a44745..3fa703f83c2 100644 --- a/example/ssl-preaccept/ssl-preaccept.cc +++ b/example/ssl-preaccept/ssl-preaccept.cc @@ -184,13 +184,13 @@ TSPluginInit(int argc, const char *argv[]) } if (TS_SUCCESS != TSPluginRegister(&info)) { - TSError(PCP "registration failed."); + TSError(PCP "registration failed"); } else if (TSTrafficServerVersionGetMajor() < 2) { - TSError(PCP "requires Traffic Server 2.0 or later."); + TSError(PCP "requires Traffic Server 2.0 or later"); } else if (0 > Load_Configuration()) { - TSError(PCP "Failed to load config file."); + TSError(PCP "Failed to load config file"); } else if (nullptr == (cb_pa = TSContCreate(&CB_Pre_Accept, TSMutexCreate()))) { - TSError(PCP "Failed to pre-accept callback."); + TSError(PCP "Failed to pre-accept callback"); } else { TSHttpHookAdd(TS_VCONN_PRE_ACCEPT_HOOK, cb_pa); success = true; diff --git a/example/ssl-sni-whitelist/ssl-sni-whitelist.cc b/example/ssl-sni-whitelist/ssl-sni-whitelist.cc index 65e0a2b76bd..c4dd585714d 100644 --- a/example/ssl-sni-whitelist/ssl-sni-whitelist.cc +++ b/example/ssl-sni-whitelist/ssl-sni-whitelist.cc @@ -133,13 +133,13 @@ TSPluginInit(int argc, const char *argv[]) } if (TS_SUCCESS != TSPluginRegister(&info)) { - TSError(PCP "registration failed."); + TSError(PCP "registration failed"); } else if (TSTrafficServerVersionGetMajor() < 2) { 
- TSError(PCP "requires Traffic Server 2.0 or later."); + TSError(PCP "requires Traffic Server 2.0 or later"); } else if (0 > Load_Configuration()) { - TSError(PCP "Failed to load config file."); + TSError(PCP "Failed to load config file"); } else if (nullptr == (cb_sni = TSContCreate(&CB_servername_whitelist, TSMutexCreate()))) { - TSError(PCP "Failed to create SNI callback."); + TSError(PCP "Failed to create SNI callback"); } else { TSHttpHookAdd(TS_SSL_CERT_HOOK, cb_sni); success = true; @@ -158,7 +158,7 @@ TSPluginInit(int argc, const char *argv[]) void TSPluginInit(int, const char *[]) { - TSError(PCP "requires TLS SNI which is not available."); + TSError(PCP "requires TLS SNI which is not available"); } #endif // TS_USE_TLS_SNI diff --git a/example/ssl-sni/ssl-sni.cc b/example/ssl-sni/ssl-sni.cc index 9c0cca45a07..110f8616c79 100644 --- a/example/ssl-sni/ssl-sni.cc +++ b/example/ssl-sni/ssl-sni.cc @@ -154,13 +154,13 @@ TSPluginInit(int argc, const char *argv[]) } if (TS_SUCCESS != TSPluginRegister(&info)) { - TSError(PCP "registration failed."); + TSError(PCP "registration failed"); } else if (TSTrafficServerVersionGetMajor() < 2) { - TSError(PCP "requires Traffic Server 2.0 or later."); + TSError(PCP "requires Traffic Server 2.0 or later"); } else if (0 > Load_Configuration()) { - TSError(PCP "Failed to load config file."); + TSError(PCP "Failed to load config file"); } else if (nullptr == (cb_cert = TSContCreate(&CB_servername, TSMutexCreate()))) { - TSError(PCP "Failed to create cert callback."); + TSError(PCP "Failed to create cert callback"); } else { TSHttpHookAdd(TS_SSL_CERT_HOOK, cb_cert); success = true; @@ -179,7 +179,7 @@ TSPluginInit(int argc, const char *argv[]) void TSPluginInit(int, const char *[]) { - TSError(PCP "requires TLS SNI which is not available."); + TSError(PCP "requires TLS SNI which is not available"); } #endif // TS_USE_TLS_SNI diff --git a/example/thread-1/thread-1.c b/example/thread-1/thread-1.c index ff6daa7efcb..28ea799a5c7 
100644 --- a/example/thread-1/thread-1.c +++ b/example/thread-1/thread-1.c @@ -74,7 +74,7 @@ TSPluginInit(int argc ATS_UNUSED, const char *argv[] ATS_UNUSED) info.support_email = "ts-api-support@MyCompany.com"; if (TSPluginRegister(&info) != TS_SUCCESS) { - TSError("[thread-1] Plugin registration failed."); + TSError("[thread-1] Plugin registration failed"); } TSHttpHookAdd(TS_HTTP_OS_DNS_HOOK, TSContCreate(thread_plugin, NULL)); diff --git a/example/thread-pool/psi.c b/example/thread-pool/psi.c index 97025975604..02bc801a354 100644 --- a/example/thread-pool/psi.c +++ b/example/thread-pool/psi.c @@ -664,7 +664,7 @@ handle_transform(TSCont contp) Job *new_job; /* Add a request to include a file into the jobs queue.. */ /* We'll be called back once it's done with an EVENT_IMMEDIATE */ - TSDebug(DBG_TAG, "Psi filename extracted. Adding an include job to thread queue."); + TSDebug(DBG_TAG, "Psi filename extracted, adding an include job to thread queue"); data->state = STATE_READ_PSI; /* Create a new job request and add it to the queue */ @@ -982,7 +982,7 @@ TSPluginInit(int argc ATS_UNUSED, const char *argv[] ATS_UNUSED) info.support_email = ""; if (TSPluginRegister(&info) != TS_SUCCESS) { - TSError("[psi] Plugin registration failed."); + TSError("[psi] Plugin registration failed"); } /* Initialize the psi directory = /include */ diff --git a/iocore/cache/CacheDisk.cc b/iocore/cache/CacheDisk.cc index 44329edbb21..551301dd415 100644 --- a/iocore/cache/CacheDisk.cc +++ b/iocore/cache/CacheDisk.cc @@ -271,9 +271,6 @@ CacheDisk::create_volume(int number, off_t size_in_blocks, int scheme) } } - if (!p && !closest_match) - return nullptr; - if (!p && closest_match) { /* allocate from the closest match */ q = closest_match; diff --git a/iocore/cache/CacheRead.cc b/iocore/cache/CacheRead.cc index 215f0a99c84..b1e487585b0 100644 --- a/iocore/cache/CacheRead.cc +++ b/iocore/cache/CacheRead.cc @@ -585,10 +585,17 @@ CacheVC::openReadReadDone(int event, Event *e) } // fall through 
for truncated documents } -Lerror: +Lerror : { char tmpstring[100]; - Warning("Document %s truncated", earliest_key.toHexStr(tmpstring)); + if (request.valid()) { + int url_length; + const char *url_text = request.url_get()->string_get_ref(&url_length); + Warning("Document %s truncated, url[%.*s]", earliest_key.toHexStr(tmpstring), url_length, url_text); + } else { + Warning("Document %s truncated", earliest_key.toHexStr(tmpstring)); + } return calluser(VC_EVENT_ERROR); +} Ldone: return calluser(VC_EVENT_EOS); Lcallreturn: diff --git a/iocore/cache/I_Cache.h b/iocore/cache/I_Cache.h index 2ed8dca4660..32d819c0db5 100644 --- a/iocore/cache/I_Cache.h +++ b/iocore/cache/I_Cache.h @@ -71,7 +71,7 @@ struct CacheProcessor : public Processor { { } - virtual int start(int n_cache_threads = 0, size_t stacksize = DEFAULT_STACKSIZE); + virtual int start(int n_cache_threads = 0, size_t stacksize = DEFAULT_STACKSIZE) override; virtual int start_internal(int flags = 0); void stop(); diff --git a/iocore/dns/DNS.cc b/iocore/dns/DNS.cc index 454ee005d72..6ffd71e6bb9 100644 --- a/iocore/dns/DNS.cc +++ b/iocore/dns/DNS.cc @@ -193,13 +193,14 @@ DNSProcessor::start(int, size_t stacksize) if (dns_thread > 0) { // TODO: Hmmm, should we just get a single thread some other way? - ET_DNS = eventProcessor.spawn_event_threads(1, "ET_DNS", stacksize); - initialize_thread_for_net(eventProcessor.eventthread[ET_DNS][0]); + ET_DNS = eventProcessor.register_event_type("ET_DNS"); + eventProcessor.schedule_spawn(&initialize_thread_for_net, ET_DNS); + eventProcessor.spawn_event_threads(ET_DNS, 1, stacksize); } else { // Initialize the first event thread for DNS. 
ET_DNS = ET_CALL; } - thread = eventProcessor.eventthread[ET_DNS][0]; + thread = eventProcessor.thread_group[ET_DNS]._thread[0]; dns_failover_try_period = dns_timeout + 1; // Modify the "default" accordingly diff --git a/iocore/dns/SplitDNS.cc b/iocore/dns/SplitDNS.cc index 096bfb35238..a7fe721224c 100644 --- a/iocore/dns/SplitDNS.cc +++ b/iocore/dns/SplitDNS.cc @@ -495,7 +495,7 @@ SplitDNSRecord::Init(matcher_line *line_info) m_servers.x_dnsH = dnsH; SET_CONTINUATION_HANDLER(dnsH, &DNSHandler::startEvent_sdns); - (eventProcessor.eventthread[ET_DNS][0])->schedule_imm(dnsH); + (eventProcessor.thread_group[ET_DNS]._thread[0])->schedule_imm(dnsH); /* ----------------------------------------------------- Process any modifiers to the directive, if they exist diff --git a/iocore/eventsystem/I_EThread.h b/iocore/eventsystem/I_EThread.h index e93784ab1c2..d23cf28fc12 100644 --- a/iocore/eventsystem/I_EThread.h +++ b/iocore/eventsystem/I_EThread.h @@ -49,7 +49,6 @@ class Continuation; enum ThreadType { REGULAR = 0, - MONITOR, DEDICATED, }; @@ -253,28 +252,33 @@ class EThread : public Thread */ Event *schedule_every_local(Continuation *c, ink_hrtime aperiod, int callback_event = EVENT_INTERVAL, void *cookie = nullptr); + /** Schedule an event called once when the thread is spawned. + + This is useful only for regular threads and if called before @c Thread::start. The event will be + called first before the event loop. + + @note This will override the event for a dedicated thread so that this is called instead of the + event passed to the constructor. 
+ */ + Event *schedule_spawn(Continuation *c, int ev = EVENT_IMMEDIATE, void *cookie = NULL); + /* private */ Event *schedule_local(Event *e); - InkRand generator; - -private: - // prevent unauthorized copies (Not implemented) - EThread(const EThread &); - EThread &operator=(const EThread &); + InkRand generator = static_cast(Thread::get_hrtime_updated() ^ reinterpret_cast(this)); /*-------------------------------------------------------*\ | UNIX Interface | \*-------------------------------------------------------*/ -public: EThread(); EThread(ThreadType att, int anid); EThread(ThreadType att, Event *e); + EThread(const EThread &) = delete; + EThread &operator=(const EThread &) = delete; virtual ~EThread(); - Event *schedule_spawn(Continuation *cont); Event *schedule(Event *e, bool fast_signal = false); /** Block of memory to allocate thread specific data e.g. stat system arrays. */ @@ -292,8 +296,9 @@ class EThread : public Thread EThread **ethreads_to_be_signalled = nullptr; int n_ethreads_to_be_signalled = 0; - int id; - unsigned int event_types = 0; + static const int NO_ETHREAD_ID = -1; + int id = NO_ETHREAD_ID; + unsigned int event_types = 0; bool is_event_type(EventType et); void set_event_type(EventType et); @@ -311,8 +316,16 @@ class EThread : public Thread #endif EventIO *ep = nullptr; - ThreadType tt = REGULAR; - Event *oneevent = nullptr; // For dedicated event thread + ThreadType tt = REGULAR; + /** Initial event to call, before any scheduling. + + For dedicated threads this is the only event called. + For regular threads this is called first before the event loop starts. + @internal For regular threads this is used by the EventProcessor to get called back after + the thread starts but before any other events can be dispatched to provide initializations + needed for the thread. 
+ Event *start_event = nullptr; ServerSessionPool *server_session_pool = nullptr; }; diff --git a/iocore/eventsystem/I_EventProcessor.h b/iocore/eventsystem/I_EventProcessor.h index 53923d569d5..e89243fcd5e 100644 --- a/iocore/eventsystem/I_EventProcessor.h +++ b/iocore/eventsystem/I_EventProcessor.h @@ -30,15 +30,15 @@ #include "I_Event.h" #ifdef TS_MAX_THREADS_IN_EACH_THREAD_TYPE -const int MAX_THREADS_IN_EACH_TYPE = TS_MAX_THREADS_IN_EACH_THREAD_TYPE; +constexpr int MAX_THREADS_IN_EACH_TYPE = TS_MAX_THREADS_IN_EACH_THREAD_TYPE; #else -const int MAX_THREADS_IN_EACH_TYPE = 3072; +constexpr int MAX_THREADS_IN_EACH_TYPE = 3072; #endif #ifdef TS_MAX_NUMBER_EVENT_THREADS -const int MAX_EVENT_THREADS = TS_MAX_NUMBER_EVENT_THREADS; +constexpr int MAX_EVENT_THREADS = TS_MAX_NUMBER_EVENT_THREADS; #else -const int MAX_EVENT_THREADS = 4096; +constexpr int MAX_EVENT_THREADS = 4096; #endif class EThread; @@ -66,17 +66,16 @@ class EThread; Thread Groups (Event types): - When the EventProcessor is started, the first group of threads is - spawned and it is assigned the special id ET_CALL. Depending on the - complexity of the state machine or protocol, you may be interested - in creating additional threads and the EventProcessor gives you the - ability to create a single thread or an entire group of threads. In - the former case, you call spawn_thread and the thread is independent - of the thread groups and it exists as long as your continuation handle - executes and there are events to process. In the latter, you call - spawn_event_theads which creates a new thread group and you get an id - or event type with wich you must keep for use later on when scheduling - continuations on that group. + When the EventProcessor is started, the first group of threads is spawned and it is assigned the + special id ET_CALL. 
Depending on the complexity of the state machine or protocol, you may be + interested in creating additional threads and the EventProcessor gives you the ability to create a + single thread or an entire group of threads. In the former case, you call spawn_thread and the + thread is independent of the thread groups and it exists as long as your continuation handle + executes and there are events to process. In the latter, you call @c register_event_type to get an + event type and then @c spawn_event_threads which creates the threads in the group of that + type. Such threads require events to be scheduled on a specific thread in the group or for the + group in general using the event type. Note that between these two calls @c + EThread::schedule_spawn can be used to set up per thread initialization. Callback event codes: @@ -100,6 +99,13 @@ class EThread; class EventProcessor : public Processor { public: + /** Register an event type with @a name. + + This must be called to get an event type to pass to @c spawn_event_threads + @see spawn_event_threads + */ + EventType register_event_type(char const *name); + /** Spawn an additional thread for calling back the continuation. Spawns a dedicated thread (EThread) that calls back the continuation passed @@ -112,16 +118,18 @@ */ Event *spawn_thread(Continuation *cont, const char *thr_name, size_t stacksize = 0); - /** - Spawns a group of threads for an event type. Spawns the number of - event threads passed in (n_threads) creating a thread group and - returns the thread group id (or EventType). See the remarks section - for Thread Groups. + /** Spawn a group of @a n_threads event dispatching threads. - @return EventType or thread id for the new group of threads. + The threads run an event loop which dispatches events scheduled for a specific thread or the event type. 
+ + @return EventType or thread id for the new group of threads (@a ev_type) */ - EventType spawn_event_threads(int n_threads, const char *et_name, size_t stacksize); + EventType spawn_event_threads(EventType ev_type, int n_threads, size_t stacksize = DEFAULT_STACKSIZE); + + /// Convenience overload. + /// This registers @a name as an event type using @c register_event_type and then calls the real @c spawn_event_threads + EventType spawn_event_threads(const char *name, int n_thread, size_t stacksize = DEFAULT_STACKSIZE); /** Schedules the continuation on a specific EThread to receive an event @@ -228,7 +236,22 @@ Event *reschedule_in(Event *e, ink_hrtime atimeout_in, int callback_event = EVENT_INTERVAL); Event *reschedule_every(Event *e, ink_hrtime aperiod, int callback_event = EVENT_INTERVAL); + /// Schedule an @a event on continuation @a c when a thread of type @a ev_type is spawned. + /// The @a cookie is attached to the event instance passed to the continuation. + /// @return The scheduled event. + Event *schedule_spawn(Continuation *c, EventType ev_type, int event = EVENT_IMMEDIATE, void *cookie = NULL); + + /// Schedule the function @a f to be called in a thread of type @a ev_type when it is spawned. + Event *schedule_spawn(void (*f)(EThread *), EventType ev_type); + + /// Schedule an @a event on continuation @a c to be called when a thread is spawned by this processor. + /// The @a cookie is attached to the event instance passed to the continuation. + /// @return The scheduled event. + // Event *schedule_spawn(Continuation *c, int event, void *cookie = NULL); + EventProcessor(); + EventProcessor(const EventProcessor &) = delete; + EventProcessor &operator=(const EventProcessor &) = delete; /** Initializes the EventProcessor and its associated threads. Spawns the @@ -239,7 +262,7 @@ @return 0 if successful, and a negative value otherwise. 
*/ - int start(int n_net_threads, size_t stacksize = DEFAULT_STACKSIZE); + int start(int n_net_threads, size_t stacksize = DEFAULT_STACKSIZE) override; /** Stop the EventProcessor. Attempts to stop the EventProcessor and @@ -273,10 +296,24 @@ class EventProcessor : public Processor thread group id and the second the EThread pointers for that group. */ - EThread *eventthread[MAX_EVENT_TYPES][MAX_THREADS_IN_EACH_TYPE]; - - unsigned int next_thread_for_type[MAX_EVENT_TYPES]; - int n_threads_for_type[MAX_EVENT_TYPES]; + // EThread *eventthread[MAX_EVENT_TYPES][MAX_THREADS_IN_EACH_TYPE]; + + /// Data kept for each thread group. + /// The thread group ID is the index into an array of these and so is not stored explicitly. + struct ThreadGroupDescriptor { + ats_scoped_str _name; ///< Name for the thread group. + int _count; ///< # of threads of this type. + int _next_round_robin; ///< Index of thread to use for events assigned to this group. + Que(Event, link) _spawnQueue; ///< Events to dispatch when thread is spawned. + /// The actual threads in this group. + EThread *_thread[MAX_THREADS_IN_EACH_TYPE]; + }; + + /// Storage for per group data. + ThreadGroupDescriptor thread_group[MAX_EVENT_TYPES]; + + /// Number of defined thread groups. + int n_thread_groups; /** Total number of threads controlled by this EventProcessor. This is @@ -286,17 +323,23 @@ class EventProcessor : public Processor */ int n_ethreads; - /** - Total number of thread groups created so far. This is the count of - all the thread groups (event types) created for this EventProcessor. - - */ - int n_thread_groups; - -private: - // prevent unauthorized copies (Not implemented) - EventProcessor(const EventProcessor &); - EventProcessor &operator=(const EventProcessor &); + // Containerized access to threads. 
+ struct active_thread_group_type { + using iterator = EThread **; + iterator _begin; + iterator _end; + active_thread_group_type(EThread **b, int n) : _begin(b), _end(b + n) {} + iterator + begin() + { + return _begin; + } + iterator + end() + { + return _end; + } + }; public: /*------------------------------------------------------*\ @@ -307,8 +350,43 @@ class EventProcessor : public Processor EThread *assign_thread(EventType etype); EThread *all_dthreads[MAX_EVENT_THREADS]; - int n_dthreads; // No. of dedicated threads + volatile int n_dthreads; // No. of dedicated threads volatile int thread_data_used; + + active_thread_group_type + active_ethreads() + { + return {all_ethreads, n_ethreads}; + } + active_thread_group_type + active_dthreads() + { + return {all_dthreads, n_dthreads}; + } + +private: + void initThreadState(EThread *); + + /// Used to generate a callback at the start of thread execution. + class ThreadInit : public Continuation + { + typedef ThreadInit self; + EventProcessor *_evp; + + public: + ThreadInit(EventProcessor *evp) : _evp(evp) { SET_HANDLER(&self::init); } + int + init(int /* event ATS_UNUSED */, Event *ev) + { + _evp->initThreadState(ev->ethread); + return 0; + } + }; + friend class ThreadInit; + ThreadInit thread_initializer; + + // Lock write access to the dedicated thread vector. 
+ Ptr dedicated_thread_spawn_lock{new_ProxyMutex()}; }; extern inkcoreapi class EventProcessor eventProcessor; diff --git a/iocore/eventsystem/I_Thread.h b/iocore/eventsystem/I_Thread.h index cdbcceb464e..794bbaa9f27 100644 --- a/iocore/eventsystem/I_Thread.h +++ b/iocore/eventsystem/I_Thread.h @@ -63,16 +63,20 @@ #error "include I_EventSystem.h or P_EventSystem.h" #endif +#include + #include "ts/ink_platform.h" #include "ts/ink_thread.h" #include "I_ProxyAllocator.h" -class Thread; class ProxyMutex; -typedef void *(*ThreadFunction)(void *arg); -static const int MAX_THREAD_NAME_LENGTH = 16; -static const int DEFAULT_STACKSIZE = 1048576; // 1MB +constexpr int MAX_THREAD_NAME_LENGTH = 16; + +#define THREADAPI + +typedef void *THREADAPI_RETURN_TYPE; +typedef std::function ThreadFunction; /** Base class for the threads in the Event System. Thread is the base @@ -145,7 +149,7 @@ class Thread Thread &operator=(const Thread &); public: - ink_thread start(const char *name, size_t stacksize, ThreadFunction f, void *a, void *stack); + ink_thread start(const char *name, void *stack, size_t stacksize, ThreadFunction const &f = ThreadFunction()); virtual void execute() diff --git a/iocore/eventsystem/I_VConnection.h b/iocore/eventsystem/I_VConnection.h index b6721aac189..27c6d97aafc 100644 --- a/iocore/eventsystem/I_VConnection.h +++ b/iocore/eventsystem/I_VConnection.h @@ -312,9 +312,6 @@ class VConnection : public Continuation VConnection(ProxyMutex *aMutex); VConnection(Ptr &aMutex); - /** @deprecated */ - VIO *do_io(int op, Continuation *c = nullptr, int64_t nbytes = INT64_MAX, MIOBuffer *buf = 0, int data = 0); - // Private // Set continuation on a given vio. 
The public interface // is through VIO::set_continuation() diff --git a/iocore/eventsystem/P_UnixEThread.h b/iocore/eventsystem/P_UnixEThread.h index 92810a09d04..0b68e5e7000 100644 --- a/iocore/eventsystem/P_UnixEThread.h +++ b/iocore/eventsystem/P_UnixEThread.h @@ -36,13 +36,6 @@ const int DELAY_FOR_RETRY = HRTIME_MSECONDS(10); -TS_INLINE Event * -EThread::schedule_spawn(Continuation *cont) -{ - Event *e = EVENT_ALLOC(eventAllocator, this); - return schedule(e->init(cont, 0, 0)); -} - TS_INLINE Event * EThread::schedule_imm(Continuation *cont, int callback_event, void *cookie) { @@ -156,6 +149,21 @@ EThread::schedule_local(Event *e) return e; } +TS_INLINE Event * +EThread::schedule_spawn(Continuation *c, int ev, void *cookie) +{ + ink_assert(this != this_ethread()); // really broken to call this from the same thread. + if (start_event) + free_event(start_event); + start_event = EVENT_ALLOC(eventAllocator, this); + start_event->ethread = this; + start_event->mutex = this->mutex; + start_event->init(c); + start_event->callback_event = ev; + start_event->cookie = cookie; + return start_event; +} + TS_INLINE EThread * this_ethread() { diff --git a/iocore/eventsystem/P_UnixEventProcessor.h b/iocore/eventsystem/P_UnixEventProcessor.h index 5ee486761d7..2c5d9d07d4d 100644 --- a/iocore/eventsystem/P_UnixEventProcessor.h +++ b/iocore/eventsystem/P_UnixEventProcessor.h @@ -30,12 +30,13 @@ const int LOAD_BALANCE_INTERVAL = 1; TS_INLINE -EventProcessor::EventProcessor() : n_ethreads(0), n_thread_groups(0), n_dthreads(0), thread_data_used(0) +EventProcessor::EventProcessor() : n_thread_groups(0), n_ethreads(0), n_dthreads(0), thread_data_used(0), thread_initializer(this) { - memset(all_ethreads, 0, sizeof(all_ethreads)); - memset(all_dthreads, 0, sizeof(all_dthreads)); - memset(n_threads_for_type, 0, sizeof(n_threads_for_type)); - memset(next_thread_for_type, 0, sizeof(next_thread_for_type)); + ink_zero(all_ethreads); + ink_zero(all_dthreads); + ink_zero(thread_group); + // 
Because ET_NET is compile time set to 0 it *must* be the first type registered. + this->register_event_type("ET_NET"); } TS_INLINE off_t @@ -59,13 +60,14 @@ TS_INLINE EThread * EventProcessor::assign_thread(EventType etype) { int next; + ThreadGroupDescriptor *tg = &thread_group[etype]; ink_assert(etype < MAX_EVENT_TYPES); - if (n_threads_for_type[etype] > 1) - next = next_thread_for_type[etype]++ % n_threads_for_type[etype]; + if (tg->_count > 1) + next = tg->_next_round_robin++ % tg->_count; else next = 0; - return (eventthread[etype][next]); + return tg->_thread[next]; } TS_INLINE Event * diff --git a/iocore/eventsystem/P_VConnection.h b/iocore/eventsystem/P_VConnection.h index ac8fbfcd414..a9eea7407e2 100644 --- a/iocore/eventsystem/P_VConnection.h +++ b/iocore/eventsystem/P_VConnection.h @@ -71,18 +71,6 @@ VConnection::~VConnection() { } -////////////////////////////////////////////////////////////////////////////// -// -// DEPRECATED DEPRECATED DEPRECATED -// -// inline VIO * VConnection::do_io() -// -// This method enqueues a VIO operation onto the VIO queue, and -// activates the I/O operation if and operation of that type isn't -// already underway. 
-// -////////////////////////////////////////////////////////////////////////////// - TS_INLINE VIO * vc_do_io_write(VConnection *vc, Continuation *cont, int64_t nbytes, MIOBuffer *buf, int64_t offset) { @@ -94,34 +82,6 @@ vc_do_io_write(VConnection *vc, Continuation *cont, int64_t nbytes, MIOBuffer *b return vc->do_io_write(cont, nbytes, reader, true); } -TS_INLINE VIO * -VConnection::do_io(int op, Continuation *c, int64_t nbytes, MIOBuffer *cb, int data) -{ - switch (op) { - case VIO::READ: - return do_io_read(c, nbytes, cb); - case VIO::WRITE: - return vc_do_io_write(this, c, nbytes, cb, data); - case VIO::CLOSE: - do_io_close(); - return nullptr; - case VIO::ABORT: - do_io_close(data); - return nullptr; - case VIO::SHUTDOWN_READ: - do_io_shutdown(IO_SHUTDOWN_READ); - return nullptr; - case VIO::SHUTDOWN_WRITE: - do_io_shutdown(IO_SHUTDOWN_WRITE); - return nullptr; - case VIO::SHUTDOWN_READWRITE: - do_io_shutdown(IO_SHUTDOWN_READWRITE); - return nullptr; - } - ink_assert(!"cannot use default implementation for do_io operation"); - return nullptr; -} - TS_INLINE void VConnection::set_continuation(VIO *, Continuation *) { diff --git a/iocore/eventsystem/Tasks.cc b/iocore/eventsystem/Tasks.cc index 58dc1ef1f55..63f72baaddb 100644 --- a/iocore/eventsystem/Tasks.cc +++ b/iocore/eventsystem/Tasks.cc @@ -33,7 +33,7 @@ int TasksProcessor::start(int task_threads, size_t stacksize) { if (task_threads > 0) { - ET_TASK = eventProcessor.spawn_event_threads(task_threads, "ET_TASK", stacksize); + ET_TASK = eventProcessor.spawn_event_threads("ET_TASK", task_threads, stacksize); } return 0; } diff --git a/iocore/eventsystem/Thread.cc b/iocore/eventsystem/Thread.cc index af0409916b6..d54d85374d9 100644 --- a/iocore/eventsystem/Thread.cc +++ b/iocore/eventsystem/Thread.cc @@ -37,10 +37,10 @@ static ink_thread_key init_thread_key(); -ink_hrtime Thread::cur_time = 0; +ink_hrtime Thread::cur_time = ink_get_hrtime_internal(); inkcoreapi ink_thread_key Thread::thread_data_key = 
init_thread_key(); -Thread::Thread() +Thread::Thread() : tid(0) { mutex = new_ProxyMutex(); MUTEX_TAKE_LOCK(mutex, (EThread *)this); @@ -73,7 +73,6 @@ init_thread_key() struct thread_data_internal { ThreadFunction f; - void *a; Thread *me; char name[MAX_THREAD_NAME_LENGTH]; }; @@ -81,32 +80,31 @@ struct thread_data_internal { static void * spawn_thread_internal(void *a) { - thread_data_internal *p = (thread_data_internal *)a; + auto *p = static_cast(a); p->me->set_specific(); ink_set_thread_name(p->name); + if (p->f) - p->f(p->a); + p->f(); else p->me->execute(); - ats_free(a); + + delete p; return nullptr; } ink_thread -Thread::start(const char *name, size_t stacksize, ThreadFunction f, void *a, void *stack) +Thread::start(const char *name, void *stack, size_t stacksize, ThreadFunction const &f) { - thread_data_internal *p = (thread_data_internal *)ats_malloc(sizeof(thread_data_internal)); + auto *p = new thread_data_internal{f, this, ""}; - p->f = f; - p->a = a; - p->me = this; - memset(p->name, 0, MAX_THREAD_NAME_LENGTH); + ink_zero(p->name); ink_strlcpy(p->name, name, MAX_THREAD_NAME_LENGTH); if (stacksize == 0) { stacksize = DEFAULT_STACKSIZE; } - tid = ink_thread_create(spawn_thread_internal, (void *)p, 0, stacksize, stack); + tid = ink_thread_create(spawn_thread_internal, p, 0, stacksize, stack); return tid; } diff --git a/iocore/eventsystem/UnixEThread.cc b/iocore/eventsystem/UnixEThread.cc index 1c508b0b7bd..3ccb25dcbfd 100644 --- a/iocore/eventsystem/UnixEThread.cc +++ b/iocore/eventsystem/UnixEThread.cc @@ -37,17 +37,15 @@ struct AIOCallback; #define MAX_HEARTBEATS_MISSED 10 #define NO_HEARTBEAT -1 #define THREAD_MAX_HEARTBEAT_MSECONDS 60 -#define NO_ETHREAD_ID -1 volatile bool shutdown_event_system = false; -EThread::EThread() : generator((uint64_t)Thread::get_hrtime_updated() ^ (uint64_t)(uintptr_t)this), id(NO_ETHREAD_ID) +EThread::EThread() { memset(thread_private, 0, PER_THREAD_DATA); } -EThread::EThread(ThreadType att, int anid) - : 
generator((uint64_t)Thread::get_hrtime_updated() ^ (uint64_t)(uintptr_t)this), id(anid), tt(att) +EThread::EThread(ThreadType att, int anid) : id(anid), tt(att) { ethreads_to_be_signalled = (EThread **)ats_malloc(MAX_EVENT_THREADS * sizeof(EThread *)); memset((char *)ethreads_to_be_signalled, 0, MAX_EVENT_THREADS * sizeof(EThread *)); @@ -75,15 +73,7 @@ EThread::EThread(ThreadType att, int anid) #endif } -EThread::EThread(ThreadType att, Event *e) - : generator((uint32_t)((uintptr_t)time(nullptr) ^ (uintptr_t)this)), - ethreads_to_be_signalled(nullptr), - n_ethreads_to_be_signalled(0), - id(NO_ETHREAD_ID), - event_types(0), - signal_hook(nullptr), - tt(att), - oneevent(e) +EThread::EThread(ThreadType att, Event *e) : tt(att), start_event(e) { ink_assert(att == DEDICATED); memset(thread_private, 0, PER_THREAD_DATA); @@ -103,7 +93,7 @@ EThread::~EThread() bool EThread::is_event_type(EventType et) { - return !!(event_types & (1 << (int)et)); + return (event_types & (1 << static_cast(et))) != 0; } void @@ -161,6 +151,16 @@ EThread::process_event(Event *e, int calling_code) void EThread::execute() { + // Do the start event first. 
+ // coverity[lock] + if (start_event) { + MUTEX_TAKE_LOCK_FOR(start_event->mutex, this, start_event->continuation); + start_event->continuation->handleEvent(EVENT_IMMEDIATE, start_event); + MUTEX_UNTAKE_LOCK(start_event->mutex, this); + free_event(start_event); + start_event = nullptr; + } + switch (tt) { case REGULAR: { Event *e; @@ -274,11 +274,6 @@ EThread::execute() } case DEDICATED: { - // coverity[lock] - MUTEX_TAKE_LOCK_FOR(oneevent->mutex, this, oneevent->continuation); - oneevent->continuation->handleEvent(EVENT_IMMEDIATE, oneevent); - MUTEX_UNTAKE_LOCK(oneevent->mutex, this); - free_event(oneevent); break; } diff --git a/iocore/eventsystem/UnixEventProcessor.cc b/iocore/eventsystem/UnixEventProcessor.cc index 853d347a13b..9048208a3dc 100644 --- a/iocore/eventsystem/UnixEventProcessor.cc +++ b/iocore/eventsystem/UnixEventProcessor.cc @@ -32,226 +32,223 @@ #include "ts/ink_defs.h" #include "ts/hugepages.h" -EventType -EventProcessor::spawn_event_threads(int n_threads, const char *et_name, size_t stacksize) -{ - char thr_name[MAX_THREAD_NAME_LENGTH]; - EventType new_thread_group_id; - int i; - - ink_release_assert(n_threads > 0); - ink_release_assert((n_ethreads + n_threads) <= MAX_EVENT_THREADS); - ink_release_assert(n_thread_groups < MAX_EVENT_TYPES); - - new_thread_group_id = (EventType)n_thread_groups; - - for (i = 0; i < n_threads; i++) { - EThread *t = new EThread(REGULAR, n_ethreads + i); - all_ethreads[n_ethreads + i] = t; - eventthread[new_thread_group_id][i] = t; - t->set_event_type(new_thread_group_id); - } - - n_threads_for_type[new_thread_group_id] = n_threads; - for (i = 0; i < n_threads; i++) { - snprintf(thr_name, MAX_THREAD_NAME_LENGTH, "[%s %d]", et_name, i); - eventthread[new_thread_group_id][i]->start(thr_name, stacksize, nullptr, nullptr, nullptr); - } - - n_thread_groups++; - n_ethreads += n_threads; - Debug("iocore_thread", "Created thread group '%s' id %d with %d threads", et_name, new_thread_group_id, n_threads); - - return 
new_thread_group_id; -} +/// Global singleton. +class EventProcessor eventProcessor; -static void * -alloc_stack(size_t stacksize) +class ThreadAffinityInitializer : public Continuation { - void *stack = nullptr; - - if (ats_hugepage_enabled()) { - stack = ats_alloc_hugepage(stacksize); - } - - if (stack == nullptr) { - stack = ats_memalign(ats_pagesize(), stacksize); - } + typedef ThreadAffinityInitializer self; + +public: + /// Default construct. + ThreadAffinityInitializer() { SET_HANDLER(&self::set_affinity); } + /// Load up basic affinity data. + void init(); + /// Set the affinity for the current thread. + int set_affinity(int, Event *); + +#if defined(HAVE_HWLOC_OBJ_PU) +private: + hwloc_ob_type _type; + int _count; + char const *_name; +#endif +}; - return stack; -} +ThreadAffinityInitializer Thread_Affinity_Initializer; -#if TS_USE_HWLOC -static void * -alloc_numa_stack(hwloc_cpuset_t cpuset, size_t stacksize) +namespace { - hwloc_membind_policy_t mem_policy = HWLOC_MEMBIND_DEFAULT; - hwloc_nodeset_t nodeset = hwloc_bitmap_alloc(); - int num_nodes = 0; - void *stack = nullptr; - - // Find the NUMA node set that correlates to our next thread CPU set - hwloc_cpuset_to_nodeset(ink_get_topology(), cpuset, nodeset); - // How many NUMA nodes will we be needing to allocate across? - num_nodes = hwloc_get_nbobjs_inside_cpuset_by_type(ink_get_topology(), cpuset, HWLOC_OBJ_NODE); - - if (num_nodes == 1) { - // The preferred memory policy. The thread lives in one NUMA node. - mem_policy = HWLOC_MEMBIND_BIND; - } else if (num_nodes > 1) { - // If we have mode than one NUMA node we should interleave over them. 
- mem_policy = HWLOC_MEMBIND_INTERLEAVE; - } - - if (mem_policy != HWLOC_MEMBIND_DEFAULT) { - // Let's temporarily set the memory binding to our destination NUMA node - hwloc_set_membind_nodeset(ink_get_topology(), nodeset, mem_policy, HWLOC_MEMBIND_THREAD); - } - - // Alloc our stack - stack = alloc_stack(stacksize); - - if (mem_policy != HWLOC_MEMBIND_DEFAULT) { - // Now let's set it back to default for this thread. - hwloc_set_membind_nodeset(ink_get_topology(), hwloc_topology_get_topology_nodeset(ink_get_topology()), HWLOC_MEMBIND_DEFAULT, - HWLOC_MEMBIND_THREAD); +class ThreadInitByFunc : public Continuation +{ +public: + ThreadInitByFunc() { SET_HANDLER(&ThreadInitByFunc::invoke); } + int + invoke(int, Event *ev) + { + void (*f)(EThread *) = reinterpret_cast(ev->cookie); + f(ev->ethread); + return 0; } - - hwloc_bitmap_free(nodeset); - - return stack; +} Thread_Init_Func; } -#endif // TS_USE_HWLOC - -class EventProcessor eventProcessor; -int -EventProcessor::start(int n_event_threads, size_t stacksize) +void +ThreadAffinityInitializer::init() { - char thr_name[MAX_THREAD_NAME_LENGTH]; - int i; - void *stack = nullptr; - - // do some sanity checking. 
- static int started = 0; - ink_release_assert(!started); - ink_release_assert(n_event_threads > 0 && n_event_threads <= MAX_EVENT_THREADS); - started = 1; - - n_ethreads = n_event_threads; - n_thread_groups = 1; - - // Make sure that our thread stack size is at least the minimum size - stacksize = MAX(stacksize, INK_THREAD_STACK_MIN); - - // Make sure it is a multiple of our page size - if (ats_hugepage_enabled()) { - stacksize = INK_ALIGN(stacksize, ats_hugepage_size()); - } else { - stacksize = INK_ALIGN(stacksize, ats_pagesize()); - } - - Debug("iocore_thread", "Thread stack size set to %zu", stacksize); - - for (i = 0; i < n_event_threads; i++) { - EThread *t = new EThread(REGULAR, i); - all_ethreads[i] = t; - - eventthread[ET_CALL][i] = t; - t->set_event_type((EventType)ET_CALL); - } - n_threads_for_type[ET_CALL] = n_event_threads; - #if TS_USE_HWLOC int affinity = 1; REC_ReadConfigInteger(affinity, "proxy.config.exec_thread.affinity"); - hwloc_obj_t obj; - hwloc_obj_type_t obj_type; - int obj_count = 0; - char *obj_name; switch (affinity) { case 4: // assign threads to logical processing units // Older versions of libhwloc (eg. Ubuntu 10.04) don't have HWLOC_OBJ_PU. 
#if HAVE_HWLOC_OBJ_PU - obj_type = HWLOC_OBJ_PU; - obj_name = (char *)"Logical Processor"; + _type = HWLOC_OBJ_PU; + _name = "Logical Processor"; break; #endif case 3: // assign threads to real cores - obj_type = HWLOC_OBJ_CORE; - obj_name = (char *)"Core"; + _type = HWLOC_OBJ_CORE; + _name = "Core"; break; case 1: // assign threads to NUMA nodes (often 1:1 with sockets) - obj_type = HWLOC_OBJ_NODE; - obj_name = (char *)"NUMA Node"; - if (hwloc_get_nbobjs_by_type(ink_get_topology(), obj_type) > 0) { + _type = HWLOC_OBJ_NODE; + _name = "NUMA Node"; + if (hwloc_get_nbobjs_by_type(ink_get_topology(), _type) > 0) { break; } case 2: // assign threads to sockets - obj_type = HWLOC_OBJ_SOCKET; - obj_name = (char *)"Socket"; + _type = HWLOC_OBJ_SOCKET; + _name = "Socket"; break; default: // assign threads to the machine as a whole (a level below SYSTEM) - obj_type = HWLOC_OBJ_MACHINE; - obj_name = (char *)"Machine"; + _type = HWLOC_OBJ_MACHINE; + _name = "Machine"; } - // How many of the above `obj_type` do we have in our topology? - obj_count = hwloc_get_nbobjs_by_type(ink_get_topology(), obj_type); + _count = hwloc_get_nbobjs_by_type(ink_get_topology(), _type); Debug("iocore_thread", "Affinity: %d %ss: %d PU: %d", affinity, obj_name, obj_count, ink_number_of_processors()); - #endif - for (i = 0; i < n_ethreads; i++) { - ink_thread tid; +} +int +ThreadAffinityInitializer::set_affinity(int, Event *) +{ #if TS_USE_HWLOC - if (obj_count > 0) { - // Get our `obj` instance with index based on the thread number we are on. 
- obj = hwloc_get_obj_by_type(ink_get_topology(), obj_type, i % obj_count); + hwloc_obj_t obj; + EThread *t = this_ethread(); + + if (_count > 0) { + obj = hwloc_get_obj_by_type(ink_get_topology(), _type, t->id % _count); #if HWLOC_API_VERSION >= 0x00010100 - // Pretty print our CPU set - int cpu_mask_len = hwloc_bitmap_snprintf(nullptr, 0, obj->cpuset) + 1; - char *cpu_mask = (char *)alloca(cpu_mask_len); - hwloc_bitmap_snprintf(cpu_mask, cpu_mask_len, obj->cpuset); - Debug("iocore_thread", "EThread: %d %s: %d CPU Mask: %s", i, obj_name, obj->logical_index, cpu_mask); + int cpu_mask_len = hwloc_bitmap_snprintf(NULL, 0, obj->cpuset) + 1; + char *cpu_mask = (char *)alloca(cpu_mask_len); + hwloc_bitmap_snprintf(cpu_mask, cpu_mask_len, obj->cpuset); + Debug("iocore_thread", "EThread: %d %s: %d CPU Mask: %s\n", i, _name, obj->logical_index, cpu_mask); #else - Debug("iocore_thread", "EThread: %d %s: %d", i, obj_name, obj->logical_index); + Debug("iocore_thread", "EThread: %d %s: %d", i, _name, obj->logical_index); #endif // HWLOC_API_VERSION - } + hwloc_set_thread_cpubind(ink_get_topology(), t->tid, obj->cpuset, HWLOC_CPUBIND_STRICT); + } else { + Warning("hwloc returned an unexpected number of objects -- CPU affinity disabled"); + } #endif // TS_USE_HWLOC + return 0; +} - // Name our thread - snprintf(thr_name, MAX_THREAD_NAME_LENGTH, "[ET_NET %d]", i); -#if TS_USE_HWLOC - // Lets create a NUMA local stack if we can - if (obj_count > 0) { - stack = alloc_numa_stack(obj->cpuset, stacksize); - } else { - // Lets just alloc a stack even with no NUMA knowledge - stack = alloc_stack(stacksize); - } -#else - // Lets just alloc a stack even with no NUMA knowledge - stack = alloc_stack(stacksize); -#endif // TS_USE_HWLOC +Event * +EventProcessor::schedule_spawn(Continuation *c, EventType ev_type, int event, void *cookie) +{ + Event *e = eventAllocator.alloc(); + ink_assert(ev_type < MAX_EVENT_TYPES); - // Start our new thread with our new stack. 
- tid = all_ethreads[i]->start(thr_name, stacksize, nullptr, nullptr, stack); - stack = nullptr; + e->mutex = c->mutex; + e->callback_event = event; + e->cookie = cookie; + e->init(c); + thread_group[ev_type]._spawnQueue.enqueue(e); -#if TS_USE_HWLOC - if (obj_count > 0) { - // Lets bind our new thread to it's CPU set - hwloc_set_thread_cpubind(ink_get_topology(), tid, obj->cpuset, HWLOC_CPUBIND_STRICT); - } else { - Warning("hwloc returned an unexpected value -- CPU affinity disabled"); + return e; +} + +Event * +EventProcessor::schedule_spawn(void (*f)(EThread *), EventType ev_type) +{ + Event *e = eventAllocator.alloc(); + ink_assert(ev_type < MAX_EVENT_TYPES); + + e->callback_event = EVENT_IMMEDIATE; + e->cookie = reinterpret_cast(f); + e->init(&Thread_Init_Func); + thread_group[ev_type]._spawnQueue.enqueue(e); + + return e; +} + +EventType +EventProcessor::register_event_type(char const *name) +{ + ThreadGroupDescriptor *tg = &(thread_group[n_thread_groups++]); + ink_release_assert(n_thread_groups <= MAX_EVENT_TYPES); // check for overflow + + tg->_name = ats_strdup(name); + return n_thread_groups - 1; +} + +EventType +EventProcessor::spawn_event_threads(char const *name, int n_threads, size_t stacksize) +{ + int ev_type = this->register_event_type(name); + this->spawn_event_threads(ev_type, n_threads, stacksize); + return ev_type; +} + +EventType +EventProcessor::spawn_event_threads(EventType ev_type, int n_threads, size_t stacksize) +{ + char thr_name[MAX_THREAD_NAME_LENGTH]; + int i; + ThreadGroupDescriptor *tg = &(thread_group[ev_type]); + + ink_release_assert(n_threads > 0); + ink_release_assert((n_ethreads + n_threads) <= MAX_EVENT_THREADS); + ink_release_assert(ev_type < MAX_EVENT_TYPES); + + for (i = 0; i < n_threads; ++i) { + EThread *t = new EThread(REGULAR, n_ethreads + i); + all_ethreads[n_ethreads + i] = t; + tg->_thread[i] = t; + t->set_event_type(ev_type); + t->schedule_spawn(&thread_initializer); + } + tg->_count = n_threads; + + for (i = 0; i 
< n_threads; i++) { + snprintf(thr_name, MAX_THREAD_NAME_LENGTH, "[%s %d]", tg->_name.get(), i); + tg->_thread[i]->start(thr_name, nullptr, stacksize); + } + + n_ethreads += n_threads; + Debug("iocore_thread", "Created thread group '%s' id %d with %d threads", tg->_name.get(), ev_type, n_threads); + + return ev_type; // useless but not sure what would be better. +} + +void +EventProcessor::initThreadState(EThread *t) +{ + // Run all thread type initialization continuations that match the event types for this thread. + for (int i = 0; i < MAX_EVENT_TYPES; ++i) { + if (t->is_event_type(i)) { // that event type done here, roll thread start events of that type. + // To avoid race conditions on the event in the spawn queue, create a local one to actually send. + // Use the spawn queue event as a read only model. + Event *nev = eventAllocator.alloc(); + for (Event *ev = thread_group[i]._spawnQueue.head; NULL != ev; ev = ev->link.next) { + nev->init(ev->continuation, 0, 0); + nev->ethread = t; + nev->callback_event = ev->callback_event; + nev->mutex = ev->continuation->mutex; + nev->cookie = ev->cookie; + ev->continuation->handleEvent(ev->callback_event, nev); + } + nev->free(); } -#else - // Lets ignore tid if we don't link with HWLOC - (void)tid; -#endif // TS_USE_HWLOC } +} + +int +EventProcessor::start(int n_event_threads, size_t stacksize) +{ + // do some sanity checking. 
+ static int started = 0; + ink_release_assert(!started); + ink_release_assert(n_event_threads > 0 && n_event_threads <= MAX_EVENT_THREADS); + started = 1; + + Thread_Affinity_Initializer.init(); + this->schedule_spawn(&Thread_Affinity_Initializer, ET_CALL); + this->spawn_event_threads(ET_CALL, n_event_threads, stacksize); Debug("iocore_thread", "Created event thread group id %d with %d threads", ET_CALL, n_event_threads); return 0; @@ -265,15 +262,38 @@ EventProcessor::shutdown() Event * EventProcessor::spawn_thread(Continuation *cont, const char *thr_name, size_t stacksize) { - ink_release_assert(n_dthreads < MAX_EVENT_THREADS); + /* Spawning threads in a live system - There are two potential race conditions in this logic. The + first is multiple calls to this method. In that case @a all_dthreads can end up in a bad state + as the same entry is overwritten while another is left unitialized. + + The other is read/write contention where another thread (e.g. the stats collection thread) is + iterating over the threads while the active count (@a n_dthreads) is being updated causing use + of not yet initialized array element. + + This logic covers both situations. For write/write the actual array update is locked. The + potentially expensive set up is done outside the lock making the time spent locked small For + read/write it suffices to do the active count increment after initializing the array + element. It's not a problem if, for one cycle, a new thread is skipped. + */ + + // Do as much as possible outside the lock. Until the array element and count is changed + // this is thread safe. 
Event *e = eventAllocator.alloc(); - e->init(cont, 0, 0); - all_dthreads[n_dthreads] = new EThread(DEDICATED, e); - e->ethread = all_dthreads[n_dthreads]; - e->mutex = e->continuation->mutex = all_dthreads[n_dthreads]->mutex; - n_dthreads++; - e->ethread->start(thr_name, stacksize, nullptr, nullptr, nullptr); + EThread *et = new EThread(DEDICATED, e); + e->mutex = et->mutex; + cont->mutex = et->mutex; + e->ethread = et; + + { + SCOPED_MUTEX_LOCK(_, dedicated_thread_spawn_lock, this_ethread()); + + ink_release_assert(n_dthreads < MAX_EVENT_THREADS); + + all_dthreads[n_dthreads++] = et; + } + + e->ethread->start(thr_name, nullptr, stacksize); return e; } diff --git a/iocore/net/I_NetProcessor.h b/iocore/net/I_NetProcessor.h index 8b030061085..72aa0b90bbc 100644 --- a/iocore/net/I_NetProcessor.h +++ b/iocore/net/I_NetProcessor.h @@ -198,14 +198,10 @@ class NetProcessor : public Processor Action *connect_s(Continuation *cont, sockaddr const *addr, int timeout = NET_CONNECT_TIMEOUT, NetVCOptions *opts = nullptr); /** - Starts the Netprocessor. This has to be called before doing any - other net call. - - @param number_of_net_threads is not used. The net processor - uses the Event Processor threads for its activity. + Initializes the net processor. This must be called before the event threads are started. 
*/ - virtual int start(int number_of_net_threads, size_t stacksize) = 0; + virtual void init() = 0; inkcoreapi virtual NetVConnection *allocate_vc(EThread *) = 0; diff --git a/iocore/net/P_UnixNetProcessor.h b/iocore/net/P_UnixNetProcessor.h index 3f3de01fa36..408f954bb10 100644 --- a/iocore/net/P_UnixNetProcessor.h +++ b/iocore/net/P_UnixNetProcessor.h @@ -43,7 +43,7 @@ struct UnixNetProcessor : public NetProcessor { virtual NetAccept *createNetAccept(const NetProcessor::AcceptOptions &opt); virtual NetVConnection *allocate_vc(EThread *t); - virtual int start(int number_of_net_threads, size_t stacksize); + virtual void init() override; Event *accept_thread_event; diff --git a/iocore/net/SSLNetProcessor.cc b/iocore/net/SSLNetProcessor.cc index 658d1eb7f97..b1b2b27e0a9 100644 --- a/iocore/net/SSLNetProcessor.cc +++ b/iocore/net/SSLNetProcessor.cc @@ -71,7 +71,7 @@ SSLNetProcessor::start(int, size_t stacksize) #ifdef HAVE_OPENSSL_OCSP_STAPLING if (SSLConfigParams::ssl_ocsp_enabled) { - EventType ET_OCSP = eventProcessor.spawn_event_threads(1, "ET_OCSP", stacksize); + EventType ET_OCSP = eventProcessor.spawn_event_threads("ET_OCSP", 1, stacksize); eventProcessor.schedule_every(new OCSPContinuation(), HRTIME_SECONDS(SSLConfigParams::ssl_ocsp_update_period), ET_OCSP); } #endif /* HAVE_OPENSSL_OCSP_STAPLING */ diff --git a/iocore/net/SSLNextProtocolAccept.cc b/iocore/net/SSLNextProtocolAccept.cc index 5ec4d72bb91..cdcbf4a046e 100644 --- a/iocore/net/SSLNextProtocolAccept.cc +++ b/iocore/net/SSLNextProtocolAccept.cc @@ -85,7 +85,7 @@ struct SSLNextProtocolTrampoline : public Continuation { case VC_EVENT_INACTIVITY_TIMEOUT: // Cancel the read before we have a chance to delete the continuation netvc->do_io_read(nullptr, 0, nullptr); - netvc->do_io(VIO::CLOSE); + netvc->do_io_close(); delete this; return EVENT_ERROR; case VC_EVENT_READ_COMPLETE: @@ -109,7 +109,7 @@ struct SSLNextProtocolTrampoline : public Continuation { send_plugin_event(npnParent->endpoint, 
NET_EVENT_ACCEPT, netvc); } else { // No handler, what should we do? Best to just kill the VC while we can. - netvc->do_io(VIO::CLOSE); + netvc->do_io_close(); } delete this; @@ -136,10 +136,10 @@ SSLNextProtocolAccept::mainEvent(int event, void *edata) // the endpoint that there is an accept to handle until the read completes // and we know which protocol was negotiated. netvc->registerNextProtocolSet(&this->protoset); - netvc->do_io(VIO::READ, new SSLNextProtocolTrampoline(this, netvc->mutex), 0, this->buffer, 0); + netvc->do_io_read(new SSLNextProtocolTrampoline(this, netvc->mutex), 0, this->buffer); return EVENT_CONT; default: - netvc->do_io(VIO::CLOSE); + netvc->do_io_close(); return EVENT_DONE; } } diff --git a/iocore/net/SSLUtils.cc b/iocore/net/SSLUtils.cc index 8619b8e99c0..ef20872de71 100644 --- a/iocore/net/SSLUtils.cc +++ b/iocore/net/SSLUtils.cc @@ -67,7 +67,6 @@ #define SSL_ACTION_TAG "action" #define SSL_ACTION_TUNNEL_TAG "tunnel" #define SSL_SESSION_TICKET_ENABLED "ssl_ticket_enabled" -#define SSL_SESSION_TICKET_KEY_FILE_TAG "ticket_key_name" #define SSL_KEY_DIALOG "ssl_key_dialog" #define SSL_CERT_SEPARATE_DELIM ',' @@ -100,8 +99,6 @@ struct ssl_user_config { ssl_user_config() : opt(SSLCertContext::OPT_NONE) { REC_ReadConfigInt32(session_ticket_enabled, "proxy.config.ssl.server.session_ticket.enable"); - REC_ReadConfigStringAlloc(ticket_key_filename, "proxy.config.ssl.server.ticket_key.filename"); - Debug("ssl", "ticket key filename %s", (const char *)ticket_key_filename); } int session_ticket_enabled; @@ -110,7 +107,6 @@ struct ssl_user_config { ats_scoped_str first_cert; ats_scoped_str ca; ats_scoped_str key; - ats_scoped_str ticket_key_filename; ats_scoped_str dialog; SSLCertContext::Option opt; }; @@ -1810,11 +1806,8 @@ ssl_store_ssl_context(const SSLConfigParams *params, SSLCertLookup *lookup, cons } } - // Load the session ticket key if session tickets are not disabled and we have key name. 
- if (sslMultCertSettings->session_ticket_enabled != 0 && sslMultCertSettings->ticket_key_filename) { - ats_scoped_str ticket_key_path(Layout::relative_to(params->serverCertPathOnly, sslMultCertSettings->ticket_key_filename)); - keyblock = ssl_context_enable_tickets(ctx, ticket_key_path); - } else if (sslMultCertSettings->session_ticket_enabled != 0) { + // Load the session ticket key if session tickets are not disabled + if (sslMultCertSettings->session_ticket_enabled != 0) { keyblock = ssl_context_enable_tickets(ctx, nullptr); } @@ -1936,10 +1929,6 @@ ssl_extract_certificate(const matcher_line *line_info, ssl_user_config &sslMultC sslMultCertSettings.session_ticket_enabled = atoi(value); } - if (strcasecmp(label, SSL_SESSION_TICKET_KEY_FILE_TAG) == 0) { - sslMultCertSettings.ticket_key_filename = ats_strdup(value); - } - if (strcasecmp(label, SSL_KEY_DIALOG) == 0) { sslMultCertSettings.dialog = ats_strdup(value); } diff --git a/iocore/net/UnixNet.cc b/iocore/net/UnixNet.cc index b574032728d..00c6493a55c 100644 --- a/iocore/net/UnixNet.cc +++ b/iocore/net/UnixNet.cc @@ -603,7 +603,7 @@ void NetHandler::configure_per_thread() { // figure out the number of threads and calculate the number of connections per thread - int threads = eventProcessor.n_threads_for_type[ET_NET]; + int threads = eventProcessor.thread_group[ET_NET]._count; max_connections_per_thread_in = max_connections_in / threads; max_connections_active_per_thread_in = max_connections_active_in / threads; Debug("net_queue", "max_connections_per_thread_in updated to %d threads: %d", max_connections_per_thread_in, threads); diff --git a/iocore/net/UnixNetAccept.cc b/iocore/net/UnixNetAccept.cc index 2afd8dda4da..f1252e6dcfd 100644 --- a/iocore/net/UnixNetAccept.cc +++ b/iocore/net/UnixNetAccept.cc @@ -168,7 +168,7 @@ NetAccept::init_accept_per_thread() SET_HANDLER((NetAcceptHandler)&NetAccept::acceptEvent); period = -HRTIME_MSECONDS(net_accept_period); - n = eventProcessor.n_threads_for_type[opt.etype]; + n 
= eventProcessor.thread_group[opt.etype]._count; for (i = 0; i < n; i++) { NetAccept *a; @@ -179,7 +179,7 @@ NetAccept::init_accept_per_thread() a = this; } - EThread *t = eventProcessor.eventthread[opt.etype][i]; + EThread *t = eventProcessor.thread_group[opt.etype]._thread[i]; PollDescriptor *pd = get_PollDescriptor(t); if (a->ep.start(pd, a, EVENTIO_READ) < 0) diff --git a/iocore/net/UnixNetPages.cc b/iocore/net/UnixNetPages.cc index a20e1a8b5d7..af2c2674801 100644 --- a/iocore/net/UnixNetPages.cc +++ b/iocore/net/UnixNetPages.cc @@ -103,8 +103,8 @@ struct ShowNet : public ShowCont { vc->f.shutdown, vc->closed ? "closed " : "")); } ithread++; - if (ithread < eventProcessor.n_threads_for_type[ET_NET]) - eventProcessor.eventthread[ET_NET][ithread]->schedule_imm(this); + if (ithread < eventProcessor.thread_group[ET_NET]._count) + eventProcessor.thread_group[ET_NET]._thread[ithread]->schedule_imm(this); else { CHECK_SHOW(show("\n")); return complete(event, e); @@ -138,7 +138,7 @@ struct ShowNet : public ShowCont { "Comments" "\n")); SET_HANDLER(&ShowNet::showConnectionsOnThread); - eventProcessor.eventthread[ET_NET][0]->schedule_imm(this); // This can not use ET_TASK. + eventProcessor.thread_group[ET_NET]._thread[0]->schedule_imm(this); // This can not use ET_TASK. 
return EVENT_CONT; } @@ -166,8 +166,8 @@ struct ShowNet : public ShowCont { CHECK_SHOW(show("#Read PriorityRead BucketWrite PriorityWrite Bucket\n")); CHECK_SHOW(show("\n")); ithread++; - if (ithread < eventProcessor.n_threads_for_type[ET_NET]) - eventProcessor.eventthread[ET_NET][ithread]->schedule_imm(this); + if (ithread < eventProcessor.thread_group[ET_NET]._count) + eventProcessor.thread_group[ET_NET]._thread[ithread]->schedule_imm(this); else return complete(event, e); return EVENT_CONT; @@ -178,7 +178,7 @@ struct ShowNet : public ShowCont { { CHECK_SHOW(begin("Net Threads")); SET_HANDLER(&ShowNet::showSingleThread); - eventProcessor.eventthread[ET_NET][0]->schedule_imm(this); // This can not use ET_TASK + eventProcessor.thread_group[ET_NET]._thread[0]->schedule_imm(this); // This can not use ET_TASK return EVENT_CONT; } int diff --git a/iocore/net/UnixNetProcessor.cc b/iocore/net/UnixNetProcessor.cc index 905d268cef0..9bb6332d074 100644 --- a/iocore/net/UnixNetProcessor.cc +++ b/iocore/net/UnixNetProcessor.cc @@ -397,22 +397,20 @@ NetProcessor::connect_s(Continuation *cont, sockaddr const *target, int timeout, struct PollCont; -// This is a little odd, in that the actual threads are created before calling the processor. -int -UnixNetProcessor::start(int, size_t) +// This needs to be called before the ET_NET threads are started. 
+void +UnixNetProcessor::init() { + extern void initialize_thread_for_http_sessions(EThread * thread); EventType etype = ET_NET; netHandler_offset = eventProcessor.allocate(sizeof(NetHandler)); pollCont_offset = eventProcessor.allocate(sizeof(PollCont)); - n_netthreads = eventProcessor.n_threads_for_type[etype]; - netthreads = eventProcessor.eventthread[etype]; - for (int i = 0; i < n_netthreads; ++i) { - initialize_thread_for_net(netthreads[i]); - extern void initialize_thread_for_http_sessions(EThread * thread, int thread_index); - initialize_thread_for_http_sessions(netthreads[i], i); - } + if (0 == accept_mss) + REC_ReadConfigInteger(accept_mss, "proxy.config.net.sock_mss_in"); + + eventProcessor.schedule_spawn(&initialize_thread_for_net, etype); RecData d; d.rec_int = 0; @@ -432,21 +430,12 @@ UnixNetProcessor::start(int, size_t) } } - // commented by vijay - bug 2489945 - /*if (use_accept_thread) // 0 - { NetAccept * na = createNetAccept(); - SET_CONTINUATION_HANDLER(na,&NetAccept::acceptLoopEvent); - accept_thread_event = eventProcessor.spawn_thread(na); - if (!accept_thread_event) delete na; - } */ - /* * Stat pages */ extern Action *register_ShowNet(Continuation * c, HTTPHdr * h); if (etype == ET_NET) statPagesManager.register_http("net", register_ShowNet); - return 1; } // Virtual function allows creation of an diff --git a/iocore/net/UnixNetVConnection.cc b/iocore/net/UnixNetVConnection.cc index 55034e6182f..ac6f6c05485 100644 --- a/iocore/net/UnixNetVConnection.cc +++ b/iocore/net/UnixNetVConnection.cc @@ -773,6 +773,7 @@ UnixNetVConnection::do_io_shutdown(ShutdownHowTo_t howto) read.enabled = 0; read.vio.buffer.clear(); read.vio.nbytes = 0; + read.vio._cont = nullptr; f.shutdown = NET_VC_SHUTDOWN_READ; break; case IO_SHUTDOWN_WRITE: @@ -780,6 +781,7 @@ UnixNetVConnection::do_io_shutdown(ShutdownHowTo_t howto) write.enabled = 0; write.vio.buffer.clear(); write.vio.nbytes = 0; + write.vio._cont = nullptr; f.shutdown = NET_VC_SHUTDOWN_WRITE; break; case 
IO_SHUTDOWN_READWRITE: @@ -790,6 +792,8 @@ UnixNetVConnection::do_io_shutdown(ShutdownHowTo_t howto) read.vio.nbytes = 0; write.vio.buffer.clear(); write.vio.nbytes = 0; + read.vio._cont = nullptr; + write.vio._cont = nullptr; f.shutdown = NET_VC_SHUTDOWN_READ | NET_VC_SHUTDOWN_WRITE; break; default: diff --git a/iocore/net/UnixUDPNet.cc b/iocore/net/UnixUDPNet.cc index 092aedf5dc9..12bca90677c 100644 --- a/iocore/net/UnixUDPNet.cc +++ b/iocore/net/UnixUDPNet.cc @@ -91,16 +91,13 @@ UDPNetProcessorInternal::start(int n_upd_threads, size_t stacksize) if (n_upd_threads < 1) return -1; - ET_UDP = eventProcessor.spawn_event_threads(n_upd_threads, "ET_UDP", stacksize); - if (ET_UDP < 0) // Probably can't happen, maybe at some point EventType should be unsigned ? - return -1; + ET_UDP = eventProcessor.register_event_type("ET_UDP"); + eventProcessor.schedule_spawn(&initialize_thread_for_udp_net, ET_UDP); + eventProcessor.spawn_event_threads(ET_UDP, n_upd_threads, stacksize); pollCont_offset = eventProcessor.allocate(sizeof(PollCont)); udpNetHandler_offset = eventProcessor.allocate(sizeof(UDPNetHandler)); - for (int i = 0; i < eventProcessor.n_threads_for_type[ET_UDP]; i++) - initialize_thread_for_udp_net(eventProcessor.eventthread[ET_UDP][i]); - return 0; } diff --git a/iocore/utils/OneWayMultiTunnel.cc b/iocore/utils/OneWayMultiTunnel.cc index 4de85236950..23408d2ec1e 100644 --- a/iocore/utils/OneWayMultiTunnel.cc +++ b/iocore/utils/OneWayMultiTunnel.cc @@ -95,11 +95,11 @@ OneWayMultiTunnel::init(VConnection *vcSource, VConnection **vcTargets, int n_vc buf1->water_mark = water_mark; - vioSource = vcSource->do_io(VIO::READ, this, nbytes, buf1, 0); + vioSource = vcSource->do_io_read(this, nbytes, buf1); ink_assert(n_vcTargets <= ONE_WAY_MULTI_TUNNEL_LIMIT); for (int i = 0; i < n_vcTargets; i++) - vioTargets[i] = vcTargets[i]->do_io(VIO::WRITE, this, INT64_MAX, buf2, 0); + vioTargets[i] = vc_do_io_write(vcTargets[i], this, INT64_MAX, buf2, 0); return; } @@ -235,7 +235,7 @@ 
OneWayMultiTunnel::close_target_vio(int result, VIO *vio) if (last_connection() || !single_buffer) free_MIOBuffer(v->buffer.writer()); if (close_target) - v->vc_server->do_io(result ? VIO::ABORT : VIO::CLOSE); + v->vc_server->do_io_close(); vioTargets[i] = nullptr; n_connections--; } diff --git a/iocore/utils/OneWayTunnel.cc b/iocore/utils/OneWayTunnel.cc index 97e5ea042c6..d45e5f168cb 100644 --- a/iocore/utils/OneWayTunnel.cc +++ b/iocore/utils/OneWayTunnel.cc @@ -179,10 +179,10 @@ OneWayTunnel::init(VConnection *vcSource, VConnection *vcTarget, Continuation *a close_target = aclose_target; tunnel_till_done = true; - // Prior to constructing the OneWayTunnel, we initiated a do_io(VIO::READ) + // Prior to constructing the OneWayTunnel, we initiated a do_io_read() // on the source VC. We wish to use the same MIO buffer in the tunnel. - // do_io() read already posted on vcSource. + // do_io_read() already posted on vcSource. SET_HANDLER(&OneWayTunnel::startEvent); SourceVio->set_continuation(this); diff --git a/lib/cppapi/Plugin.cc b/lib/cppapi/Plugin.cc index 766f9ad4774..a7d2514052a 100644 --- a/lib/cppapi/Plugin.cc +++ b/lib/cppapi/Plugin.cc @@ -36,5 +36,5 @@ atscppapi::RegisterGlobalPlugin(const char *name, const char *vendor, const char info.vendor_name = vendor; info.support_email = email; if (TSPluginRegister(&info) != TS_SUCCESS) - TSError("[Plugin.cc] Plugin registration failed."); + TSError("[Plugin.cc] Plugin registration failed"); } diff --git a/lib/records/RecRawStats.cc b/lib/records/RecRawStats.cc index b23703fb604..b0f075dd31d 100644 --- a/lib/records/RecRawStats.cc +++ b/lib/records/RecRawStats.cc @@ -23,14 +23,24 @@ #include "P_RecCore.h" #include "P_RecProcess.h" +#include //------------------------------------------------------------------------- // raw_stat_get_total //------------------------------------------------------------------------- + +namespace +{ +inline RecRawStat * +thread_stat(EThread *et, RecRawStatBlock *rsb, int id) +{ + return 
(reinterpret_cast(reinterpret_cast(et) + rsb->ethr_stat_offset)) + id; +} +} + static int raw_stat_get_total(RecRawStatBlock *rsb, int id, RecRawStat *total) { - int i; RecRawStat *tlp; total->sum = 0; @@ -41,14 +51,14 @@ raw_stat_get_total(RecRawStatBlock *rsb, int id, RecRawStat *total) total->count = rsb->global[id]->count; // get thread local values - for (i = 0; i < eventProcessor.n_ethreads; i++) { - tlp = ((RecRawStat *)((char *)(eventProcessor.all_ethreads[i]) + rsb->ethr_stat_offset)) + id; + for (EThread *et : eventProcessor.active_ethreads()) { + tlp = thread_stat(et, rsb, id); total->sum += tlp->sum; total->count += tlp->count; } - for (i = 0; i < eventProcessor.n_dthreads; i++) { - tlp = ((RecRawStat *)((char *)(eventProcessor.all_dthreads[i]) + rsb->ethr_stat_offset)) + id; + for (EThread *et : eventProcessor.active_dthreads()) { + tlp = thread_stat(et, rsb, id); total->sum += tlp->sum; total->count += tlp->count; } @@ -66,7 +76,6 @@ raw_stat_get_total(RecRawStatBlock *rsb, int id, RecRawStat *total) static int raw_stat_sync_to_global(RecRawStatBlock *rsb, int id) { - int i; RecRawStat *tlp; RecRawStat total; @@ -74,14 +83,14 @@ raw_stat_sync_to_global(RecRawStatBlock *rsb, int id) total.count = 0; // sum the thread local values - for (i = 0; i < eventProcessor.n_ethreads; i++) { - tlp = ((RecRawStat *)((char *)(eventProcessor.all_ethreads[i]) + rsb->ethr_stat_offset)) + id; + for (EThread *et : eventProcessor.active_ethreads()) { + tlp = thread_stat(et, rsb, id); total.sum += tlp->sum; total.count += tlp->count; } - for (i = 0; i < eventProcessor.n_dthreads; i++) { - tlp = ((RecRawStat *)((char *)(eventProcessor.all_dthreads[i]) + rsb->ethr_stat_offset)) + id; + for (EThread *et : eventProcessor.active_dthreads()) { + tlp = thread_stat(et, rsb, id); total.sum += tlp->sum; total.count += tlp->count; } @@ -135,14 +144,14 @@ raw_stat_clear(RecRawStatBlock *rsb, int id) // reset the local stats RecRawStat *tlp; - for (int i = 0; i < 
eventProcessor.n_ethreads; i++) { - tlp = ((RecRawStat *)((char *)(eventProcessor.all_ethreads[i]) + rsb->ethr_stat_offset)) + id; + for (EThread *et : eventProcessor.active_ethreads()) { + tlp = thread_stat(et, rsb, id); ink_atomic_swap(&(tlp->sum), (int64_t)0); ink_atomic_swap(&(tlp->count), (int64_t)0); } - for (int i = 0; i < eventProcessor.n_dthreads; i++) { - tlp = ((RecRawStat *)((char *)(eventProcessor.all_dthreads[i]) + rsb->ethr_stat_offset)) + id; + for (EThread *et : eventProcessor.active_dthreads()) { + tlp = thread_stat(et, rsb, id); ink_atomic_swap(&(tlp->sum), (int64_t)0); ink_atomic_swap(&(tlp->count), (int64_t)0); } @@ -167,13 +176,13 @@ raw_stat_clear_sum(RecRawStatBlock *rsb, int id) // reset the local stats RecRawStat *tlp; - for (int i = 0; i < eventProcessor.n_ethreads; i++) { - tlp = ((RecRawStat *)((char *)(eventProcessor.all_ethreads[i]) + rsb->ethr_stat_offset)) + id; + for (EThread *et : eventProcessor.active_ethreads()) { + tlp = thread_stat(et, rsb, id); ink_atomic_swap(&(tlp->sum), (int64_t)0); } - for (int i = 0; i < eventProcessor.n_dthreads; i++) { - tlp = ((RecRawStat *)((char *)(eventProcessor.all_dthreads[i]) + rsb->ethr_stat_offset)) + id; + for (EThread *et : eventProcessor.active_dthreads()) { + tlp = thread_stat(et, rsb, id); ink_atomic_swap(&(tlp->sum), (int64_t)0); } @@ -197,13 +206,13 @@ raw_stat_clear_count(RecRawStatBlock *rsb, int id) // reset the local stats RecRawStat *tlp; - for (int i = 0; i < eventProcessor.n_ethreads; i++) { - tlp = ((RecRawStat *)((char *)(eventProcessor.all_ethreads[i]) + rsb->ethr_stat_offset)) + id; + for (EThread *et : eventProcessor.active_ethreads()) { + tlp = thread_stat(et, rsb, id); ink_atomic_swap(&(tlp->count), (int64_t)0); } - for (int i = 0; i < eventProcessor.n_dthreads; i++) { - tlp = ((RecRawStat *)((char *)(eventProcessor.all_dthreads[i]) + rsb->ethr_stat_offset)) + id; + for (EThread *et : eventProcessor.active_dthreads()) { + tlp = thread_stat(et, rsb, id); 
ink_atomic_swap(&(tlp->count), (int64_t)0); } diff --git a/lib/ts/BaseLogFile.cc b/lib/ts/BaseLogFile.cc index af5fc18d8b5..c9bbe7991dc 100644 --- a/lib/ts/BaseLogFile.cc +++ b/lib/ts/BaseLogFile.cc @@ -27,9 +27,8 @@ * This consturctor creates a BaseLogFile based on a given name. * This is the most common way BaseLogFiles are created. */ -BaseLogFile::BaseLogFile(const char *name) : m_signature(0), m_has_signature(false) +BaseLogFile::BaseLogFile(const char *name) : m_name(ats_strdup(name)) { - init(name); log_log_trace("exiting BaseLogFile constructor, m_name=%s, this=%p\n", m_name.get(), this); } @@ -37,9 +36,8 @@ BaseLogFile::BaseLogFile(const char *name) : m_signature(0), m_has_signature(fal * This consturctor creates a BaseLogFile based on a given name. * Similar to above constructor, but is overloaded with the object signature */ -BaseLogFile::BaseLogFile(const char *name, uint64_t sig) : m_signature(sig), m_has_signature(true) +BaseLogFile::BaseLogFile(const char *name, uint64_t sig) : m_name(ats_strdup(name)), m_signature(sig), m_has_signature(true) { - init(name); log_log_trace("exiting BaseLogFile signature constructor, m_name=%s, m_signature=%ld, this=%p\n", m_name.get(), m_signature, this); } @@ -52,13 +50,13 @@ BaseLogFile::BaseLogFile(const BaseLogFile ©) m_start_time(copy.m_start_time), m_end_time(0L), m_bytes_written(0), - m_signature(copy.m_signature), - m_has_signature(copy.m_has_signature), m_name(ats_strdup(copy.m_name)), m_hostname(ats_strdup(copy.m_hostname)), m_is_regfile(false), m_is_init(copy.m_is_init), - m_meta_info(nullptr) + m_meta_info(nullptr), + m_signature(copy.m_signature), + m_has_signature(copy.m_has_signature) { log_log_trace("exiting BaseLogFile copy constructor, m_name=%s, this=%p\n", m_name.get(), this); } @@ -78,23 +76,6 @@ BaseLogFile::~BaseLogFile() log_log_trace("exiting BaseLogFile destructor, this=%p\n", this); } -/* - * Initializes the defaults of some of the common member values of this class - */ -void 
-BaseLogFile::init(const char *name) -{ - m_fp = nullptr; - m_start_time = time(nullptr); - m_end_time = 0L; - m_bytes_written = 0; - m_name = ats_strdup(name); - m_hostname = nullptr; - m_is_regfile = false; - m_is_init = false; - m_meta_info = nullptr; -} - /* * This function is called by a client of BaseLogFile to roll the underlying * file The tricky part to this routine is in coming up with the new file name, diff --git a/lib/ts/BaseLogFile.h b/lib/ts/BaseLogFile.h index 9068e5a4528..3c5a570a44a 100644 --- a/lib/ts/BaseLogFile.h +++ b/lib/ts/BaseLogFile.h @@ -166,6 +166,8 @@ class BaseLogFile { public: // member functions + BaseLogFile() = delete; + BaseLogFile &operator=(const BaseLogFile &) = delete; BaseLogFile(const char *name); BaseLogFile(const char *name, uint64_t sig); BaseLogFile(const BaseLogFile &); @@ -217,27 +219,22 @@ class BaseLogFile LOG_FILE_COULD_NOT_OPEN_FILE, }; - FILE *m_fp; - long m_start_time; - long m_end_time; - volatile uint64_t m_bytes_written; + FILE *m_fp = nullptr; + long m_start_time = time(nullptr); + long m_end_time = 0L; + volatile uint64_t m_bytes_written = 0; private: - void init(const char *name); - // member functions not allowed - BaseLogFile(); - BaseLogFile &operator=(const BaseLogFile &); - // member functions int timestamp_to_str(long timestamp, char *buf, int size); // member variables - uint64_t m_signature; - bool m_has_signature; ats_scoped_str m_name; ats_scoped_str m_hostname; - bool m_is_regfile; - bool m_is_init; - BaseMetaInfo *m_meta_info; + bool m_is_regfile = false; + bool m_is_init = false; + BaseMetaInfo *m_meta_info = nullptr; + uint64_t m_signature = 0; + bool m_has_signature = false; }; #endif diff --git a/lib/ts/apidefs.h.in b/lib/ts/apidefs.h.in index 87c77a63d5e..a06b6cd0257 100644 --- a/lib/ts/apidefs.h.in +++ b/lib/ts/apidefs.h.in @@ -731,7 +731,6 @@ typedef enum { TS_CONFIG_HTTP_CACHE_MAX_OPEN_WRITE_RETRIES, TS_CONFIG_HTTP_REDIRECT_USE_ORIG_CACHE_KEY, 
TS_CONFIG_HTTP_ATTACH_SERVER_SESSION_TO_CLIENT, - TS_CONFIG_HTTP_SAFE_REQUESTS_RETRYABLE, TS_CONFIG_HTTP_ORIGIN_MAX_CONNECTIONS_QUEUE, TS_CONFIG_WEBSOCKET_NO_ACTIVITY_TIMEOUT, TS_CONFIG_WEBSOCKET_ACTIVE_TIMEOUT, diff --git a/lib/ts/ink_config.h.in b/lib/ts/ink_config.h.in index b96692fffa8..79b2c00c395 100644 --- a/lib/ts/ink_config.h.in +++ b/lib/ts/ink_config.h.in @@ -124,4 +124,6 @@ #define TS_BUILD_DEFAULT_LOOPBACK_IFACE "@default_loopback_iface@" /* clang-format on */ +static const int DEFAULT_STACKSIZE = @default_stack_size@; + #endif /* _ink_config_h */ diff --git a/lib/tsconfig/TsConfigGrammar.c b/lib/tsconfig/TsConfigGrammar.c index 23e5e35f744..2d72d06d360 100644 --- a/lib/tsconfig/TsConfigGrammar.c +++ b/lib/tsconfig/TsConfigGrammar.c @@ -1,8 +1,8 @@ -/* A Bison parser, made by GNU Bison 2.7. */ +/* A Bison parser, made by GNU Bison 3.0.4. */ /* Bison implementation for Yacc-like parsers in C - Copyright (C) 1984, 1989-1990, 2000-2012 Free Software Foundation, Inc. + Copyright (C) 1984, 1989-1990, 2000-2015 Free Software Foundation, Inc. This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -44,7 +44,7 @@ #define YYBISON 1 /* Bison version. */ -#define YYBISON_VERSION "2.7" +#define YYBISON_VERSION "3.0.4" /* Skeleton name. */ #define YYSKELETON_NAME "yacc.c" @@ -59,8 +59,7 @@ #define YYPULL 1 /* "%code top" blocks. */ -/* Line 349 of yacc.c */ -#line 26 "TsConfigGrammar.y" +#line 26 "TsConfigGrammar.y" /* yacc.c:316 */ # if ! (defined(__clang_analyzer__) || defined(__COVERITY__)) # include "TsConfigTypes.h" @@ -71,36 +70,31 @@ # define YYFREE free # include "TsConfigParseEvents.h" -# include "ts/ink_defs.h" // Types we need for the lexer. typedef void* yyscan_t; extern int tsconfiglex(YYSTYPE* yylval, yyscan_t lexer); - -/* Line 349 of yacc.c */ -#line 84 "TsConfigGrammar.c" +#line 80 "TsConfigGrammar.c" /* yacc.c:316 */ /* Substitute the variable and function names. 
*/ #define yyparse tsconfigparse #define yylex tsconfiglex #define yyerror tsconfigerror -#define yylval tsconfiglval -#define yychar tsconfigchar #define yydebug tsconfigdebug #define yynerrs tsconfignerrs + /* Copy the first part of user declarations. */ -/* Line 371 of yacc.c */ -#line 98 "TsConfigGrammar.c" +#line 92 "TsConfigGrammar.c" /* yacc.c:339 */ -# ifndef YY_NULL +# ifndef YY_NULLPTR # if defined __cplusplus && 201103L <= __cplusplus -# define YY_NULL nullptr +# define YY_NULLPTR nullptr # else -# define YY_NULL 0 +# define YY_NULLPTR 0 # endif # endif @@ -116,7 +110,7 @@ extern int tsconfiglex(YYSTYPE* yylval, yyscan_t lexer); by #include "y.tab.h". */ #ifndef YY_TSCONFIG_TSCONFIGGRAMMAR_H_INCLUDED # define YY_TSCONFIG_TSCONFIGGRAMMAR_H_INCLUDED -/* Enabling traces. */ +/* Debug traces. */ #ifndef YYDEBUG # define YYDEBUG 0 #endif @@ -124,8 +118,7 @@ extern int tsconfiglex(YYSTYPE* yylval, yyscan_t lexer); extern int tsconfigdebug; #endif /* "%code requires" blocks. */ -/* Line 387 of yacc.c */ -#line 1 "TsConfigGrammar.y" +#line 1 "TsConfigGrammar.y" /* yacc.c:355 */ /** @file @@ -150,29 +143,26 @@ extern int tsconfigdebug; limitations under the License. */ +#line 147 "TsConfigGrammar.c" /* yacc.c:355 */ -/* Line 387 of yacc.c */ -#line 156 "TsConfigGrammar.c" - -/* Tokens. */ +/* Token type. */ #ifndef YYTOKENTYPE # define YYTOKENTYPE - /* Put the tokens into the symbol table, so that GDB and other debuggers - know about them. */ - enum yytokentype { - STRING = 258, - IDENT = 259, - INTEGER = 260, - LIST_OPEN = 261, - LIST_CLOSE = 262, - GROUP_OPEN = 263, - GROUP_CLOSE = 264, - PATH_OPEN = 265, - PATH_CLOSE = 266, - PATH_SEPARATOR = 267, - SEPARATOR = 268, - ASSIGN = 269 - }; + enum yytokentype + { + STRING = 258, + IDENT = 259, + INTEGER = 260, + LIST_OPEN = 261, + LIST_CLOSE = 262, + GROUP_OPEN = 263, + GROUP_CLOSE = 264, + PATH_OPEN = 265, + PATH_CLOSE = 266, + PATH_SEPARATOR = 267, + SEPARATOR = 268, + ASSIGN = 269 + }; #endif /* Tokens. 
*/ #define STRING 258 @@ -188,39 +178,24 @@ extern int tsconfigdebug; #define SEPARATOR 268 #define ASSIGN 269 - - +/* Value type. */ #if ! defined YYSTYPE && ! defined YYSTYPE_IS_DECLARED typedef int YYSTYPE; # define YYSTYPE_IS_TRIVIAL 1 -# define yystype YYSTYPE /* obsolescent; will be withdrawn */ # define YYSTYPE_IS_DECLARED 1 #endif -#ifdef YYPARSE_PARAM -#if defined __STDC__ || defined __cplusplus -int tsconfigparse (void *YYPARSE_PARAM); -#else -int tsconfigparse (); -#endif -#else /* ! YYPARSE_PARAM */ -#if defined __STDC__ || defined __cplusplus + int tsconfigparse (yyscan_t lexer, struct TsConfigHandlers* handlers); -#else -int tsconfigparse (); -#endif -#endif /* ! YYPARSE_PARAM */ #endif /* !YY_TSCONFIG_TSCONFIGGRAMMAR_H_INCLUDED */ /* Copy the second part of user declarations. */ -/* Line 390 of yacc.c */ -#line 221 "TsConfigGrammar.c" +#line 197 "TsConfigGrammar.c" /* yacc.c:358 */ /* Unqualified %code blocks. */ -/* Line 391 of yacc.c */ -#line 44 "TsConfigGrammar.y" +#line 43 "TsConfigGrammar.y" /* yacc.c:359 */ # define HANDLE_EVENT(x,y) \ @@ -230,9 +205,9 @@ int tsconfigparse (); } int tsconfigerror( - yyscan_t lexer ATS_UNUSED, + yyscan_t lexer, struct TsConfigHandlers* handlers, - const char* text + char const* text ) { return (handlers && handlers->error._f) ? 
handlers->error._f(handlers->error._data, text) @@ -241,9 +216,7 @@ int tsconfigerror( } - -/* Line 391 of yacc.c */ -#line 247 "TsConfigGrammar.c" +#line 220 "TsConfigGrammar.c" /* yacc.c:359 */ #ifdef short # undef short @@ -257,11 +230,8 @@ typedef unsigned char yytype_uint8; #ifdef YYTYPE_INT8 typedef YYTYPE_INT8 yytype_int8; -#elif (defined __STDC__ || defined __C99__FUNC__ \ - || defined __cplusplus || defined _MSC_VER) -typedef signed char yytype_int8; #else -typedef short int yytype_int8; +typedef signed char yytype_int8; #endif #ifdef YYTYPE_UINT16 @@ -281,8 +251,7 @@ typedef short int yytype_int16; # define YYSIZE_T __SIZE_TYPE__ # elif defined size_t # define YYSIZE_T size_t -# elif ! defined YYSIZE_T && (defined __STDC__ || defined __C99__FUNC__ \ - || defined __cplusplus || defined _MSC_VER) +# elif ! defined YYSIZE_T # include /* INFRINGES ON USER NAME SPACE */ # define YYSIZE_T size_t # else @@ -304,6 +273,33 @@ typedef short int yytype_int16; # endif #endif +#ifndef YY_ATTRIBUTE +# if (defined __GNUC__ \ + && (2 < __GNUC__ || (__GNUC__ == 2 && 96 <= __GNUC_MINOR__))) \ + || defined __SUNPRO_C && 0x5110 <= __SUNPRO_C +# define YY_ATTRIBUTE(Spec) __attribute__(Spec) +# else +# define YY_ATTRIBUTE(Spec) /* empty */ +# endif +#endif + +#ifndef YY_ATTRIBUTE_PURE +# define YY_ATTRIBUTE_PURE YY_ATTRIBUTE ((__pure__)) +#endif + +#ifndef YY_ATTRIBUTE_UNUSED +# define YY_ATTRIBUTE_UNUSED YY_ATTRIBUTE ((__unused__)) +#endif + +#if !defined _Noreturn \ + && (!defined __STDC_VERSION__ || __STDC_VERSION__ < 201112) +# if defined _MSC_VER && 1200 <= _MSC_VER +# define _Noreturn __declspec (noreturn) +# else +# define _Noreturn YY_ATTRIBUTE ((__noreturn__)) +# endif +#endif + /* Suppress unused-variable warnings by "using" E. */ #if ! defined lint || defined __GNUC__ # define YYUSE(E) ((void) (E)) @@ -311,24 +307,26 @@ typedef short int yytype_int16; # define YYUSE(E) /* empty */ #endif -/* Identity function, used to suppress warnings about constant conditions. 
*/ -#ifndef lint -# define YYID(N) (N) -#else -#if (defined __STDC__ || defined __C99__FUNC__ \ - || defined __cplusplus || defined _MSC_VER) -static int -YYID (int yyi) +#if defined __GNUC__ && 407 <= __GNUC__ * 100 + __GNUC_MINOR__ +/* Suppress an incorrect diagnostic about yylval being uninitialized. */ +# define YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN \ + _Pragma ("GCC diagnostic push") \ + _Pragma ("GCC diagnostic ignored \"-Wuninitialized\"")\ + _Pragma ("GCC diagnostic ignored \"-Wmaybe-uninitialized\"") +# define YY_IGNORE_MAYBE_UNINITIALIZED_END \ + _Pragma ("GCC diagnostic pop") #else -static int -YYID (yyi) - int yyi; +# define YY_INITIAL_VALUE(Value) Value #endif -{ - return yyi; -} +#ifndef YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN +# define YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN +# define YY_IGNORE_MAYBE_UNINITIALIZED_END +#endif +#ifndef YY_INITIAL_VALUE +# define YY_INITIAL_VALUE(Value) /* Nothing. */ #endif + #if ! defined yyoverflow || YYERROR_VERBOSE /* The parser invokes alloca or malloc; define the necessary symbols. */ @@ -346,8 +344,7 @@ YYID (yyi) # define alloca _alloca # else # define YYSTACK_ALLOC alloca -# if ! defined _ALLOCA_H && ! defined EXIT_SUCCESS && (defined __STDC__ || defined __C99__FUNC__ \ - || defined __cplusplus || defined _MSC_VER) +# if ! defined _ALLOCA_H && ! defined EXIT_SUCCESS # include /* INFRINGES ON USER NAME SPACE */ /* Use EXIT_SUCCESS as a witness for stdlib.h. */ # ifndef EXIT_SUCCESS @@ -359,8 +356,8 @@ YYID (yyi) # endif # ifdef YYSTACK_ALLOC - /* Pacify GCC's `empty if-body' warning. */ -# define YYSTACK_FREE(Ptr) do { /* empty */; } while (YYID (0)) + /* Pacify GCC's 'empty if-body' warning. */ +# define YYSTACK_FREE(Ptr) do { /* empty */; } while (0) # ifndef YYSTACK_ALLOC_MAXIMUM /* The OS might guarantee only one guard page at the bottom of the stack, and a page size can be as small as 4096 bytes. So we cannot safely @@ -376,7 +373,7 @@ YYID (yyi) # endif # if (defined __cplusplus && ! defined EXIT_SUCCESS \ && ! 
((defined YYMALLOC || defined malloc) \ - && (defined YYFREE || defined free))) + && (defined YYFREE || defined free))) # include /* INFRINGES ON USER NAME SPACE */ # ifndef EXIT_SUCCESS # define EXIT_SUCCESS 0 @@ -384,15 +381,13 @@ YYID (yyi) # endif # ifndef YYMALLOC # define YYMALLOC malloc -# if ! defined malloc && ! defined EXIT_SUCCESS && (defined __STDC__ || defined __C99__FUNC__ \ - || defined __cplusplus || defined _MSC_VER) +# if ! defined malloc && ! defined EXIT_SUCCESS void *malloc (YYSIZE_T); /* INFRINGES ON USER NAME SPACE */ # endif # endif # ifndef YYFREE # define YYFREE free -# if ! defined free && ! defined EXIT_SUCCESS && (defined __STDC__ || defined __C99__FUNC__ \ - || defined __cplusplus || defined _MSC_VER) +# if ! defined free && ! defined EXIT_SUCCESS void free (void *); /* INFRINGES ON USER NAME SPACE */ # endif # endif @@ -402,7 +397,7 @@ void free (void *); /* INFRINGES ON USER NAME SPACE */ #if (! defined yyoverflow \ && (! defined __cplusplus \ - || (defined YYSTYPE_IS_TRIVIAL && YYSTYPE_IS_TRIVIAL))) + || (defined YYSTYPE_IS_TRIVIAL && YYSTYPE_IS_TRIVIAL))) /* A type that is properly aligned for any stack member. */ union yyalloc @@ -427,16 +422,16 @@ union yyalloc elements in the stack, and YYPTR gives the new location of the stack. Advance YYPTR to a properly aligned location for the next stack. 
*/ -# define YYSTACK_RELOCATE(Stack_alloc, Stack) \ - do \ - { \ - YYSIZE_T yynewbytes; \ - YYCOPY (&yyptr->Stack_alloc, Stack, yysize); \ - Stack = &yyptr->Stack_alloc; \ - yynewbytes = yystacksize * sizeof (*Stack) + YYSTACK_GAP_MAXIMUM; \ - yyptr += yynewbytes / sizeof (*yyptr); \ - } \ - while (YYID (0)) +# define YYSTACK_RELOCATE(Stack_alloc, Stack) \ + do \ + { \ + YYSIZE_T yynewbytes; \ + YYCOPY (&yyptr->Stack_alloc, Stack, yysize); \ + Stack = &yyptr->Stack_alloc; \ + yynewbytes = yystacksize * sizeof (*Stack) + YYSTACK_GAP_MAXIMUM; \ + yyptr += yynewbytes / sizeof (*yyptr); \ + } \ + while (0) #endif @@ -455,7 +450,7 @@ union yyalloc for (yyi = 0; yyi < (Count); yyi++) \ (Dst)[yyi] = (Src)[yyi]; \ } \ - while (YYID (0)) + while (0) # endif # endif #endif /* !YYCOPY_NEEDED */ @@ -471,17 +466,19 @@ union yyalloc #define YYNNTS 20 /* YYNRULES -- Number of rules. */ #define YYNRULES 32 -/* YYNRULES -- Number of states. */ +/* YYNSTATES -- Number of states. */ #define YYNSTATES 44 -/* YYTRANSLATE(YYLEX) -- Bison symbol number corresponding to YYLEX. */ +/* YYTRANSLATE[YYX] -- Symbol number corresponding to YYX as returned + by yylex, with out-of-bounds checking. */ #define YYUNDEFTOK 2 #define YYMAXUTOK 269 -#define YYTRANSLATE(YYX) \ +#define YYTRANSLATE(YYX) \ ((unsigned int) (YYX) <= YYMAXUTOK ? yytranslate[YYX] : YYUNDEFTOK) -/* YYTRANSLATE[YYLEX] -- Bison symbol number corresponding to YYLEX. */ +/* YYTRANSLATE[TOKEN-NUM] -- Symbol number corresponding to TOKEN-NUM + as returned by yylex, without out-of-bounds checking. */ static const yytype_uint8 yytranslate[] = { 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, @@ -514,36 +511,13 @@ static const yytype_uint8 yytranslate[] = }; #if YYDEBUG -/* YYPRHS[YYN] -- Index of the first RHS symbol of rule number YYN in - YYRHS. 
*/ -static const yytype_uint8 yyprhs[] = -{ - 0, 0, 3, 5, 9, 11, 13, 14, 18, 22, - 23, 28, 32, 34, 36, 37, 41, 45, 47, 49, - 51, 53, 55, 57, 59, 60, 62, 66, 68, 70, - 72, 76, 78 -}; - -/* YYRHS -- A `-1'-separated list of the rules' RHS. */ -static const yytype_int8 yyrhs[] = -{ - 16, 0, -1, 20, -1, 18, 20, 19, -1, 8, - -1, 9, -1, -1, 20, 21, 29, -1, 20, 1, - 29, -1, -1, 4, 14, 22, 27, -1, 24, 26, - 25, -1, 6, -1, 7, -1, -1, 26, 27, 29, - -1, 26, 1, 29, -1, 28, -1, 23, -1, 17, - -1, 30, -1, 3, -1, 4, -1, 5, -1, -1, - 13, -1, 31, 33, 32, -1, 10, -1, 11, -1, - 34, -1, 33, 12, 34, -1, 4, -1, 5, -1 -}; - -/* YYRLINE[YYN] -- source line where rule number YYN was defined. */ + /* YYRLINE[YYN] -- Source line where rule number YYN was defined. */ static const yytype_uint8 yyrline[] = { - 0, 86, 86, 88, 90, 92, 94, 94, 94, 96, - 96, 98, 100, 102, 104, 104, 104, 106, 106, 106, - 106, 108, 108, 108, 110, 110, 112, 114, 116, 118, - 118, 120, 120 + 0, 85, 85, 87, 89, 91, 93, 93, 93, 95, + 95, 97, 99, 101, 103, 103, 103, 105, 105, 105, + 105, 107, 107, 107, 109, 109, 111, 113, 115, 117, + 117, 119, 119 }; #endif @@ -558,13 +532,13 @@ static const char *const yytname[] = "config", "group", "group_open", "group_close", "group_items", "assign", "$@1", "list", "list_open", "list_close", "list_items", "value", "literal", "separator", "path", "path_open", "path_close", "path_item", - "path_tag", YY_NULL + "path_tag", YY_NULLPTR }; #endif # ifdef YYPRINT -/* YYTOKNUM[YYLEX-NUM] -- Internal token number corresponding to - token YYLEX-NUM. */ +/* YYTOKNUM[NUM] -- (External) token number corresponding to the + (internal) symbol number NUM (which must be that of a token). */ static const yytype_uint16 yytoknum[] = { 0, 256, 257, 258, 259, 260, 261, 262, 263, 264, @@ -572,46 +546,18 @@ static const yytype_uint16 yytoknum[] = }; # endif -/* YYR1[YYN] -- Symbol number of symbol that rule YYN derives. 
*/ -static const yytype_uint8 yyr1[] = -{ - 0, 15, 16, 17, 18, 19, 20, 20, 20, 22, - 21, 23, 24, 25, 26, 26, 26, 27, 27, 27, - 27, 28, 28, 28, 29, 29, 30, 31, 32, 33, - 33, 34, 34 -}; +#define YYPACT_NINF -11 -/* YYR2[YYN] -- Number of symbols composing right hand side of rule YYN. */ -static const yytype_uint8 yyr2[] = -{ - 0, 2, 1, 3, 1, 1, 0, 3, 3, 0, - 4, 3, 1, 1, 0, 3, 3, 1, 1, 1, - 1, 1, 1, 1, 0, 1, 3, 1, 1, 1, - 3, 1, 1 -}; +#define yypact_value_is_default(Yystate) \ + (!!((Yystate) == (-11))) -/* YYDEFACT[STATE-NAME] -- Default reduction number in state STATE-NUM. - Performed when YYTABLE doesn't specify something else to do. Zero - means the default is an error. */ -static const yytype_uint8 yydefact[] = -{ - 6, 0, 0, 1, 24, 0, 24, 25, 8, 9, - 7, 0, 21, 22, 23, 12, 4, 27, 19, 6, - 18, 14, 10, 17, 20, 0, 0, 0, 31, 32, - 0, 29, 5, 3, 24, 13, 11, 24, 28, 0, - 26, 16, 15, 30 -}; +#define YYTABLE_NINF -3 -/* YYDEFGOTO[NTERM-NUM]. */ -static const yytype_int8 yydefgoto[] = -{ - -1, 1, 18, 19, 33, 2, 6, 11, 20, 21, - 36, 27, 22, 23, 8, 24, 25, 40, 30, 31 -}; +#define yytable_value_is_error(Yytable_value) \ + 0 -/* YYPACT[STATE-NUM] -- Index in YYTABLE of the portion describing - STATE-NUM. */ -#define YYPACT_NINF -11 + /* YYPACT[STATE-NUM] -- Index in YYTABLE of the portion describing + STATE-NUM. */ static const yytype_int8 yypact[] = { -11, 2, 21, -11, -2, 5, -2, -11, -11, -11, @@ -621,17 +567,35 @@ static const yytype_int8 yypact[] = -11, -11, -11, -11 }; -/* YYPGOTO[NTERM-NUM]. */ + /* YYDEFACT[STATE-NUM] -- Default reduction number in state STATE-NUM. + Performed when YYTABLE does not specify something else to do. Zero + means the default is an error. */ +static const yytype_uint8 yydefact[] = +{ + 6, 0, 0, 1, 24, 0, 24, 25, 8, 9, + 7, 0, 21, 22, 23, 12, 4, 27, 19, 6, + 18, 14, 10, 17, 20, 0, 0, 0, 31, 32, + 0, 29, 5, 3, 24, 13, 11, 24, 28, 0, + 26, 16, 15, 30 +}; + + /* YYPGOTO[NTERM-NUM]. 
*/ static const yytype_int8 yypgoto[] = { -11, -11, -11, -11, -11, 11, -11, -11, -11, -11, -11, -11, 6, -11, -6, -11, -11, -11, -11, -10 }; -/* YYTABLE[YYPACT[STATE-NUM]]. What to do in state STATE-NUM. If - positive, shift that token. If negative, reduce the rule which - number is the opposite. If YYTABLE_NINF, syntax error. */ -#define YYTABLE_NINF -3 + /* YYDEFGOTO[NTERM-NUM]. */ +static const yytype_int8 yydefgoto[] = +{ + -1, 1, 18, 19, 33, 2, 6, 11, 20, 21, + 36, 27, 22, 23, 8, 24, 25, 40, 30, 31 +}; + + /* YYTABLE[YYPACT[STATE-NUM]] -- What to do in state STATE-NUM. If + positive, shift that token. If negative, reduce the rule whose + number is the opposite. If YYTABLE_NINF, syntax error. */ static const yytype_int8 yytable[] = { 10, 34, 3, 12, 13, 14, 15, 35, 16, 4, @@ -640,12 +604,6 @@ static const yytype_int8 yytable[] = 26, 42, 0, 37 }; -#define yypact_value_is_default(Yystate) \ - (!!((Yystate) == (-11))) - -#define yytable_value_is_error(Yytable_value) \ - YYID (0) - static const yytype_int8 yycheck[] = { 6, 1, 0, 3, 4, 5, 6, 7, 8, 1, @@ -654,8 +612,8 @@ static const yytype_int8 yycheck[] = 19, 37, -1, 27 }; -/* YYSTOS[STATE-NUM] -- The (internal number of the) accessing - symbol of state STATE-NUM. */ + /* YYSTOS[STATE-NUM] -- The (internal number of the) accessing + symbol of state STATE-NUM. */ static const yytype_uint8 yystos[] = { 0, 16, 20, 0, 1, 4, 21, 13, 29, 14, @@ -665,30 +623,34 @@ static const yytype_uint8 yystos[] = 32, 29, 29, 34 }; -#define yyerrok (yyerrstatus = 0) -#define yyclearin (yychar = YYEMPTY) -#define YYEMPTY (-2) -#define YYEOF 0 - -#define YYACCEPT goto yyacceptlab -#define YYABORT goto yyabortlab -#define YYERROR goto yyerrorlab - - -/* Like YYERROR except do call yyerror. This remains here temporarily - to ease the transition to the new meaning of YYERROR, for GCC. - Once GCC version 2 has supplanted version 1, this can go. However, - YYFAIL appears to be in use. 
Nevertheless, it is formally deprecated - in Bison 2.4.2's NEWS entry, where a plan to phase it out is - discussed. */ - -#define YYFAIL goto yyerrlab -#if defined YYFAIL - /* This is here to suppress warnings from the GCC cpp's - -Wunused-macros. Normally we don't worry about that warning, but - some users do, and we want to make it easy for users to remove - YYFAIL uses, which will produce warnings from Bison 2.5. */ -#endif + /* YYR1[YYN] -- Symbol number of symbol that rule YYN derives. */ +static const yytype_uint8 yyr1[] = +{ + 0, 15, 16, 17, 18, 19, 20, 20, 20, 22, + 21, 23, 24, 25, 26, 26, 26, 27, 27, 27, + 27, 28, 28, 28, 29, 29, 30, 31, 32, 33, + 33, 34, 34 +}; + + /* YYR2[YYN] -- Number of symbols on the right hand side of rule YYN. */ +static const yytype_uint8 yyr2[] = +{ + 0, 2, 1, 3, 1, 1, 0, 3, 3, 0, + 4, 3, 1, 1, 0, 3, 3, 1, 1, 1, + 1, 1, 1, 1, 0, 1, 3, 1, 1, 1, + 3, 1, 1 +}; + + +#define yyerrok (yyerrstatus = 0) +#define yyclearin (yychar = YYEMPTY) +#define YYEMPTY (-2) +#define YYEOF 0 + +#define YYACCEPT goto yyacceptlab +#define YYABORT goto yyabortlab +#define YYERROR goto yyerrorlab + #define YYRECOVERING() (!!yyerrstatus) @@ -705,27 +667,15 @@ do \ else \ { \ yyerror (lexer, handlers, YY_("syntax error: cannot back up")); \ - YYERROR; \ - } \ -while (YYID (0)) + YYERROR; \ + } \ +while (0) /* Error token number */ -#define YYTERROR 1 -#define YYERRCODE 256 - - -/* This macro is provided for backward compatibility. */ -#ifndef YY_LOCATION_PRINT -# define YY_LOCATION_PRINT(File, Loc) ((void) 0) -#endif +#define YYTERROR 1 +#define YYERRCODE 256 -/* YYLEX -- calling `yylex' with the right arguments. */ -#ifdef YYLEX_PARAM -# define YYLEX yylex (&yylval, YYLEX_PARAM) -#else -# define YYLEX yylex (&yylval, lexer) -#endif /* Enable debugging if requested. 
*/ #if YYDEBUG @@ -735,60 +685,48 @@ while (YYID (0)) # define YYFPRINTF fprintf # endif -# define YYDPRINTF(Args) \ -do { \ - if (yydebug) \ - YYFPRINTF Args; \ -} while (YYID (0)) +# define YYDPRINTF(Args) \ +do { \ + if (yydebug) \ + YYFPRINTF Args; \ +} while (0) + +/* This macro is provided for backward compatibility. */ +#ifndef YY_LOCATION_PRINT +# define YY_LOCATION_PRINT(File, Loc) ((void) 0) +#endif + -# define YY_SYMBOL_PRINT(Title, Type, Value, Location) \ -do { \ - if (yydebug) \ - { \ - YYFPRINTF (stderr, "%s ", Title); \ - yy_symbol_print (stderr, \ - Type, Value, lexer, handlers); \ - YYFPRINTF (stderr, "\n"); \ - } \ -} while (YYID (0)) +# define YY_SYMBOL_PRINT(Title, Type, Value, Location) \ +do { \ + if (yydebug) \ + { \ + YYFPRINTF (stderr, "%s ", Title); \ + yy_symbol_print (stderr, \ + Type, Value, lexer, handlers); \ + YYFPRINTF (stderr, "\n"); \ + } \ +} while (0) -/*--------------------------------. -| Print this symbol on YYOUTPUT. | -`--------------------------------*/ +/*----------------------------------------. +| Print this symbol's value on YYOUTPUT. 
| +`----------------------------------------*/ -/*ARGSUSED*/ -#if (defined __STDC__ || defined __C99__FUNC__ \ - || defined __cplusplus || defined _MSC_VER) static void yy_symbol_value_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep, yyscan_t lexer, struct TsConfigHandlers* handlers) -#else -static void -yy_symbol_value_print (yyoutput, yytype, yyvaluep, lexer, handlers) - FILE *yyoutput; - int yytype; - YYSTYPE const * const yyvaluep; - yyscan_t lexer; - struct TsConfigHandlers* handlers; -#endif { FILE *yyo = yyoutput; YYUSE (yyo); - if (!yyvaluep) - return; YYUSE (lexer); YYUSE (handlers); + if (!yyvaluep) + return; # ifdef YYPRINT if (yytype < YYNTOKENS) YYPRINT (yyoutput, yytoknum[yytype], *yyvaluep); -# else - YYUSE (yyoutput); # endif - switch (yytype) - { - default: - break; - } + YYUSE (yytype); } @@ -796,24 +734,11 @@ yy_symbol_value_print (yyoutput, yytype, yyvaluep, lexer, handlers) | Print this symbol on YYOUTPUT. | `--------------------------------*/ -#if (defined __STDC__ || defined __C99__FUNC__ \ - || defined __cplusplus || defined _MSC_VER) static void yy_symbol_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep, yyscan_t lexer, struct TsConfigHandlers* handlers) -#else -static void -yy_symbol_print (yyoutput, yytype, yyvaluep, lexer, handlers) - FILE *yyoutput; - int yytype; - YYSTYPE const * const yyvaluep; - yyscan_t lexer; - struct TsConfigHandlers* handlers; -#endif { - if (yytype < YYNTOKENS) - YYFPRINTF (yyoutput, "token %s (", yytname[yytype]); - else - YYFPRINTF (yyoutput, "nterm %s (", yytname[yytype]); + YYFPRINTF (yyoutput, "%s %s (", + yytype < YYNTOKENS ? "token" : "nterm", yytname[yytype]); yy_symbol_value_print (yyoutput, yytype, yyvaluep, lexer, handlers); YYFPRINTF (yyoutput, ")"); @@ -824,16 +749,8 @@ yy_symbol_print (yyoutput, yytype, yyvaluep, lexer, handlers) | TOP (included). 
| `------------------------------------------------------------------*/ -#if (defined __STDC__ || defined __C99__FUNC__ \ - || defined __cplusplus || defined _MSC_VER) static void yy_stack_print (yytype_int16 *yybottom, yytype_int16 *yytop) -#else -static void -yy_stack_print (yybottom, yytop) - yytype_int16 *yybottom; - yytype_int16 *yytop; -#endif { YYFPRINTF (stderr, "Stack now"); for (; yybottom <= yytop; yybottom++) @@ -844,51 +761,42 @@ yy_stack_print (yybottom, yytop) YYFPRINTF (stderr, "\n"); } -# define YY_STACK_PRINT(Bottom, Top) \ -do { \ - if (yydebug) \ - yy_stack_print ((Bottom), (Top)); \ -} while (YYID (0)) +# define YY_STACK_PRINT(Bottom, Top) \ +do { \ + if (yydebug) \ + yy_stack_print ((Bottom), (Top)); \ +} while (0) /*------------------------------------------------. | Report that the YYRULE is going to be reduced. | `------------------------------------------------*/ -#if (defined __STDC__ || defined __C99__FUNC__ \ - || defined __cplusplus || defined _MSC_VER) -static void -yy_reduce_print (YYSTYPE *yyvsp, int yyrule, yyscan_t lexer, struct TsConfigHandlers* handlers) -#else static void -yy_reduce_print (yyvsp, yyrule, lexer, handlers) - YYSTYPE *yyvsp; - int yyrule; - yyscan_t lexer; - struct TsConfigHandlers* handlers; -#endif +yy_reduce_print (yytype_int16 *yyssp, YYSTYPE *yyvsp, int yyrule, yyscan_t lexer, struct TsConfigHandlers* handlers) { + unsigned long int yylno = yyrline[yyrule]; int yynrhs = yyr2[yyrule]; int yyi; - unsigned long int yylno = yyrline[yyrule]; YYFPRINTF (stderr, "Reducing stack by rule %d (line %lu):\n", - yyrule - 1, yylno); + yyrule - 1, yylno); /* The symbols being reduced. 
*/ for (yyi = 0; yyi < yynrhs; yyi++) { YYFPRINTF (stderr, " $%d = ", yyi + 1); - yy_symbol_print (stderr, yyrhs[yyprhs[yyrule] + yyi], - &(yyvsp[(yyi + 1) - (yynrhs)]) - , lexer, handlers); + yy_symbol_print (stderr, + yystos[yyssp[yyi + 1 - yynrhs]], + &(yyvsp[(yyi + 1) - (yynrhs)]) + , lexer, handlers); YYFPRINTF (stderr, "\n"); } } -# define YY_REDUCE_PRINT(Rule) \ -do { \ - if (yydebug) \ - yy_reduce_print (yyvsp, Rule, lexer, handlers); \ -} while (YYID (0)) +# define YY_REDUCE_PRINT(Rule) \ +do { \ + if (yydebug) \ + yy_reduce_print (yyssp, yyvsp, Rule, lexer, handlers); \ +} while (0) /* Nonzero means print parse trace. It is left uninitialized so that multiple parsers can coexist. */ @@ -902,7 +810,7 @@ int yydebug; /* YYINITDEPTH -- initial size of the parser's stacks. */ -#ifndef YYINITDEPTH +#ifndef YYINITDEPTH # define YYINITDEPTH 200 #endif @@ -925,15 +833,8 @@ int yydebug; # define yystrlen strlen # else /* Return the length of YYSTR. */ -#if (defined __STDC__ || defined __C99__FUNC__ \ - || defined __cplusplus || defined _MSC_VER) static YYSIZE_T yystrlen (const char *yystr) -#else -static YYSIZE_T -yystrlen (yystr) - const char *yystr; -#endif { YYSIZE_T yylen; for (yylen = 0; yystr[yylen]; yylen++) @@ -949,16 +850,8 @@ yystrlen (yystr) # else /* Copy YYSRC to YYDEST, returning the address of the terminating '\0' in YYDEST. */ -#if (defined __STDC__ || defined __C99__FUNC__ \ - || defined __cplusplus || defined _MSC_VER) static char * yystpcpy (char *yydest, const char *yysrc) -#else -static char * -yystpcpy (yydest, yysrc) - char *yydest; - const char *yysrc; -#endif { char *yyd = yydest; const char *yys = yysrc; @@ -985,30 +878,30 @@ yytnamerr (char *yyres, const char *yystr) if (*yystr == '"') { YYSIZE_T yyn = 0; - const char *yyp = yystr; + char const *yyp = yystr; for (;;) - switch (*++yyp) - { - case '\'': - case ',': - goto do_not_strip_quotes; - - case '\\': - if (*++yyp != '\\') - goto do_not_strip_quotes; - /* Fall through. 
*/ - default: - if (yyres) - yyres[yyn] = *yyp; - yyn++; - break; - - case '"': - if (yyres) - yyres[yyn] = '\0'; - return yyn; - } + switch (*++yyp) + { + case '\'': + case ',': + goto do_not_strip_quotes; + + case '\\': + if (*++yyp != '\\') + goto do_not_strip_quotes; + /* Fall through. */ + default: + if (yyres) + yyres[yyn] = *yyp; + yyn++; + break; + + case '"': + if (yyres) + yyres[yyn] = '\0'; + return yyn; + } do_not_strip_quotes: ; } @@ -1031,22 +924,18 @@ static int yysyntax_error (YYSIZE_T *yymsg_alloc, char **yymsg, yytype_int16 *yyssp, int yytoken) { - YYSIZE_T yysize0 = yytnamerr (YY_NULL, yytname[yytoken]); + YYSIZE_T yysize0 = yytnamerr (YY_NULLPTR, yytname[yytoken]); YYSIZE_T yysize = yysize0; enum { YYERROR_VERBOSE_ARGS_MAXIMUM = 5 }; /* Internationalized format string. */ - const char *yyformat = YY_NULL; + const char *yyformat = YY_NULLPTR; /* Arguments of yyformat. */ - const char *yyarg[YYERROR_VERBOSE_ARGS_MAXIMUM]; + char const *yyarg[YYERROR_VERBOSE_ARGS_MAXIMUM]; /* Number of reported tokens (one for the "unexpected", one per "expected"). */ int yycount = 0; /* There are many possibilities here to consider: - - Assume YYFAIL is not used. It's too flawed to consider. See - - for details. YYERROR is fine as it does not invoke this - function. - If this state is a consistent state with a default action, then the only way this function was invoked is if the default action is an error action. In that case, don't check for expected @@ -1096,7 +985,7 @@ yysyntax_error (YYSIZE_T *yymsg_alloc, char **yymsg, } yyarg[yycount++] = yytname[yyx]; { - YYSIZE_T yysize1 = yysize + yytnamerr (YY_NULL, yytname[yyx]); + YYSIZE_T yysize1 = yysize + yytnamerr (YY_NULLPTR, yytname[yyx]); if (! (yysize <= yysize1 && yysize1 <= YYSTACK_ALLOC_MAXIMUM)) return 2; @@ -1163,35 +1052,19 @@ yysyntax_error (YYSIZE_T *yymsg_alloc, char **yymsg, | Release the memory associated to this symbol. 
| `-----------------------------------------------*/ -/*ARGSUSED*/ -#if (defined __STDC__ || defined __C99__FUNC__ \ - || defined __cplusplus || defined _MSC_VER) static void yydestruct (const char *yymsg, int yytype, YYSTYPE *yyvaluep, yyscan_t lexer, struct TsConfigHandlers* handlers) -#else -static void -yydestruct (yymsg, yytype, yyvaluep, lexer, handlers) - const char *yymsg; - int yytype; - YYSTYPE *yyvaluep; - yyscan_t lexer; - struct TsConfigHandlers* handlers; -#endif { YYUSE (yyvaluep); YYUSE (lexer); YYUSE (handlers); - if (!yymsg) yymsg = "Deleting"; YY_SYMBOL_PRINT (yymsg, yytype, yyvaluep, yylocationp); - switch (yytype) - { - - default: - break; - } + YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN + YYUSE (yytype); + YY_IGNORE_MAYBE_UNINITIALIZED_END } @@ -1201,57 +1074,18 @@ yydestruct (yymsg, yytype, yyvaluep, lexer, handlers) | yyparse. | `----------*/ -#ifdef YYPARSE_PARAM -#if (defined __STDC__ || defined __C99__FUNC__ \ - || defined __cplusplus || defined _MSC_VER) -int -yyparse (void *YYPARSE_PARAM) -#else -int -yyparse (YYPARSE_PARAM) - void *YYPARSE_PARAM; -#endif -#else /* ! YYPARSE_PARAM */ -#if (defined __STDC__ || defined __C99__FUNC__ \ - || defined __cplusplus || defined _MSC_VER) int yyparse (yyscan_t lexer, struct TsConfigHandlers* handlers) -#else -int -yyparse (lexer, handlers) - yyscan_t lexer; - struct TsConfigHandlers* handlers; -#endif -#endif { /* The lookahead symbol. */ int yychar; -#if defined __GNUC__ && 407 <= __GNUC__ * 100 + __GNUC_MINOR__ -/* Suppress an incorrect diagnostic about yylval being uninitialized. */ -# define YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN \ - _Pragma ("GCC diagnostic push") \ - _Pragma ("GCC diagnostic ignored \"-Wuninitialized\"")\ - _Pragma ("GCC diagnostic ignored \"-Wmaybe-uninitialized\"") -# define YY_IGNORE_MAYBE_UNINITIALIZED_END \ - _Pragma ("GCC diagnostic pop") -#else +/* The semantic value of the lookahead symbol. 
*/ /* Default value used for initialization, for pacifying older GCCs or non-GCC compilers. */ -static YYSTYPE yyval_default; -# define YY_INITIAL_VALUE(Value) = Value -#endif -#ifndef YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN -# define YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN -# define YY_IGNORE_MAYBE_UNINITIALIZED_END -#endif -#ifndef YY_INITIAL_VALUE -# define YY_INITIAL_VALUE(Value) /* Nothing. */ -#endif - -/* The semantic value of the lookahead symbol. */ -YYSTYPE yylval YY_INITIAL_VALUE(yyval_default); +YY_INITIAL_VALUE (static YYSTYPE yyval_default;) +YYSTYPE yylval YY_INITIAL_VALUE (= yyval_default); /* Number of syntax errors so far. */ int yynerrs; @@ -1261,8 +1095,8 @@ YYSTYPE yylval YY_INITIAL_VALUE(yyval_default); int yyerrstatus; /* The stacks and their tools: - `yyss': related to states. - `yyvs': related to semantic values. + 'yyss': related to states. + 'yyvs': related to semantic values. Refer to the stacks through separate pointers, to allow yyoverflow to reallocate them elsewhere. */ @@ -1330,23 +1164,23 @@ YYSTYPE yylval YY_INITIAL_VALUE(yyval_default); #ifdef yyoverflow { - /* Give user a chance to reallocate the stack. Use copies of - these so that the &'s don't force the real ones into - memory. */ - YYSTYPE *yyvs1 = yyvs; - yytype_int16 *yyss1 = yyss; - - /* Each stack pointer address is followed by the size of the - data in use in that stack, in bytes. This used to be a - conditional around just the two extra args, but that might - be undefined if yyoverflow is a macro. */ - yyoverflow (YY_("memory exhausted"), - &yyss1, yysize * sizeof (*yyssp), - &yyvs1, yysize * sizeof (*yyvsp), - &yystacksize); - - yyss = yyss1; - yyvs = yyvs1; + /* Give user a chance to reallocate the stack. Use copies of + these so that the &'s don't force the real ones into + memory. */ + YYSTYPE *yyvs1 = yyvs; + yytype_int16 *yyss1 = yyss; + + /* Each stack pointer address is followed by the size of the + data in use in that stack, in bytes. 
This used to be a + conditional around just the two extra args, but that might + be undefined if yyoverflow is a macro. */ + yyoverflow (YY_("memory exhausted"), + &yyss1, yysize * sizeof (*yyssp), + &yyvs1, yysize * sizeof (*yyvsp), + &yystacksize); + + yyss = yyss1; + yyvs = yyvs1; } #else /* no yyoverflow */ # ifndef YYSTACK_RELOCATE @@ -1354,22 +1188,22 @@ YYSTYPE yylval YY_INITIAL_VALUE(yyval_default); # else /* Extend the stack our own way. */ if (YYMAXDEPTH <= yystacksize) - goto yyexhaustedlab; + goto yyexhaustedlab; yystacksize *= 2; if (YYMAXDEPTH < yystacksize) - yystacksize = YYMAXDEPTH; + yystacksize = YYMAXDEPTH; { - yytype_int16 *yyss1 = yyss; - union yyalloc *yyptr = - (union yyalloc *) YYSTACK_ALLOC (YYSTACK_BYTES (yystacksize)); - if (! yyptr) - goto yyexhaustedlab; - YYSTACK_RELOCATE (yyss_alloc, yyss); - YYSTACK_RELOCATE (yyvs_alloc, yyvs); + yytype_int16 *yyss1 = yyss; + union yyalloc *yyptr = + (union yyalloc *) YYSTACK_ALLOC (YYSTACK_BYTES (yystacksize)); + if (! yyptr) + goto yyexhaustedlab; + YYSTACK_RELOCATE (yyss_alloc, yyss); + YYSTACK_RELOCATE (yyvs_alloc, yyvs); # undef YYSTACK_RELOCATE - if (yyss1 != yyssa) - YYSTACK_FREE (yyss1); + if (yyss1 != yyssa) + YYSTACK_FREE (yyss1); } # endif #endif /* no yyoverflow */ @@ -1378,10 +1212,10 @@ YYSTYPE yylval YY_INITIAL_VALUE(yyval_default); yyvsp = yyvs + yysize - 1; YYDPRINTF ((stderr, "Stack size increased to %lu\n", - (unsigned long int) yystacksize)); + (unsigned long int) yystacksize)); if (yyss + yystacksize - 1 <= yyssp) - YYABORT; + YYABORT; } YYDPRINTF ((stderr, "Entering state %d\n", yystate)); @@ -1410,7 +1244,7 @@ YYSTYPE yylval YY_INITIAL_VALUE(yyval_default); if (yychar == YYEMPTY) { YYDPRINTF ((stderr, "Reading a token: ")); - yychar = YYLEX; + yychar = yylex (&yylval, lexer); } if (yychar <= YYEOF) @@ -1475,7 +1309,7 @@ YYSTYPE yylval YY_INITIAL_VALUE(yyval_default); yylen = yyr2[yyn]; /* If YYLEN is nonzero, implement the default value of the action: - `$$ = $1'. + '$$ = $1'. 
Otherwise, the following line sets YYVAL to garbage. This behavior is undocumented and Bison @@ -1489,68 +1323,67 @@ YYSTYPE yylval YY_INITIAL_VALUE(yyval_default); switch (yyn) { case 4: -/* Line 1792 of yacc.c */ -#line 90 "TsConfigGrammar.y" - { HANDLE_EVENT(GroupOpen, (yyvsp[(1) - (1)])); } +#line 89 "TsConfigGrammar.y" /* yacc.c:1646 */ + { HANDLE_EVENT(GroupOpen, (yyvsp[0])); } +#line 1329 "TsConfigGrammar.c" /* yacc.c:1646 */ break; case 5: -/* Line 1792 of yacc.c */ -#line 92 "TsConfigGrammar.y" - { HANDLE_EVENT(GroupClose, (yyvsp[(1) - (1)])); } +#line 91 "TsConfigGrammar.y" /* yacc.c:1646 */ + { HANDLE_EVENT(GroupClose, (yyvsp[0])); } +#line 1335 "TsConfigGrammar.c" /* yacc.c:1646 */ break; case 9: -/* Line 1792 of yacc.c */ -#line 96 "TsConfigGrammar.y" - { HANDLE_EVENT(GroupName, (yyvsp[(1) - (2)])); } +#line 95 "TsConfigGrammar.y" /* yacc.c:1646 */ + { HANDLE_EVENT(GroupName, (yyvsp[-1])); } +#line 1341 "TsConfigGrammar.c" /* yacc.c:1646 */ break; case 12: -/* Line 1792 of yacc.c */ -#line 100 "TsConfigGrammar.y" - { HANDLE_EVENT(ListOpen, (yyvsp[(1) - (1)])); } +#line 99 "TsConfigGrammar.y" /* yacc.c:1646 */ + { HANDLE_EVENT(ListOpen, (yyvsp[0])); } +#line 1347 "TsConfigGrammar.c" /* yacc.c:1646 */ break; case 13: -/* Line 1792 of yacc.c */ -#line 102 "TsConfigGrammar.y" - { HANDLE_EVENT(ListClose, (yyvsp[(1) - (1)])); } +#line 101 "TsConfigGrammar.y" /* yacc.c:1646 */ + { HANDLE_EVENT(ListClose, (yyvsp[0])); } +#line 1353 "TsConfigGrammar.c" /* yacc.c:1646 */ break; case 17: -/* Line 1792 of yacc.c */ -#line 106 "TsConfigGrammar.y" - { HANDLE_EVENT(LiteralValue, (yyvsp[(1) - (1)])); } +#line 105 "TsConfigGrammar.y" /* yacc.c:1646 */ + { HANDLE_EVENT(LiteralValue, (yyvsp[0])); } +#line 1359 "TsConfigGrammar.c" /* yacc.c:1646 */ break; case 27: -/* Line 1792 of yacc.c */ -#line 114 "TsConfigGrammar.y" - { HANDLE_EVENT(PathOpen, (yyvsp[(1) - (1)])); } +#line 113 "TsConfigGrammar.y" /* yacc.c:1646 */ + { HANDLE_EVENT(PathOpen, (yyvsp[0])); } +#line 1365 
"TsConfigGrammar.c" /* yacc.c:1646 */ break; case 28: -/* Line 1792 of yacc.c */ -#line 116 "TsConfigGrammar.y" - { HANDLE_EVENT(PathClose, (yyvsp[(1) - (1)])); } +#line 115 "TsConfigGrammar.y" /* yacc.c:1646 */ + { HANDLE_EVENT(PathClose, (yyvsp[0])); } +#line 1371 "TsConfigGrammar.c" /* yacc.c:1646 */ break; case 31: -/* Line 1792 of yacc.c */ -#line 120 "TsConfigGrammar.y" - { HANDLE_EVENT(PathTag, (yyvsp[(1) - (1)])); } +#line 119 "TsConfigGrammar.y" /* yacc.c:1646 */ + { HANDLE_EVENT(PathTag, (yyvsp[0])); } +#line 1377 "TsConfigGrammar.c" /* yacc.c:1646 */ break; case 32: -/* Line 1792 of yacc.c */ -#line 120 "TsConfigGrammar.y" - { HANDLE_EVENT(PathIndex, (yyvsp[(1) - (1)])); } +#line 119 "TsConfigGrammar.y" /* yacc.c:1646 */ + { HANDLE_EVENT(PathIndex, (yyvsp[0])); } +#line 1383 "TsConfigGrammar.c" /* yacc.c:1646 */ break; -/* Line 1792 of yacc.c */ -#line 1554 "TsConfigGrammar.c" +#line 1387 "TsConfigGrammar.c" /* yacc.c:1646 */ default: break; } /* User semantic actions sometimes alter yychar, and that requires @@ -1572,7 +1405,7 @@ YYSTYPE yylval YY_INITIAL_VALUE(yyval_default); *++yyvsp = yyval; - /* Now `shift' the result of the reduction. Determine what state + /* Now 'shift' the result of the reduction. Determine what state that goes to, based on the state we popped back to and the rule number reduced by. */ @@ -1587,9 +1420,9 @@ YYSTYPE yylval YY_INITIAL_VALUE(yyval_default); goto yynewstate; -/*------------------------------------. -| yyerrlab -- here on detecting error | -`------------------------------------*/ +/*--------------------------------------. +| yyerrlab -- here on detecting error. | +`--------------------------------------*/ yyerrlab: /* Make sure we have latest lookahead translation. See comments at user semantic actions for why this is necessary. 
*/ @@ -1605,7 +1438,7 @@ YYSTYPE yylval YY_INITIAL_VALUE(yyval_default); # define YYSYNTAX_ERROR yysyntax_error (&yymsg_alloc, &yymsg, \ yyssp, yytoken) { - const char *yymsgp = YY_("syntax error"); + char const *yymsgp = YY_("syntax error"); int yysyntax_error_status; yysyntax_error_status = YYSYNTAX_ERROR; if (yysyntax_error_status == 0) @@ -1640,20 +1473,20 @@ YYSTYPE yylval YY_INITIAL_VALUE(yyval_default); if (yyerrstatus == 3) { /* If just tried and failed to reuse lookahead token after an - error, discard it. */ + error, discard it. */ if (yychar <= YYEOF) - { - /* Return failure if at end of input. */ - if (yychar == YYEOF) - YYABORT; - } + { + /* Return failure if at end of input. */ + if (yychar == YYEOF) + YYABORT; + } else - { - yydestruct ("Error: discarding", - yytoken, &yylval, lexer, handlers); - yychar = YYEMPTY; - } + { + yydestruct ("Error: discarding", + yytoken, &yylval, lexer, handlers); + yychar = YYEMPTY; + } } /* Else will try to reuse lookahead token after shifting the error @@ -1672,7 +1505,7 @@ YYSTYPE yylval YY_INITIAL_VALUE(yyval_default); if (/*CONSTCOND*/ 0) goto yyerrorlab; - /* Do not reclaim the symbols of the rule which action triggered + /* Do not reclaim the symbols of the rule whose action triggered this YYERROR. */ YYPOPSTACK (yylen); yylen = 0; @@ -1685,29 +1518,29 @@ YYSTYPE yylval YY_INITIAL_VALUE(yyval_default); | yyerrlab1 -- common code for both syntax error and YYERROR. | `-------------------------------------------------------------*/ yyerrlab1: - yyerrstatus = 3; /* Each real token shifted decrements this. */ + yyerrstatus = 3; /* Each real token shifted decrements this. 
*/ for (;;) { yyn = yypact[yystate]; if (!yypact_value_is_default (yyn)) - { - yyn += YYTERROR; - if (0 <= yyn && yyn <= YYLAST && yycheck[yyn] == YYTERROR) - { - yyn = yytable[yyn]; - if (0 < yyn) - break; - } - } + { + yyn += YYTERROR; + if (0 <= yyn && yyn <= YYLAST && yycheck[yyn] == YYTERROR) + { + yyn = yytable[yyn]; + if (0 < yyn) + break; + } + } /* Pop the current state because it cannot handle the error token. */ if (yyssp == yyss) - YYABORT; + YYABORT; yydestruct ("Error: popping", - yystos[yystate], yyvsp, lexer, handlers); + yystos[yystate], yyvsp, lexer, handlers); YYPOPSTACK (1); yystate = *yyssp; YY_STACK_PRINT (yyss, yyssp); @@ -1758,14 +1591,14 @@ YYSTYPE yylval YY_INITIAL_VALUE(yyval_default); yydestruct ("Cleanup: discarding lookahead", yytoken, &yylval, lexer, handlers); } - /* Do not reclaim the symbols of the rule which action triggered + /* Do not reclaim the symbols of the rule whose action triggered this YYABORT or YYACCEPT. */ YYPOPSTACK (yylen); YY_STACK_PRINT (yyss, yyssp); while (yyssp != yyss) { yydestruct ("Cleanup: popping", - yystos[*yyssp], yyvsp, lexer, handlers); + yystos[*yyssp], yyvsp, lexer, handlers); YYPOPSTACK (1); } #ifndef yyoverflow @@ -1776,13 +1609,9 @@ YYSTYPE yylval YY_INITIAL_VALUE(yyval_default); if (yymsg != yymsgbuf) YYSTACK_FREE (yymsg); #endif - /* Make sure YYID is used. */ - return YYID (yyresult); + return yyresult; } - - -/* Line 2055 of yacc.c */ -#line 122 "TsConfigGrammar.y" +#line 121 "TsConfigGrammar.y" /* yacc.c:1906 */ # endif // __clang_analyzer__ diff --git a/lib/tsconfig/TsConfigGrammar.h b/lib/tsconfig/TsConfigGrammar.h index 52f083bbf28..4d98d3ad2fe 100644 --- a/lib/tsconfig/TsConfigGrammar.h +++ b/lib/tsconfig/TsConfigGrammar.h @@ -1,8 +1,8 @@ -/* A Bison parser, made by GNU Bison 2.7. */ +/* A Bison parser, made by GNU Bison 3.0.4. */ /* Bison interface for Yacc-like parsers in C - Copyright (C) 1984, 1989-1990, 2000-2012 Free Software Foundation, Inc. 
+ Copyright (C) 1984, 1989-1990, 2000-2015 Free Software Foundation, Inc. This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -32,7 +32,7 @@ #ifndef YY_TSCONFIG_TSCONFIGGRAMMAR_H_INCLUDED # define YY_TSCONFIG_TSCONFIGGRAMMAR_H_INCLUDED -/* Enabling traces. */ +/* Debug traces. */ #ifndef YYDEBUG # define YYDEBUG 0 #endif @@ -40,8 +40,7 @@ extern int tsconfigdebug; #endif /* "%code requires" blocks. */ -/* Line 2058 of yacc.c */ -#line 1 "TsConfigGrammar.y" +#line 1 "TsConfigGrammar.y" /* yacc.c:1909 */ /** @file @@ -66,29 +65,26 @@ extern int tsconfigdebug; limitations under the License. */ +#line 69 "TsConfigGrammar.h" /* yacc.c:1909 */ -/* Line 2058 of yacc.c */ -#line 72 "TsConfigGrammar.h" - -/* Tokens. */ +/* Token type. */ #ifndef YYTOKENTYPE # define YYTOKENTYPE - /* Put the tokens into the symbol table, so that GDB and other debuggers - know about them. */ - enum yytokentype { - STRING = 258, - IDENT = 259, - INTEGER = 260, - LIST_OPEN = 261, - LIST_CLOSE = 262, - GROUP_OPEN = 263, - GROUP_CLOSE = 264, - PATH_OPEN = 265, - PATH_CLOSE = 266, - PATH_SEPARATOR = 267, - SEPARATOR = 268, - ASSIGN = 269 - }; + enum yytokentype + { + STRING = 258, + IDENT = 259, + INTEGER = 260, + LIST_OPEN = 261, + LIST_CLOSE = 262, + GROUP_OPEN = 263, + GROUP_CLOSE = 264, + PATH_OPEN = 265, + PATH_CLOSE = 266, + PATH_SEPARATOR = 267, + SEPARATOR = 268, + ASSIGN = 269 + }; #endif /* Tokens. */ #define STRING 258 @@ -104,28 +100,15 @@ extern int tsconfigdebug; #define SEPARATOR 268 #define ASSIGN 269 - - +/* Value type. */ #if ! defined YYSTYPE && ! defined YYSTYPE_IS_DECLARED typedef int YYSTYPE; # define YYSTYPE_IS_TRIVIAL 1 -# define yystype YYSTYPE /* obsolescent; will be withdrawn */ # define YYSTYPE_IS_DECLARED 1 #endif -#ifdef YYPARSE_PARAM -#if defined __STDC__ || defined __cplusplus -int tsconfigparse (void *YYPARSE_PARAM); -#else -int tsconfigparse (); -#endif -#else /* ! 
YYPARSE_PARAM */ -#if defined __STDC__ || defined __cplusplus + int tsconfigparse (yyscan_t lexer, struct TsConfigHandlers* handlers); -#else -int tsconfigparse (); -#endif -#endif /* ! YYPARSE_PARAM */ #endif /* !YY_TSCONFIG_TSCONFIGGRAMMAR_H_INCLUDED */ diff --git a/lib/tsconfig/TsConfigGrammar.y b/lib/tsconfig/TsConfigGrammar.y index 7dcfc874d84..293a2afc728 100644 --- a/lib/tsconfig/TsConfigGrammar.y +++ b/lib/tsconfig/TsConfigGrammar.y @@ -33,7 +33,6 @@ # define YYFREE free # include "TsConfigParseEvents.h" -# include "ts/ink_defs.h" // Types we need for the lexer. typedef void* yyscan_t; @@ -50,7 +49,7 @@ extern int tsconfiglex(YYSTYPE* yylval, yyscan_t lexer); } int tsconfigerror( - yyscan_t lexer ATS_UNUSED, + yyscan_t lexer, struct TsConfigHandlers* handlers, char const* text ) { diff --git a/mgmt/LocalManager.cc b/mgmt/LocalManager.cc index 2a89fac3997..f3da1370044 100644 --- a/mgmt/LocalManager.cc +++ b/mgmt/LocalManager.cc @@ -147,22 +147,14 @@ LocalManager::processRunning() } } -LocalManager::LocalManager(bool proxy_on) : BaseManager(), run_proxy(proxy_on), configFiles(nullptr) +LocalManager::LocalManager(bool proxy_on) : BaseManager(), run_proxy(proxy_on) { bool found; ats_scoped_str rundir(RecConfigReadRuntimeDir()); ats_scoped_str bindir(RecConfigReadBinDir()); ats_scoped_str sysconfdir(RecConfigReadConfigDir()); - syslog_facility = 0; - - proxy_recoverable = true; - proxy_started_at = -1; - proxy_launch_count = 0; - manager_started_at = time(nullptr); - proxy_launch_outstanding = false; - mgmt_shutdown_outstanding = MGMT_PENDING_NONE; - proxy_running = 0; + manager_started_at = time(nullptr); RecRegisterStatInt(RECT_NODE, "proxy.node.proxy_running", 0, RECP_NON_PERSISTENT); @@ -212,7 +204,6 @@ LocalManager::LocalManager(bool proxy_on) : BaseManager(), run_proxy(proxy_on), proxy_name = REC_readString("proxy.config.proxy_name", &found); proxy_binary = REC_readString("proxy.config.proxy_binary", &found); env_prep = 
REC_readString("proxy.config.env_prep", &found); - proxy_options = nullptr; // Calculate proxy_binary from the absolute bin_path absolute_proxy_binary = Layout::relative_to(bindir, proxy_binary); @@ -223,12 +214,6 @@ LocalManager::LocalManager(bool proxy_on) : BaseManager(), run_proxy(proxy_on), mgmt_fatal(0, "[LocalManager::LocalManager] please set bin path 'proxy.config.bin_path' \n"); } - watched_process_pid = -1; - - process_server_sockfd = -1; - watched_process_fd = -1; - proxy_launch_pid = -1; - return; } diff --git a/mgmt/LocalManager.h b/mgmt/LocalManager.h index d35fe0d44e2..672731c447e 100644 --- a/mgmt/LocalManager.h +++ b/mgmt/LocalManager.h @@ -40,6 +40,7 @@ #if TS_HAS_WCCP #include #endif +#include class FileManager; @@ -88,13 +89,13 @@ class LocalManager : public BaseManager bool processRunning(); volatile bool run_proxy; - volatile bool proxy_recoverable; // false if traffic_server cannot recover with a reboot + volatile bool proxy_recoverable = true; // false if traffic_server cannot recover with a reboot volatile time_t manager_started_at; - volatile time_t proxy_started_at; - volatile int proxy_launch_count; - volatile bool proxy_launch_outstanding; - volatile ManagementPendingOperation mgmt_shutdown_outstanding; - volatile int proxy_running; + volatile time_t proxy_started_at = -1; + volatile int proxy_launch_count = 0; + volatile bool proxy_launch_outstanding = false; + volatile ManagementPendingOperation mgmt_shutdown_outstanding = MGMT_PENDING_NONE; + volatile int proxy_running = 0; HttpProxyPort::Group m_proxy_ports; // Local inbound addresses to bind, if set. 
IpAddr m_inbound_ip4; @@ -106,19 +107,19 @@ class LocalManager : public BaseManager char *absolute_proxy_binary; char *proxy_name; char *proxy_binary; - char *proxy_options; // These options should persist across proxy reboots + char *proxy_options = nullptr; // These options should persist across proxy reboots char *env_prep; - int process_server_sockfd; - volatile int watched_process_fd; - volatile pid_t proxy_launch_pid; + int process_server_sockfd = ts::NO_FD; + volatile int watched_process_fd = ts::NO_FD; + volatile pid_t proxy_launch_pid = -1; - Alarms *alarm_keeper; - FileManager *configFiles; + Alarms *alarm_keeper = nullptr; + FileManager *configFiles = nullptr; - volatile pid_t watched_process_pid; + volatile pid_t watched_process_pid = -1; - int syslog_facility; + int syslog_facility = LOG_DAEMON; #if TS_HAS_WCCP wccp::Cache wccp_cache; diff --git a/mgmt/RecordsConfig.cc b/mgmt/RecordsConfig.cc index 4b2f23eee3d..9e4b86fe11a 100644 --- a/mgmt/RecordsConfig.cc +++ b/mgmt/RecordsConfig.cc @@ -443,8 +443,6 @@ static const RecordElement RecordsConfig[] = , {RECT_CONFIG, "proxy.config.http.attach_server_session_to_client", RECD_INT, "0", RECU_DYNAMIC, RR_NULL, RECC_INT, "[0-1]", RECA_NULL} , - {RECT_CONFIG, "proxy.config.http.safe_requests_retryable", RECD_INT, "1", RECU_DYNAMIC, RR_NULL, RECC_INT, "[0-1]", RECA_NULL} - , {RECT_CONFIG, "proxy.config.net.max_connections_in", RECD_INT, "30000", RECU_DYNAMIC, RR_NULL, RECC_STR, "^[0-9]+$", RECA_NULL} , {RECT_CONFIG, "proxy.config.net.max_connections_active_in", RECD_INT, "10000", RECU_DYNAMIC, RR_NULL, RECC_STR, "^[0-9]+$", RECA_NULL} diff --git a/plugins/background_fetch/background_fetch.cc b/plugins/background_fetch/background_fetch.cc index 7103c696bfb..1e4a299deb9 100644 --- a/plugins/background_fetch/background_fetch.cc +++ b/plugins/background_fetch/background_fetch.cc @@ -560,7 +560,7 @@ TSPluginInit(int argc, const char *argv[]) info.support_email = (char *)"dev@trafficserver.apache.org"; if (TS_SUCCESS 
!= TSPluginRegister(&info)) { - TSError("[%s] Plugin registration failed.", PLUGIN_NAME); + TSError("[%s] Plugin registration failed", PLUGIN_NAME); } TSCont cont = TSContCreate(cont_handle_response, nullptr); @@ -576,7 +576,7 @@ TSPluginInit(int argc, const char *argv[]) BgFetchState::getInstance().createLog(optarg); break; case 'c': - TSDebug(PLUGIN_NAME, "config file %s..", optarg); + TSDebug(PLUGIN_NAME, "config file '%s'", optarg); gConfig->readConfig(optarg); break; } @@ -667,7 +667,7 @@ TSRemapDoRemap(void *ih, TSHttpTxn txnp, TSRemapRequestInfo * /* rri */) TSHttpTxnHookAdd(txnp, TS_HTTP_READ_RESPONSE_HDR_HOOK, config->getCont()); TSHttpTxnHookAdd(txnp, TS_HTTP_TXN_CLOSE_HOOK, config->getCont()); - TSDebug(PLUGIN_NAME, "background fetch TSRemapDoRemap..."); + TSDebug(PLUGIN_NAME, "background fetch TSRemapDoRemap"); TSHandleMLocRelease(bufp, req_hdrs, field_loc); } TSHandleMLocRelease(bufp, TS_NULL_MLOC, req_hdrs); diff --git a/plugins/esi/combo_handler.cc b/plugins/esi/combo_handler.cc index 8bf398d6c58..1e22a43cd02 100644 --- a/plugins/esi/combo_handler.cc +++ b/plugins/esi/combo_handler.cc @@ -318,7 +318,7 @@ TSPluginInit(int argc, const char *argv[]) info.support_email = "dev@trafficserver.apache.org"; if (TSPluginRegister(&info) != TS_SUCCESS) { - TSError("[combo_handler][%s] plugin registration failed.", __FUNCTION__); + TSError("[combo_handler][%s] plugin registration failed", __FUNCTION__); return; } @@ -403,7 +403,7 @@ handleReadRequestHeader(TSCont /* contp ATS_UNUSED */, TSEvent event, void *edat } LOG_DEBUG("combo is enabled for this channel"); - LOG_DEBUG("handling TS_EVENT_HTTP_OS_DNS event..."); + LOG_DEBUG("handling TS_EVENT_HTTP_OS_DNS event"); TSEvent reenable_to_event = TS_EVENT_HTTP_CONTINUE; TSMBuffer bufp; @@ -484,7 +484,7 @@ getDefaultBucket(TSHttpTxn /* txnp ATS_UNUSED */, TSMBuffer bufp, TSMLoc hdr_obj field_loc = TSMimeHdrFieldFind(bufp, hdr_obj, TS_MIME_FIELD_HOST, -1); if (field_loc == TS_NULL_MLOC) { - LOG_ERROR("Host field not 
found."); + LOG_ERROR("Host field not found"); return false; } @@ -577,7 +577,7 @@ parseQueryParameters(const char *query, int query_len, ClientRequest &creq) LOG_DEBUG("Signature [%.*s] on query [%.*s] is invalid", param_len - 4, param + 4, param_start_pos, query); } } else { - LOG_DEBUG("Verification not configured; ignoring signature..."); + LOG_DEBUG("Verification not configured, ignoring signature"); } break; // nothing useful after the signature } @@ -751,7 +751,7 @@ handleServerEvent(TSCont contp, TSEvent event, void *edata) } if (int_data->read_complete && int_data->write_complete) { - LOG_DEBUG("Completed request processing. Shutting down..."); + LOG_DEBUG("Completed request processing, shutting down"); delete int_data; TSContDestroy(contp); } diff --git a/plugins/esi/esi.cc b/plugins/esi/esi.cc index ce085c25210..5dbbb22d3eb 100644 --- a/plugins/esi/esi.cc +++ b/plugins/esi/esi.cc @@ -711,7 +711,7 @@ transformData(TSCont contp) } } if (process_input_complete) { - TSDebug(cont_data->debug_tag, "[%s] Completed reading input...", __FUNCTION__); + TSDebug(cont_data->debug_tag, "[%s] Completed reading input", __FUNCTION__); if (cont_data->input_type == DATA_TYPE_PACKED_ESI) { TSDebug(DEBUG_TAG, "[%s] Going to use packed node list of size %d", __FUNCTION__, (int)cont_data->packed_node_list.size()); if (cont_data->esi_proc->usePackedNodeList(cont_data->packed_node_list) == EsiProcessor::UNPACK_FAILURE) { @@ -919,7 +919,7 @@ transformHandler(TSCont contp, TSEvent event, void *edata) is_fetch_event = cont_data->data_fetcher->isFetchEvent(event); if (cont_data->xform_closed) { - TSDebug(cont_debug_tag, "[%s] Transformation closed. 
Post-processing...", __FUNCTION__); + TSDebug(cont_debug_tag, "[%s] Transformation closed, post-processing", __FUNCTION__); if (cont_data->curr_state == ContData::PROCESSING_COMPLETE) { TSDebug(cont_debug_tag, "[%s] Processing is complete, not processing current event %d", __FUNCTION__, event); process_event = false; @@ -975,13 +975,13 @@ transformHandler(TSCont contp, TSEvent event, void *edata) break; case TS_EVENT_IMMEDIATE: - TSDebug(cont_debug_tag, "[%s] handling TS_EVENT_IMMEDIATE...", __FUNCTION__); + TSDebug(cont_debug_tag, "[%s] handling TS_EVENT_IMMEDIATE", __FUNCTION__); transformData(contp); break; default: if (is_fetch_event) { - TSDebug(cont_debug_tag, "[%s] Handling fetch event %d...", __FUNCTION__, event); + TSDebug(cont_debug_tag, "[%s] Handling fetch event %d", __FUNCTION__, event); if (cont_data->data_fetcher->handleFetchEvent(event, edata)) { if ((cont_data->curr_state == ContData::FETCHING_DATA) || (cont_data->curr_state == ContData::READING_ESI_DOC)) { // there's a small chance that fetcher is ready even before @@ -1020,7 +1020,7 @@ transformHandler(TSCont contp, TSEvent event, void *edata) return 1; lShutdown: - TSDebug(cont_data->debug_tag, "[%s] transformation closed; cleaning up data...", __FUNCTION__); + TSDebug(cont_data->debug_tag, "[%s] transformation closed; cleaning up data", __FUNCTION__); delete cont_data; TSContDestroy(contp); return 1; @@ -1504,7 +1504,7 @@ globalHookHandler(TSCont contp, TSEvent event, void *edata) switch (event) { case TS_EVENT_HTTP_READ_REQUEST_HDR: - TSDebug(DEBUG_TAG, "[%s] handling read request header event...", __FUNCTION__); + TSDebug(DEBUG_TAG, "[%s] handling read request header event", __FUNCTION__); if (intercept_req) { if (!setupServerIntercept(txnp)) { TSError("[esi][%s] Could not setup server intercept", __FUNCTION__); @@ -1521,7 +1521,7 @@ globalHookHandler(TSCont contp, TSEvent event, void *edata) if (!intercept_req) { if (event == TS_EVENT_HTTP_READ_RESPONSE_HDR) { bool mask_cache_headers = 
false; - TSDebug(DEBUG_TAG, "[%s] handling read response header event...", __FUNCTION__); + TSDebug(DEBUG_TAG, "[%s] handling read response header event", __FUNCTION__); if (isCacheObjTransformable(txnp, &intercept_header, &head_only)) { // transformable cache object will definitely have a // transformation already as cache_lookup_complete would @@ -1540,7 +1540,7 @@ globalHookHandler(TSCont contp, TSEvent event, void *edata) maskOsCacheHeaders(txnp); } } else { - TSDebug(DEBUG_TAG, "[%s] handling cache lookup complete event...", __FUNCTION__); + TSDebug(DEBUG_TAG, "[%s] handling cache lookup complete event", __FUNCTION__); if (isCacheObjTransformable(txnp, &intercept_header, &head_only)) { // we make the assumption above that a transformable cache // object would already have a tranformation. We should revisit @@ -1666,7 +1666,7 @@ TSPluginInit(int argc, const char *argv[]) info.support_email = (char *)"dev@trafficserver.apache.org"; if (TSPluginRegister(&info) != TS_SUCCESS) { - TSError("[esi][%s] plugin registration failed.", __FUNCTION__); + TSError("[esi][%s] plugin registration failed", __FUNCTION__); return; } diff --git a/plugins/experimental/acme/acme.c b/plugins/experimental/acme/acme.c index b6b46f36760..0307a532861 100644 --- a/plugins/experimental/acme/acme.c +++ b/plugins/experimental/acme/acme.c @@ -337,7 +337,7 @@ TSPluginInit(int argc, const char *argv[]) info.support_email = "dev@trafficserver.apache.org"; if (TS_SUCCESS != TSPluginRegister(&info)) { - TSError("[%s] Plugin registration failed.", PLUGIN_NAME); + TSError("[%s] Plugin registration failed", PLUGIN_NAME); return; } diff --git a/plugins/experimental/ats_pagespeed/ats_pagespeed.cc b/plugins/experimental/ats_pagespeed/ats_pagespeed.cc index fdb08bbf3cb..fe90fd1c75c 100644 --- a/plugins/experimental/ats_pagespeed/ats_pagespeed.cc +++ b/plugins/experimental/ats_pagespeed/ats_pagespeed.cc @@ -219,7 +219,7 @@ ps_determine_request_options(const RewriteOptions *domain_options, /* may be nul if 
(!server_context->GetQueryOptions(request_context, domain_options, url, request_headers, response_headers, &rewrite_query)) { // Failed to parse query params or request headers. Treat this as if there // were no query params given. - TSError("[ats_pagespeed] ps_route request: parsing headers or query params failed."); + TSError("[ats_pagespeed] ps_route request: parsing headers or query params failed"); return NULL; } diff --git a/plugins/experimental/buffer_upload/buffer_upload.cc b/plugins/experimental/buffer_upload/buffer_upload.cc index 2630823930a..8e8c26dd8dd 100644 --- a/plugins/experimental/buffer_upload/buffer_upload.cc +++ b/plugins/experimental/buffer_upload/buffer_upload.cc @@ -190,7 +190,7 @@ call_httpconnect(TSCont contp, pvc_state *my_state) // unsigned int client_ip = TSHttpTxnClientIPGet(my_state->http_txnp); sockaddr const *client_ip = TSHttpTxnClientAddrGet(my_state->http_txnp); - TSDebug(DEBUG_TAG, "call TSHttpConnect() ..."); + TSDebug(DEBUG_TAG, "call TSHttpConnect()"); if ((my_state->net_vc = TSHttpConnect(client_ip)) == nullptr) { LOG_ERROR_AND_RETURN("TSHttpConnect"); } @@ -732,7 +732,7 @@ attach_pvc_plugin(TSCont /* contp ATS_UNUSED */, TSEvent event, void *edata) if (NOT_VALID_PTR(field_loc)) { // if (VALID_PTR(str)) // TSHandleStringRelease(req_bufp, url_loc, str); - LOG_ERROR("Host field not found."); + LOG_ERROR("Host field not found"); TSHandleMLocRelease(req_bufp, req_loc, url_loc); TSHandleMLocRelease(req_bufp, TS_NULL_MLOC, req_loc); break; @@ -884,7 +884,7 @@ attach_pvc_plugin(TSCont /* contp ATS_UNUSED */, TSEvent event, void *edata) if (!uconfig->use_disk_buffer && my_state->req_size > uconfig->mem_buffer_size) { TSDebug(DEBUG_TAG, "The request size %" PRId64 " is larger than memory buffer size %" PRId64 - ", bypass upload proxy feature for this request.", + ", bypass upload proxy feature for this request", my_state->req_size, uconfig->mem_buffer_size); pvc_cleanup(new_cont, my_state); @@ -913,7 +913,7 @@ 
attach_pvc_plugin(TSCont /* contp ATS_UNUSED */, TSEvent event, void *edata) } } - TSDebug(DEBUG_TAG, "calling TSHttpTxnIntercept() ..."); + TSDebug(DEBUG_TAG, "calling TSHttpTxnIntercept()"); TSHttpTxnIntercept(new_cont, txnp); break; @@ -1213,7 +1213,7 @@ TSPluginInit(int argc, const char *argv[]) } // set the num of threads for disk AIO if (TSAIOThreadNumSet(uconfig->thread_num) == TS_ERROR) { - TSError("[buffer_upload] Failed to set thread number."); + TSError("[buffer_upload] Failed to set thread number"); } TSDebug(DEBUG_TAG, "uconfig->url_list_file: %s", uconfig->url_list_file); @@ -1227,12 +1227,12 @@ TSPluginInit(int argc, const char *argv[]) info.support_email = const_cast("dev@trafficserver.apache.org"); if (uconfig->use_disk_buffer && !create_directory()) { - TSError("[buffer_upload] Directory creation failed."); + TSError("[buffer_upload] Directory creation failed"); uconfig->use_disk_buffer = false; } if (TSPluginRegister(&info) != TS_SUCCESS) { - TSError("[buffer_upload] Plugin registration failed."); + TSError("[buffer_upload] Plugin registration failed"); } /* create the statistic variables */ diff --git a/plugins/experimental/cache_key_genid/cache_key_genid.c b/plugins/experimental/cache_key_genid/cache_key_genid.c index 4d0ac603de1..63bb2be7be6 100644 --- a/plugins/experimental/cache_key_genid/cache_key_genid.c +++ b/plugins/experimental/cache_key_genid/cache_key_genid.c @@ -166,7 +166,7 @@ TSPluginInit(int argc, const char *argv[]) } if (TSPluginRegister(&info) != TS_SUCCESS) { - TSError("[%s] plugin registration failed. 
check version.", PLUGIN_NAME); + TSError("[%s] plugin registration failed, check version", PLUGIN_NAME); return; } diff --git a/plugins/experimental/collapsed_forwarding/collapsed_forwarding.cc b/plugins/experimental/collapsed_forwarding/collapsed_forwarding.cc index 7526a4ddb88..b6310204fce 100644 --- a/plugins/experimental/collapsed_forwarding/collapsed_forwarding.cc +++ b/plugins/experimental/collapsed_forwarding/collapsed_forwarding.cc @@ -333,7 +333,7 @@ TSPluginInit(int argc, const char *argv[]) info.support_email = (char *)"dev@trafficserver.apache.org"; if (TS_SUCCESS != TSPluginRegister(&info)) { - TSError("[%s] Plugin registration failed.", DEBUG_TAG); + TSError("[%s] Plugin registration failed", DEBUG_TAG); } process_args(argc, argv); diff --git a/plugins/experimental/custom_redirect/custom_redirect.cc b/plugins/experimental/custom_redirect/custom_redirect.cc index 30ab6229cfe..7496226a07f 100644 --- a/plugins/experimental/custom_redirect/custom_redirect.cc +++ b/plugins/experimental/custom_redirect/custom_redirect.cc @@ -152,9 +152,9 @@ TSPluginInit(int argc, const char *argv[]) redirect_url_header_len = strlen(redirect_url_header); } if (TSPluginRegister(&info) != TS_SUCCESS) { - TSError("[custom_redirect] Plugin registration failed."); + TSError("[custom_redirect] Plugin registration failed"); } - TSError("[custom_redirect] Plugin registered successfully."); + TSError("[custom_redirect] Plugin registered successfully"); TSCont mainCont = TSContCreate(plugin_main_handler, nullptr); TSHttpHookAdd(TS_HTTP_READ_RESPONSE_HDR_HOOK, mainCont); } diff --git a/plugins/experimental/memcached_remap/memcached_remap.cc b/plugins/experimental/memcached_remap/memcached_remap.cc index 23e5dc1754a..88d07cd7889 100644 --- a/plugins/experimental/memcached_remap/memcached_remap.cc +++ b/plugins/experimental/memcached_remap/memcached_remap.cc @@ -172,7 +172,7 @@ TSPluginInit(int argc, const char *argv[]) TSDebug(PLUGIN_NAME, "about to init memcached"); if 
(TSPluginRegister(&info) != TS_SUCCESS) { - TSError("[memcached_remap] Plugin registration failed."); + TSError("[memcached_remap] Plugin registration failed"); return; } diff --git a/plugins/experimental/mysql_remap/mysql_remap.cc b/plugins/experimental/mysql_remap/mysql_remap.cc index f7f6527c7b8..2f2324b8db2 100644 --- a/plugins/experimental/mysql_remap/mysql_remap.cc +++ b/plugins/experimental/mysql_remap/mysql_remap.cc @@ -191,7 +191,7 @@ TSPluginInit(int argc, const char *argv[]) info.support_email = const_cast("dev@trafficserver.apache.org"); if (TSPluginRegister(&info) != TS_SUCCESS) { - TSError("[mysql_remap] Plugin registration failed."); + TSError("[mysql_remap] Plugin registration failed"); } if (argc != 2) { diff --git a/plugins/experimental/remap_stats/remap_stats.c b/plugins/experimental/remap_stats/remap_stats.c index 2d32834e275..8ec9a9a1421 100644 --- a/plugins/experimental/remap_stats/remap_stats.c +++ b/plugins/experimental/remap_stats/remap_stats.c @@ -249,11 +249,11 @@ TSPluginInit(int argc, const char *argv[]) info.support_email = "dev@trafficserver.apache.org"; if (TSPluginRegister(&info) != TS_SUCCESS) { - TSError("[remap_stats] Plugin registration failed."); + TSError("[remap_stats] Plugin registration failed"); return; } else { - TSDebug(DEBUG_TAG, "Plugin registration succeeded."); + TSDebug(DEBUG_TAG, "Plugin registration succeeded"); } config = TSmalloc(sizeof(config_t)); diff --git a/plugins/experimental/ssl_cert_loader/ssl-cert-loader.cc b/plugins/experimental/ssl_cert_loader/ssl-cert-loader.cc index 8aa16b8e465..8b02d779a83 100644 --- a/plugins/experimental/ssl_cert_loader/ssl-cert-loader.cc +++ b/plugins/experimental/ssl_cert_loader/ssl-cert-loader.cc @@ -259,6 +259,7 @@ int Parse_order = 0; void Parse_Config(Value &parent, ParsedSslValues &orig_values) { + bool inserted = false; ParsedSslValues cur_values(orig_values); Value val = parent.find("ssl-key-name"); @@ -299,6 +300,7 @@ Parse_Config(Value &parent, ParsedSslValues 
&orig_values) // Store in appropriate table if (cur_values.server_name.length() > 0) { Lookup.tree.insert(cur_values.server_name, entry, Parse_order++); + inserted = true; } if (cur_values.server_ips.size() > 0) { for (auto &server_ip : cur_values.server_ips) { @@ -309,11 +311,17 @@ Parse_Config(Value &parent, ParsedSslValues &orig_values) char val1[256], val2[256]; server_ip.first.toString(val1, sizeof(val1)); server_ip.second.toString(val2, sizeof(val2)); + inserted = true; } } if (entry != nullptr) { - for (const auto &cert_name : cert_names) { - Lookup.tree.insert(cert_name, entry, Parse_order++); + if (cert_names.size() > 0) { + for (const auto &cert_name : cert_names) { + Lookup.tree.insert(cert_name, entry, Parse_order++); + } + } else if (!inserted) { + delete entry; + TSError(PCP "cert_names is empty and entry not otherwise inserted!"); } } } @@ -510,15 +518,15 @@ TSPluginInit(int argc, const char *argv[]) } if (TS_SUCCESS != TSPluginRegister(&info)) { - TSError(PCP "registration failed."); + TSError(PCP "registration failed"); } else if (TSTrafficServerVersionGetMajor() < 5) { - TSError(PCP "requires Traffic Server 5.0 or later."); + TSError(PCP "requires Traffic Server 5.0 or later"); } else if (nullptr == (cb_pa = TSContCreate(&CB_Pre_Accept, TSMutexCreate()))) { - TSError(PCP "Failed to pre-accept callback."); + TSError(PCP "Failed to pre-accept callback"); } else if (nullptr == (cb_lc = TSContCreate(&CB_Life_Cycle, TSMutexCreate()))) { - TSError(PCP "Failed to lifecycle callback."); + TSError(PCP "Failed to lifecycle callback"); } else if (nullptr == (cb_sni = TSContCreate(&CB_servername, TSMutexCreate()))) { - TSError(PCP "Failed to create SNI callback."); + TSError(PCP "Failed to create SNI callback"); } else { TSLifecycleHookAdd(TS_LIFECYCLE_PORTS_INITIALIZED_HOOK, cb_lc); TSHttpHookAdd(TS_VCONN_PRE_ACCEPT_HOOK, cb_pa); @@ -545,7 +553,7 @@ TSPluginInit(int argc, const char *argv[]) void TSPluginInit(int, const char *[]) { - TSError(PCP "requires 
TLS SNI which is not available."); + TSError(PCP "requires TLS SNI which is not available"); } #endif // TS_USE_TLS_SNI diff --git a/plugins/experimental/stale_while_revalidate/stale_while_revalidate.c b/plugins/experimental/stale_while_revalidate/stale_while_revalidate.c index 01aa89f013a..700787ffdad 100644 --- a/plugins/experimental/stale_while_revalidate/stale_while_revalidate.c +++ b/plugins/experimental/stale_while_revalidate/stale_while_revalidate.c @@ -399,7 +399,7 @@ consume_resource(TSCont cont, TSEvent event ATS_UNUSED, void *edata ATS_UNUSED) TSContDestroy(cont); break; default: - TSError("[%s] Unknown event %d.", PLUGIN_NAME, event); + TSError("[%s] Unknown event %d", PLUGIN_NAME, event); break; } @@ -703,7 +703,7 @@ TSPluginInit(int argc, const char *argv[]) return; } else { - TSDebug(PLUGIN_NAME, "Plugin registration succeeded."); + TSDebug(PLUGIN_NAME, "Plugin registration succeeded"); } plugin_config = TSmalloc(sizeof(config_t)); @@ -763,6 +763,6 @@ TSPluginInit(int argc, const char *argv[]) TSContDataSet(main_cont, (void *)plugin_config); TSHttpHookAdd(TS_HTTP_READ_REQUEST_HDR_HOOK, main_cont); - TSDebug(PLUGIN_NAME, "Plugin Init Complete."); + TSDebug(PLUGIN_NAME, "Plugin Init Complete"); } } diff --git a/plugins/experimental/stream_editor/stream_editor.cc b/plugins/experimental/stream_editor/stream_editor.cc index 5801f68d6c6..35db91f08ff 100644 --- a/plugins/experimental/stream_editor/stream_editor.cc +++ b/plugins/experimental/stream_editor/stream_editor.cc @@ -839,7 +839,7 @@ TSPluginInit(int argc, const char *argv[]) info.support_email = (char *)"users@trafficserver.apache.org"; if (TSPluginRegister(&info) != TS_SUCCESS) { - TSError("[stream-editor] Plugin registration failed."); + TSError("[stream-editor] Plugin registration failed"); return; } diff --git a/plugins/experimental/ts_lua/ts_lua.c b/plugins/experimental/ts_lua/ts_lua.c index 25c97100fa8..c1f053aef5e 100644 --- a/plugins/experimental/ts_lua/ts_lua.c +++ 
b/plugins/experimental/ts_lua/ts_lua.c @@ -407,7 +407,7 @@ TSPluginInit(int argc, const char *argv[]) info.support_email = "dev@trafficserver.apache.org"; if (TSPluginRegister(&info) != TS_SUCCESS) { - TSError("[ts_lua] Plugin registration failed."); + TSError("[ts_lua] Plugin registration failed"); } int ret = 0; diff --git a/plugins/experimental/ts_lua/ts_lua_http_config.c b/plugins/experimental/ts_lua/ts_lua_http_config.c index 435fc751035..16b0f7e5239 100644 --- a/plugins/experimental/ts_lua/ts_lua_http_config.c +++ b/plugins/experimental/ts_lua/ts_lua_http_config.c @@ -22,7 +22,7 @@ typedef enum { TS_LUA_CONFIG_URL_REMAP_PRISTINE_HOST_HDR = TS_CONFIG_URL_REMAP_PRISTINE_HOST_HDR, TS_LUA_CONFIG_HTTP_CHUNKING_ENABLED = TS_CONFIG_HTTP_CHUNKING_ENABLED, TS_LUA_CONFIG_HTTP_NEGATIVE_CACHING_ENABLED = TS_CONFIG_HTTP_NEGATIVE_CACHING_ENABLED, - TS_LUA_CONFIG_HTTP_NEGATIVE_CACHING_LIFETIME = TS_CONFIG_HTTP_CACHE_WHEN_TO_REVALIDATE, + TS_LUA_CONFIG_HTTP_NEGATIVE_CACHING_LIFETIME = TS_CONFIG_HTTP_NEGATIVE_CACHING_LIFETIME, TS_LUA_CONFIG_HTTP_CACHE_WHEN_TO_REVALIDATE = TS_CONFIG_HTTP_CACHE_WHEN_TO_REVALIDATE, TS_LUA_CONFIG_HTTP_KEEP_ALIVE_ENABLED_IN = TS_CONFIG_HTTP_KEEP_ALIVE_ENABLED_IN, TS_LUA_CONFIG_HTTP_KEEP_ALIVE_ENABLED_OUT = TS_CONFIG_HTTP_KEEP_ALIVE_ENABLED_OUT, @@ -108,7 +108,6 @@ typedef enum { TS_LUA_CONFIG_HTTP_CACHE_MAX_OPEN_WRITE_RETRIES = TS_CONFIG_HTTP_CACHE_MAX_OPEN_WRITE_RETRIES, TS_LUA_CONFIG_HTTP_REDIRECT_USE_ORIG_CACHE_KEY = TS_CONFIG_HTTP_REDIRECT_USE_ORIG_CACHE_KEY, TS_LUA_CONFIG_HTTP_ATTACH_SERVER_SESSION_TO_CLIENT = TS_CONFIG_HTTP_ATTACH_SERVER_SESSION_TO_CLIENT, - TS_LUA_CONFIG_HTTP_SAFE_REQUESTS_RETRYABLE = TS_CONFIG_HTTP_SAFE_REQUESTS_RETRYABLE, TS_LUA_CONFIG_HTTP_ORIGIN_MAX_CONNECTIONS_QUEUE = TS_CONFIG_HTTP_ORIGIN_MAX_CONNECTIONS_QUEUE, TS_LUA_CONFIG_WEBSOCKET_NO_ACTIVITY_TIMEOUT = TS_CONFIG_WEBSOCKET_NO_ACTIVITY_TIMEOUT, TS_LUA_CONFIG_WEBSOCKET_ACTIVE_TIMEOUT = TS_CONFIG_WEBSOCKET_ACTIVE_TIMEOUT, @@ -228,7 +227,6 @@ ts_lua_var_item 
ts_lua_http_config_vars[] = { TS_LUA_MAKE_VAR_ITEM(TS_LUA_CONFIG_HTTP_CACHE_MAX_OPEN_WRITE_RETRIES), TS_LUA_MAKE_VAR_ITEM(TS_LUA_CONFIG_HTTP_REDIRECT_USE_ORIG_CACHE_KEY), TS_LUA_MAKE_VAR_ITEM(TS_LUA_CONFIG_HTTP_ATTACH_SERVER_SESSION_TO_CLIENT), - TS_LUA_MAKE_VAR_ITEM(TS_LUA_CONFIG_HTTP_SAFE_REQUESTS_RETRYABLE), TS_LUA_MAKE_VAR_ITEM(TS_LUA_CONFIG_HTTP_ORIGIN_MAX_CONNECTIONS_QUEUE), TS_LUA_MAKE_VAR_ITEM(TS_LUA_CONFIG_WEBSOCKET_NO_ACTIVITY_TIMEOUT), TS_LUA_MAKE_VAR_ITEM(TS_LUA_CONFIG_WEBSOCKET_ACTIVE_TIMEOUT), diff --git a/plugins/experimental/ts_lua/ts_lua_package.c b/plugins/experimental/ts_lua/ts_lua_package.c index 43e0a40ddb1..6b97832c069 100644 --- a/plugins/experimental/ts_lua/ts_lua_package.c +++ b/plugins/experimental/ts_lua/ts_lua_package.c @@ -69,7 +69,7 @@ ts_lua_add_package_path(lua_State *L) conf = ts_lua_get_instance_conf(L); if (conf == NULL) { - return luaL_error(L, "cann't get the instance conf."); + return luaL_error(L, "can't get the instance conf"); } data = luaL_checklstring(L, 1, &dlen); @@ -99,7 +99,7 @@ ts_lua_add_package_path(lua_State *L) if (i >= g_path_cnt) { if (n + i >= TS_LUA_MAX_PACKAGE_NUM) - return luaL_error(L, "extended package path number exceeds %d.", TS_LUA_MAX_PACKAGE_NUM); + return luaL_error(L, "extended package path number exceeds %d", TS_LUA_MAX_PACKAGE_NUM); pp[n].name = (char *)ptr; pp[n].len = item_len; @@ -115,7 +115,7 @@ ts_lua_add_package_path(lua_State *L) if (i >= path_cnt) { if (n + i >= TS_LUA_MAX_PACKAGE_NUM) - return luaL_error(L, "extended package path number exceeds %d.", TS_LUA_MAX_PACKAGE_NUM); + return luaL_error(L, "extended package path number exceeds %d", TS_LUA_MAX_PACKAGE_NUM); pp[n].name = (char *)ptr; pp[n].len = item_len; @@ -168,7 +168,7 @@ ts_lua_add_package_path_items(lua_State *L, ts_lua_package_path *pp, int n) lua_getglobal(L, "package"); if (!lua_istable(L, -1)) { - return luaL_error(L, "'package' table does not exist."); + return luaL_error(L, "'package' table does not exist"); } 
lua_getfield(L, -1, "path"); /* get old package.path */ @@ -182,7 +182,7 @@ ts_lua_add_package_path_items(lua_State *L, ts_lua_package_path *pp, int n) for (i = 0; i < n; i++) { if (new_path_len + pp[i].len + 1 >= sizeof(new_path)) { - TSError("[ts_lua] Extended package.path is too long."); + TSError("[ts_lua] Extended package.path is too long"); return -1; } @@ -215,7 +215,7 @@ ts_lua_add_package_cpath(lua_State *L) conf = ts_lua_get_instance_conf(L); if (conf == NULL) { - return luaL_error(L, "cann't get the instance conf."); + return luaL_error(L, "can't get the instance conf"); } data = luaL_checklstring(L, 1, &dlen); @@ -245,7 +245,7 @@ ts_lua_add_package_cpath(lua_State *L) if (i >= g_cpath_cnt) { if (n + i >= TS_LUA_MAX_PACKAGE_NUM) - return luaL_error(L, "extended package cpath number exceeds %d.", TS_LUA_MAX_PACKAGE_NUM); + return luaL_error(L, "extended package cpath number exceeds %d", TS_LUA_MAX_PACKAGE_NUM); pp[n].name = (char *)ptr; pp[n].len = item_len; @@ -261,7 +261,7 @@ ts_lua_add_package_cpath(lua_State *L) if (i >= cpath_cnt) { if (n + i >= TS_LUA_MAX_PACKAGE_NUM) - return luaL_error(L, "extended package cpath number exceeds %d.", TS_LUA_MAX_PACKAGE_NUM); + return luaL_error(L, "extended package cpath number exceeds %d", TS_LUA_MAX_PACKAGE_NUM); pp[n].name = (char *)ptr; pp[n].len = item_len; @@ -314,7 +314,7 @@ ts_lua_add_package_cpath_items(lua_State *L, ts_lua_package_path *pp, int n) lua_getglobal(L, "package"); if (!lua_istable(L, -1)) { - return luaL_error(L, "'package' table does not exist."); + return luaL_error(L, "'package' table does not exist"); } lua_getfield(L, -1, "cpath"); /* get old package.cpath */ @@ -328,7 +328,7 @@ ts_lua_add_package_cpath_items(lua_State *L, ts_lua_package_path *pp, int n) for (i = 0; i < n; i++) { if (new_path_len + pp[i].len + 1 >= sizeof(new_path)) { - TSError("[ts_lua] Extended package.cpath is too long."); + TSError("[ts_lua] Extended package.cpath is too long"); return -1; } diff --git 
a/plugins/experimental/url_sig/url_sig.c b/plugins/experimental/url_sig/url_sig.c index 240c9a54a5e..d9b9501fcad 100644 --- a/plugins/experimental/url_sig/url_sig.c +++ b/plugins/experimental/url_sig/url_sig.c @@ -16,6 +16,13 @@ limitations under the License. */ +#define min(a, b) \ + ({ \ + __typeof__(a) _a = (a); \ + __typeof__(b) _b = (b); \ + _a < _b ? _a : _b; \ + }) + #include "ts/ink_defs.h" #include "url_sig.h" @@ -54,7 +61,7 @@ struct config { static void free_cfg(struct config *cfg) { - TSError("[url_sig] Cleaning up..."); + TSError("[url_sig] Cleaning up"); TSfree(cfg->err_url); if (cfg->regex_extra) { @@ -99,7 +106,7 @@ TSRemapNewInstance(int argc, char *argv[], void **ih, char *errbuf, int errbuf_s if (argc != 3) { snprintf(errbuf, errbuf_size - 1, - "[TSRemapNewKeyInstance] - Argument count wrong (%d)... Need exactly two pparam= (config file name).", argc); + "[TSRemapNewKeyInstance] - Argument count wrong (%d)... Need exactly two pparam= (config file name)", argc); return TS_ERROR; } TSDebug(PLUGIN_NAME, "Initializing remap function of %s -> %s with config from %s", argv[0], argv[1], argv[2]); @@ -109,7 +116,7 @@ TSRemapNewInstance(int argc, char *argv[], void **ih, char *errbuf, int errbuf_s TSDebug(PLUGIN_NAME, "config file name: %s", config_file); FILE *file = fopen(config_file, "r"); if (file == NULL) { - snprintf(errbuf, errbuf_size - 1, "[TSRemapNewInstance] - Error opening file %s.", config_file); + snprintf(errbuf, errbuf_size - 1, "[TSRemapNewInstance] - Error opening file %s", config_file); return TS_ERROR; } @@ -128,7 +135,7 @@ TSRemapNewInstance(int argc, char *argv[], void **ih, char *errbuf, int errbuf_s } char *pos = strchr(line, '='); if (pos == NULL) { - TSError("[url_sig] Error parsing line %d of file %s (%s).", line_no, config_file, line); + TSError("[url_sig] Error parsing line %d of file %s (%s)", line_no, config_file, line); continue; } *pos = '\0'; @@ -141,7 +148,7 @@ TSRemapNewInstance(int argc, char *argv[], void **ih, char 
*errbuf, int errbuf_s *pos = '\0'; } if (pos == NULL || strlen(value) >= MAX_KEY_LEN) { - snprintf(errbuf, errbuf_size - 1, "[TSRemapNewInstance] - Maximum key length (%d) exceeded on line %d.", MAX_KEY_LEN - 1, + snprintf(errbuf, errbuf_size - 1, "[TSRemapNewInstance] - Maximum key length (%d) exceeded on line %d", MAX_KEY_LEN - 1, line_no); fclose(file); free_cfg(cfg); @@ -159,8 +166,7 @@ TSRemapNewInstance(int argc, char *argv[], void **ih, char *errbuf, int errbuf_s } TSDebug(PLUGIN_NAME, "key number %d == %s", keynum, value); if (keynum >= MAX_KEY_NUM || keynum < 0) { - snprintf(errbuf, errbuf_size - 1, "[TSRemapNewInstance] - Key number (%d) >= MAX_KEY_NUM (%d) or NaN.", keynum, - MAX_KEY_NUM); + snprintf(errbuf, errbuf_size - 1, "[TSRemapNewInstance] - Key number (%d) >= MAX_KEY_NUM (%d) or NaN", keynum, MAX_KEY_NUM); fclose(file); free_cfg(cfg); return TS_ERROR; @@ -191,7 +197,7 @@ TSRemapNewInstance(int argc, char *argv[], void **ih, char *errbuf, int errbuf_s cfg->regex = pcre_compile(value, options, &errptr, &erroffset, NULL); if (cfg->regex == NULL) { - TSDebug(PLUGIN_NAME, "Regex compilation failed with error (%s) at character %d.", errptr, erroffset); + TSDebug(PLUGIN_NAME, "Regex compilation failed with error (%s) at character %d", errptr, erroffset); } else { #ifdef PCRE_STUDY_JIT_COMPILE options = PCRE_STUDY_JIT_COMPILE; @@ -200,7 +206,7 @@ TSRemapNewInstance(int argc, char *argv[], void **ih, char *errbuf, int errbuf_s cfg->regex, options, &errptr); // We do not need to check the error here because we can still run without the studying? 
} } else { - TSError("[url_sig] Error parsing line %d of file %s (%s).", line_no, config_file, line); + TSError("[url_sig] Error parsing line %d of file %s (%s)", line_no, config_file, line); } } @@ -222,7 +228,7 @@ TSRemapNewInstance(int argc, char *argv[], void **ih, char *errbuf, int errbuf_s } break; default: - snprintf(errbuf, errbuf_size - 1, "[TSRemapNewInstance] - Return code %d not supported.", cfg->err_status); + snprintf(errbuf, errbuf_size - 1, "[TSRemapNewInstance] - Return code %d not supported", cfg->err_status); fclose(file); free_cfg(cfg); return TS_ERROR; @@ -254,18 +260,18 @@ err_log(char *url, char *msg) // See the README. All Signing parameters must be concatenated to the end // of the url and any application query parameters. static char * -getAppQueryString(char *query_string, int query_length) +getAppQueryString(char *query_string, unsigned int query_length) { int done = 0; char *p; char buf[MAX_QUERY_LEN]; - if (query_length > MAX_QUERY_LEN) { - TSDebug(PLUGIN_NAME, "Cannot process the query string as the length exceeds %d bytes.", MAX_QUERY_LEN); + if (query_length >= sizeof(buf)) { + TSDebug(PLUGIN_NAME, "Cannot process the query string as the length exceeds %d bytes", MAX_QUERY_LEN); return NULL; } memset(buf, 0, MAX_QUERY_LEN); - strncpy(buf, query_string, query_length); + strncpy(buf, query_string, min(query_length, sizeof(buf) - 1)); p = buf; TSDebug(PLUGIN_NAME, "query_string: %s, query_length: %d", query_string, query_length); @@ -343,7 +349,7 @@ TSRemapDoRemap(void *ih, TSHttpTxn txnp, TSRemapRequestInfo *rri) url = TSUrlStringGet(rri->requestBufp, rri->requestUrl, &url_len); if (url_len >= MAX_REQ_LEN - 1) { - err_log(url, "URL string too long."); + err_log(url, "URL string too long"); goto deny; } @@ -369,12 +375,12 @@ TSRemapDoRemap(void *ih, TSHttpTxn txnp, TSRemapRequestInfo *rri) } if (query == NULL) { - err_log(url, "Has no query string."); + err_log(url, "Has no query string"); goto deny; } if (strncmp(url, "http://", 
strlen("http://")) != 0) { - err_log(url, "Invalid URL scheme - only http supported."); + err_log(url, "Invalid URL scheme - only http supported"); goto deny; } @@ -388,15 +394,15 @@ TSRemapDoRemap(void *ih, TSHttpTxn txnp, TSRemapRequestInfo *rri) p += strlen(CIP_QSTRING + 1); pp = strstr(p, "&"); if ((pp - p) > CIP_STRLEN - 1 || (pp - p) < 4) { - err_log(url, "IP address string too long or short."); + err_log(url, "IP address string too long or short"); goto deny; } - strncpy(client_ip, p + strlen(CIP_QSTRING) + 1, (pp - p - (strlen(CIP_QSTRING) + 1))); + strncpy(client_ip, p + strlen(CIP_QSTRING) + 1, min((pp - p - (strlen(CIP_QSTRING) + 1)), sizeof(client_ip) - 1)); client_ip[pp - p - (strlen(CIP_QSTRING) + 1)] = '\0'; TSDebug(PLUGIN_NAME, "CIP: -%s-", client_ip); retval = TSHttpTxnClientFdGet(txnp, &sockfd); if (retval != TS_SUCCESS) { - err_log(url, "Error getting sockfd."); + err_log(url, "Error getting sockfd"); goto deny; } peer_len = sizeof(peer); @@ -407,7 +413,7 @@ TSRemapDoRemap(void *ih, TSHttpTxn txnp, TSRemapRequestInfo *rri) inet_ntop(AF_INET, &s->sin_addr, ipstr, sizeof ipstr); TSDebug(PLUGIN_NAME, "Peer address: -%s-", ipstr); if (strcmp(ipstr, client_ip) != 0) { - err_log(url, "Client IP doesn't match signature."); + err_log(url, "Client IP doesn't match signature"); goto deny; } } @@ -417,12 +423,12 @@ TSRemapDoRemap(void *ih, TSHttpTxn txnp, TSRemapRequestInfo *rri) p += strlen(EXP_QSTRING) + 1; expiration = atoi(p); if (expiration == 0 || expiration < time(NULL)) { - err_log(url, "Invalid expiration, or expired."); + err_log(url, "Invalid expiration, or expired"); goto deny; } TSDebug(PLUGIN_NAME, "Exp: %d", (int)expiration); } else { - err_log(url, "Expiration query string not found."); + err_log(url, "Expiration query string not found"); goto deny; } // Algorithm @@ -433,7 +439,7 @@ TSRemapDoRemap(void *ih, TSHttpTxn txnp, TSRemapRequestInfo *rri) // The check for a valid algorithm is later. 
TSDebug(PLUGIN_NAME, "Algorithm: %d", algorithm); } else { - err_log(url, "Algorithm query string not found."); + err_log(url, "Algorithm query string not found"); goto deny; } // Key index @@ -442,12 +448,12 @@ TSRemapDoRemap(void *ih, TSHttpTxn txnp, TSRemapRequestInfo *rri) p += strlen(KIN_QSTRING) + 1; keyindex = atoi(p); if (keyindex < 0 || keyindex >= MAX_KEY_NUM || 0 == cfg->keys[keyindex][0]) { - err_log(url, "Invalid key index."); + err_log(url, "Invalid key index"); goto deny; } TSDebug(PLUGIN_NAME, "Key Index: %d", keyindex); } else { - err_log(url, "KeyIndex query string not found."); + err_log(url, "KeyIndex query string not found"); goto deny; } // Parts @@ -458,7 +464,7 @@ TSRemapDoRemap(void *ih, TSHttpTxn txnp, TSRemapRequestInfo *rri) p = strstr(parts, "&"); TSDebug(PLUGIN_NAME, "Parts: %.*s", (int)(p - parts), parts); } else { - err_log(url, "PartsSigned query string not found."); + err_log(url, "PartsSigned query string not found"); goto deny; } // And finally, the sig (has to be last) @@ -468,11 +474,11 @@ TSRemapDoRemap(void *ih, TSHttpTxn txnp, TSRemapRequestInfo *rri) signature = p; // NOTE sig is not NULL terminated, it has to be 20 chars if ((algorithm == USIG_HMAC_SHA1 && strlen(signature) < SHA1_SIG_SIZE) || (algorithm == USIG_HMAC_MD5 && strlen(signature) < MD5_SIG_SIZE)) { - err_log(url, "Signature query string too short (< 20)."); + err_log(url, "Signature query string too short (< 20)"); goto deny; } } else { - err_log(url, "Signature query string not found."); + err_log(url, "Signature query string not found"); goto deny; } @@ -486,8 +492,8 @@ TSRemapDoRemap(void *ih, TSHttpTxn txnp, TSRemapRequestInfo *rri) part = strtok_r(urltokstr, "/", &p); while (part != NULL) { if (parts[j] == '1') { - strcpy(signed_part + strlen(signed_part), part); - strcpy(signed_part + strlen(signed_part), "/"); + strncat(signed_part, part, sizeof(signed_part) - strlen(signed_part) - 1); + strncat(signed_part, "/", sizeof(signed_part) - strlen(signed_part) 
- 1); } if (parts[j + 1] == '0' || parts[j + 1] == '1') { // This remembers the last part, meaning, if there are no more valid letters in parts @@ -498,7 +504,7 @@ TSRemapDoRemap(void *ih, TSHttpTxn txnp, TSRemapRequestInfo *rri) signed_part[strlen(signed_part) - 1] = '?'; // chop off the last /, replace with '?' p = strstr(query, SIG_QSTRING "="); - strncat(signed_part, query, (p - query) + strlen(SIG_QSTRING) + 1); + strncat(signed_part, query, min((p - query) + strlen(SIG_QSTRING) + 1, sizeof(signed_part) - strlen(signed_part) - 1)); TSDebug(PLUGIN_NAME, "Signed string=\"%s\"", signed_part); @@ -524,7 +530,7 @@ TSRemapDoRemap(void *ih, TSHttpTxn txnp, TSRemapRequestInfo *rri) } break; default: - err_log(url, "Algorithm not supported."); + err_log(url, "Algorithm not supported"); goto deny; } @@ -537,10 +543,10 @@ TSRemapDoRemap(void *ih, TSHttpTxn txnp, TSRemapRequestInfo *rri) /* and compare to signature that was sent */ cmp_res = strncmp(sig_string, signature, sig_len * 2); if (cmp_res != 0) { - err_log(url, "Signature check failed."); + err_log(url, "Signature check failed"); goto deny; } else { - TSDebug(PLUGIN_NAME, "Signature check passed."); + TSDebug(PLUGIN_NAME, "Signature check passed"); goto allow; } @@ -583,7 +589,7 @@ TSRemapDoRemap(void *ih, TSHttpTxn txnp, TSRemapRequestInfo *rri) rval = TSUrlHttpQuerySet(rri->requestBufp, rri->requestUrl, NULL, 0); } if (rval != TS_SUCCESS) { - TSError("[url_sig] Error setting the query string: %d.", rval); + TSError("[url_sig] Error setting the query string: %d", rval); } return TSREMAP_NO_REMAP; } diff --git a/plugins/header_rewrite/header_rewrite.cc b/plugins/header_rewrite/header_rewrite.cc index 1f30de28f80..e3709d329a8 100644 --- a/plugins/header_rewrite/header_rewrite.cc +++ b/plugins/header_rewrite/header_rewrite.cc @@ -327,7 +327,7 @@ TSPluginInit(int argc, const char *argv[]) info.support_email = (char *)"dev@trafficserver.apache.org"; if (TS_SUCCESS != TSPluginRegister(&info)) { - TSError("[%s] plugin 
registration failed.", PLUGIN_NAME); + TSError("[%s] plugin registration failed", PLUGIN_NAME); } // Parse the global config file(s). All rules are just appended diff --git a/plugins/header_rewrite/parser.cc b/plugins/header_rewrite/parser.cc index 3929f299e5a..77da4460fc4 100644 --- a/plugins/header_rewrite/parser.cc +++ b/plugins/header_rewrite/parser.cc @@ -82,7 +82,7 @@ Parser::Parser(const std::string &original_line) : _cond(false), _empty(false) extracting_token = false; } else { // Malformed expression / operation, ignore ... - TSError("[%s] malformed line \"%s\" ignoring...", PLUGIN_NAME, line.c_str()); + TSError("[%s] malformed line \"%s\", ignoring", PLUGIN_NAME, line.c_str()); _tokens.clear(); _empty = true; return; @@ -111,7 +111,7 @@ Parser::Parser(const std::string &original_line) : _cond(false), _empty(false) _tokens.push_back(line.substr(cur_token_start)); } else { // unterminated quote, error case. - TSError("[%s] malformed line, unterminated quotation: \"%s\" ignoring...", PLUGIN_NAME, line.c_str()); + TSError("[%s] malformed line, unterminated quotation: \"%s\", ignoring", PLUGIN_NAME, line.c_str()); _tokens.clear(); _empty = true; return; diff --git a/plugins/healthchecks/healthchecks.c b/plugins/healthchecks/healthchecks.c index 995151e9204..c87c94d8286 100644 --- a/plugins/healthchecks/healthchecks.c +++ b/plugins/healthchecks/healthchecks.c @@ -565,7 +565,7 @@ TSPluginInit(int argc, const char *argv[]) TSPluginRegistrationInfo info; if (2 != argc) { - TSError("[healthchecks] Must specify a configuration file."); + TSError("[healthchecks] Must specify a configuration file"); return; } @@ -574,7 +574,7 @@ TSPluginInit(int argc, const char *argv[]) info.support_email = "dev@trafficserver.apache.org"; if (TS_SUCCESS != TSPluginRegister(&info)) { - TSError("[healthchecks] Plugin registration failed."); + TSError("[healthchecks] Plugin registration failed"); return; } diff --git a/plugins/regex_revalidate/regex_revalidate.c 
b/plugins/regex_revalidate/regex_revalidate.c index 1c352b81333..2a5e5310391 100644 --- a/plugins/regex_revalidate/regex_revalidate.c +++ b/plugins/regex_revalidate/regex_revalidate.c @@ -478,7 +478,7 @@ TSPluginInit(int argc, const char *argv[]) plugin_state_t *pstate; invalidate_t *iptr = NULL; - TSDebug(LOG_PREFIX, "Starting plugin init."); + TSDebug(LOG_PREFIX, "Starting plugin init"); pstate = (plugin_state_t *)TSmalloc(sizeof(plugin_state_t)); init_plugin_state_t(pstate); @@ -505,7 +505,7 @@ TSPluginInit(int argc, const char *argv[]) } if (!pstate->config_file) { - TSError("[regex_revalidate] Plugin requires a --config option along with a config file name."); + TSError("[regex_revalidate] Plugin requires a --config option along with a config file name"); free_plugin_state_t(pstate); return; } @@ -522,12 +522,12 @@ TSPluginInit(int argc, const char *argv[]) info.support_email = "dev@trafficserver.apache.org"; if (TSPluginRegister(&info) != TS_SUCCESS) { - TSError("[regex_revalidate] Plugin registration failed."); + TSError("[regex_revalidate] Plugin registration failed"); free_plugin_state_t(pstate); return; } else { - TSDebug(LOG_PREFIX, "Plugin registration succeeded."); + TSDebug(LOG_PREFIX, "Plugin registration succeeded"); } if (!check_ts_version()) { @@ -547,5 +547,5 @@ TSPluginInit(int argc, const char *argv[]) TSContDataSet(config_cont, (void *)pstate); TSContSchedule(config_cont, CONFIG_TMOUT, TS_THREAD_POOL_TASK); - TSDebug(LOG_PREFIX, "Plugin Init Complete."); + TSDebug(LOG_PREFIX, "Plugin Init Complete"); } diff --git a/plugins/tcpinfo/tcpinfo.cc b/plugins/tcpinfo/tcpinfo.cc index c0e451a1a0b..c1d21488376 100644 --- a/plugins/tcpinfo/tcpinfo.cc +++ b/plugins/tcpinfo/tcpinfo.cc @@ -361,7 +361,7 @@ TSPluginInit(int argc, const char *argv[]) case 'e': i = strtoul(optarg, &endptr, 10); if (*endptr != '\0' || i > 3) { - TSError("[tcpinfo] invalid rolling-enabled argument, '%s', using default of %d.", optarg, rolling_enabled); + TSError("[tcpinfo] 
invalid rolling-enabled argument, '%s', using default of %d", optarg, rolling_enabled); } else { rolling_enabled = i; } @@ -369,7 +369,7 @@ TSPluginInit(int argc, const char *argv[]) case 'H': i = strtoul(optarg, &endptr, 10); if (*endptr != '\0' || i > 23) { - TSError("[tcpinfo] invalid rolling-offset-hr argument, '%s', using default of %d.", optarg, rolling_offset_hr); + TSError("[tcpinfo] invalid rolling-offset-hr argument, '%s', using default of %d", optarg, rolling_offset_hr); } else { rolling_offset_hr = i; } @@ -377,7 +377,7 @@ TSPluginInit(int argc, const char *argv[]) case 'S': i = strtoul(optarg, &endptr, 10); if (*endptr != '\0' || i < 60 || i > 86400) { - TSError("[tcpinfo] invalid rolling-interval-sec argument, '%s', using default of %d.", optarg, rolling_interval_sec); + TSError("[tcpinfo] invalid rolling-interval-sec argument, '%s', using default of %d", optarg, rolling_interval_sec); } else { rolling_interval_sec = i; } @@ -385,7 +385,7 @@ TSPluginInit(int argc, const char *argv[]) case 'M': i = ink_atoui(optarg); if (i < 10) { - TSError("[tcpinfo] invalid rolling-size argument, '%s', using default of %d.", optarg, rolling_size); + TSError("[tcpinfo] invalid rolling-size argument, '%s', using default of %d", optarg, rolling_size); } else { rolling_size = i; } diff --git a/proxy/InkAPI.cc b/proxy/InkAPI.cc index 118e7c281bb..67dbb40e30c 100644 --- a/proxy/InkAPI.cc +++ b/proxy/InkAPI.cc @@ -6554,7 +6554,7 @@ TSVConnRead(TSVConn connp, TSCont contp, TSIOBuffer bufp, int64_t nbytes) FORCE_PLUGIN_SCOPED_MUTEX(contp); VConnection *vc = (VConnection *)connp; - return reinterpret_cast(vc->do_io(VIO::READ, (INKContInternal *)contp, nbytes, (MIOBuffer *)bufp)); + return reinterpret_cast(vc->do_io_read((INKContInternal *)contp, nbytes, (MIOBuffer *)bufp)); } TSVIO @@ -8046,9 +8046,6 @@ _conf_to_memberp(TSOverridableConfigKey conf, OverridableHttpConfigParams *overr case TS_CONFIG_HTTP_ATTACH_SERVER_SESSION_TO_CLIENT: ret = 
_memberp_to_generic(&overridableHttpConfig->attach_server_session_to_client, typep); break; - case TS_CONFIG_HTTP_SAFE_REQUESTS_RETRYABLE: - ret = _memberp_to_generic(&overridableHttpConfig->safe_requests_retryable, typep); - break; case TS_CONFIG_HTTP_ORIGIN_MAX_CONNECTIONS_QUEUE: ret = _memberp_to_generic(&overridableHttpConfig->origin_max_connections_queue, typep); break; @@ -8520,8 +8517,6 @@ TSHttpTxnConfigFind(const char *name, int length, TSOverridableConfigKey *conf, cnf = TS_CONFIG_HTTP_ANONYMIZE_REMOVE_COOKIE; } else if (!strncmp(name, "proxy.config.http.request_header_max_size", length)) { cnf = TS_CONFIG_HTTP_REQUEST_HEADER_MAX_SIZE; - } else if (!strncmp(name, "proxy.config.http.safe_requests_retryable", length)) { - cnf = TS_CONFIG_HTTP_SAFE_REQUESTS_RETRYABLE; } break; case 'r': diff --git a/proxy/InkAPITest.cc b/proxy/InkAPITest.cc index 47f34cea6c8..d8e3a9cc6a4 100644 --- a/proxy/InkAPITest.cc +++ b/proxy/InkAPITest.cc @@ -7612,7 +7612,6 @@ const char *SDK_Overridable_Configs[TS_CONFIG_LAST_ENTRY] = { "proxy.config.http.cache.max_open_write_retries", "proxy.config.http.redirect_use_orig_cache_key", "proxy.config.http.attach_server_session_to_client", - "proxy.config.http.safe_requests_retryable", "proxy.config.http.origin_max_connections_queue", "proxy.config.websocket.no_activity_timeout", "proxy.config.websocket.active_timeout", diff --git a/proxy/Main.cc b/proxy/Main.cc index 9c7548cfcb6..d910efcd447 100644 --- a/proxy/Main.cc +++ b/proxy/Main.cc @@ -1756,6 +1756,13 @@ main(int /* argc ATS_UNUSED */, const char **argv) ink_hostdb_init(makeModuleVersion(HOSTDB_MODULE_MAJOR_VERSION, HOSTDB_MODULE_MINOR_VERSION, PRIVATE_MODULE_HEADER)); ink_dns_init(makeModuleVersion(HOSTDB_MODULE_MAJOR_VERSION, HOSTDB_MODULE_MINOR_VERSION, PRIVATE_MODULE_HEADER)); ink_split_dns_init(makeModuleVersion(1, 0, PRIVATE_MODULE_HEADER)); + + // Do the inits for NetProcessors that use ET_NET threads. MUST be before starting those threads. 
+ netProcessor.init(); + pre_thread_HttpProxyServer(); + + // !! ET_NET threads start here !! + // This means any spawn scheduled thread set up must be done before this point. eventProcessor.start(num_of_net_threads, stacksize); int num_remap_threads = 0; @@ -1806,13 +1813,6 @@ main(int /* argc ATS_UNUSED */, const char **argv) } HttpProxyPort::loadDefaultIfEmpty(); - if (!accept_mss) { - REC_ReadConfigInteger(accept_mss, "proxy.config.net.sock_mss_in"); - } - - NetProcessor::accept_mss = accept_mss; - netProcessor.start(0, stacksize); - dnsProcessor.start(0, stacksize); if (hostDBProcessor.start() < 0) SignalWarning(MGMT_SIGNAL_SYSTEM_ERROR, "bad hostdb or storage configuration, hostdb disabled"); @@ -1870,9 +1870,8 @@ main(int /* argc ATS_UNUSED */, const char **argv) // main server logic initiated here // ////////////////////////////////////// - transformProcessor.start(); - init_HttpProxyServer(num_accept_threads); + transformProcessor.start(); int http_enabled = 1; REC_ReadConfigInteger(http_enabled, "proxy.config.http.enabled"); diff --git a/proxy/ProxyClientSession.h b/proxy/ProxyClientSession.h index 0c84ce1afbb..ee6f1aa48d7 100644 --- a/proxy/ProxyClientSession.h +++ b/proxy/ProxyClientSession.h @@ -226,8 +226,8 @@ class ProxyClientSession : public VConnection /// DNS resolution preferences. HostResStyle host_res_style = HOST_RES_IPV4; - ink_hrtime ssn_start_time; - ink_hrtime ssn_last_txn_time; + ink_hrtime ssn_start_time = 0; + ink_hrtime ssn_last_txn_time = 0; protected: // XXX Consider using a bitwise flags variable for the following flags, so diff --git a/proxy/ReverseProxy.cc b/proxy/ReverseProxy.cc index 73e1f2f3e48..729f3458fbb 100644 --- a/proxy/ReverseProxy.cc +++ b/proxy/ReverseProxy.cc @@ -43,7 +43,7 @@ #include "UrlMapping.h" /** Time till we free the old stuff after a reconfiguration. 
*/ -#define URL_REWRITE_TIMEOUT (HRTIME_SECOND * 60) +#define URL_REWRITE_TIMEOUT (HRTIME_SECOND * 300) // Global Ptrs static Ptr reconfig_mutex; diff --git a/proxy/TestSimpleProxy.cc b/proxy/TestSimpleProxy.cc index cc73a003ba9..67dff518bb2 100644 --- a/proxy/TestSimpleProxy.cc +++ b/proxy/TestSimpleProxy.cc @@ -44,9 +44,9 @@ struct TestProxy : Continuation { if (outbuf) free_MIOBuffer(outbuf); if (vc) - vc->do_io(VIO::CLOSE); + vc->do_io_close(); if (remote) - remote->do_io(VIO::CLOSE); + remote->do_io_close(); delete this; return EVENT_DONE; } @@ -94,7 +94,7 @@ struct TestProxy : Continuation { } remote = aremote; outbuf = new_MIOBuffer(); - remote->do_io(VIO::WRITE, this, INT64_MAX, outbuf); + remote->do_io_write(this, INT64_MAX, outbuf); *url_end = 0; sprintf(outbuf->start, "GET %s HTTP/1.0\n\n\n", url); outbuf->fill(strlen(outbuf->start) + 1); @@ -143,7 +143,7 @@ struct TestAccept : Continuation { { if (!event) { MIOBuffer *buf = new_MIOBuffer(); - e->do_io(VIO::READ, new TestProxy(buf), INT64_MAX, buf); + e->do_io_read(new TestProxy(buf), INT64_MAX, buf); } else { printf("TestAccept error %d\n", event); return EVENT_DONE; diff --git a/proxy/hdrs/HTTP.h b/proxy/hdrs/HTTP.h index e32704e0615..b4eb2369f04 100644 --- a/proxy/hdrs/HTTP.h +++ b/proxy/hdrs/HTTP.h @@ -103,7 +103,8 @@ enum HTTPWarningCode { HTTP_WARNING_CODE_MISC_WARNING = 199 }; -/* squild log codes */ +/* squild log codes + There is code (e.g. 
logstats) that depends on these errors coming at the end of this enum */ enum SquidLogCode { SQUID_LOG_EMPTY = '0', SQUID_LOG_TCP_HIT = '1', diff --git a/proxy/hdrs/HdrHeap.cc b/proxy/hdrs/HdrHeap.cc index 842025de997..87490dd11c1 100644 --- a/proxy/hdrs/HdrHeap.cc +++ b/proxy/hdrs/HdrHeap.cc @@ -861,7 +861,7 @@ HdrHeap::check_marshalled(uint32_t buf_length) int HdrHeap::unmarshal(int buf_length, int obj_type, HdrHeapObjImpl **found_obj, RefCountObj *block_ref) { - bool obj_found = false; + *found_obj = nullptr; // Check out this heap and make sure it is OK if (m_magic != HDR_BUF_MAGIC_MARSHALED) { @@ -929,7 +929,7 @@ HdrHeap::unmarshal(int buf_length, int obj_type, HdrHeapObjImpl **found_obj, Ref HdrHeapObjImpl *obj = (HdrHeapObjImpl *)obj_data; ink_assert(obj_is_aligned(obj)); - if (obj->m_type == (unsigned)obj_type && obj_found == false) { + if (obj->m_type == (unsigned)obj_type && *found_obj == nullptr) { *found_obj = obj; } diff --git a/proxy/http/HttpCacheSM.h b/proxy/http/HttpCacheSM.h index 4037d784834..b72d43827c3 100644 --- a/proxy/http/HttpCacheSM.h +++ b/proxy/http/HttpCacheSM.h @@ -143,7 +143,7 @@ class HttpCacheSM : public Continuation { if (cache_read_vc) { HTTP_DECREMENT_DYN_STAT(http_current_cache_connections_stat); - cache_read_vc->do_io(VIO::ABORT); + cache_read_vc->do_io_close(); // abort cache_read_vc = NULL; } } @@ -152,7 +152,7 @@ class HttpCacheSM : public Continuation { if (cache_write_vc) { HTTP_DECREMENT_DYN_STAT(http_current_cache_connections_stat); - cache_write_vc->do_io(VIO::ABORT); + cache_write_vc->do_io_close(); // abort cache_write_vc = NULL; } } @@ -161,7 +161,7 @@ class HttpCacheSM : public Continuation { if (cache_write_vc) { HTTP_DECREMENT_DYN_STAT(http_current_cache_connections_stat); - cache_write_vc->do_io(VIO::CLOSE); + cache_write_vc->do_io_close(); cache_write_vc = NULL; } } @@ -170,7 +170,7 @@ class HttpCacheSM : public Continuation { if (cache_read_vc) { HTTP_DECREMENT_DYN_STAT(http_current_cache_connections_stat); - 
cache_read_vc->do_io(VIO::CLOSE); + cache_read_vc->do_io_close(); cache_read_vc = NULL; } } diff --git a/proxy/http/HttpConfig.cc b/proxy/http/HttpConfig.cc index ed546e3313b..062bdd4169e 100644 --- a/proxy/http/HttpConfig.cc +++ b/proxy/http/HttpConfig.cc @@ -897,7 +897,6 @@ HttpConfig::startup() HttpEstablishStaticConfigLongLong(c.oride.origin_max_connections_queue, "proxy.config.http.origin_max_connections_queue"); HttpEstablishStaticConfigLongLong(c.origin_min_keep_alive_connections, "proxy.config.http.origin_min_keep_alive_connections"); HttpEstablishStaticConfigByte(c.oride.attach_server_session_to_client, "proxy.config.http.attach_server_session_to_client"); - HttpEstablishStaticConfigByte(c.oride.safe_requests_retryable, "proxy.config.http.safe_requests_retryable"); HttpEstablishStaticConfigByte(c.disable_ssl_parenting, "proxy.local.http.parent_proxy.disable_connect_tunneling"); HttpEstablishStaticConfigByte(c.oride.forward_connect_method, "proxy.config.http.forward_connect_method"); @@ -1170,7 +1169,6 @@ HttpConfig::reconfigure() } params->origin_min_keep_alive_connections = m_master.origin_min_keep_alive_connections; params->oride.attach_server_session_to_client = m_master.oride.attach_server_session_to_client; - params->oride.safe_requests_retryable = m_master.oride.safe_requests_retryable; if (params->oride.origin_max_connections && params->oride.origin_max_connections < params->origin_min_keep_alive_connections) { Warning("origin_max_connections < origin_min_keep_alive_connections, setting min=max , please correct your records.config"); diff --git a/proxy/http/HttpConfig.h b/proxy/http/HttpConfig.h index 52f0a0f7dc6..85ccca2aae7 100644 --- a/proxy/http/HttpConfig.h +++ b/proxy/http/HttpConfig.h @@ -377,7 +377,6 @@ struct OverridableHttpConfigParams { fwd_proxy_auth_to_parent(0), uncacheable_requests_bypass_parent(1), attach_server_session_to_client(0), - safe_requests_retryable(1), forward_connect_method(0), insert_age_in_response(1), 
anonymize_remove_from(0), @@ -503,8 +502,6 @@ struct OverridableHttpConfigParams { MgmtByte uncacheable_requests_bypass_parent; MgmtByte attach_server_session_to_client; - MgmtByte safe_requests_retryable; - MgmtByte forward_connect_method; MgmtByte insert_age_in_response; diff --git a/proxy/http/HttpProxyServerMain.cc b/proxy/http/HttpProxyServerMain.cc index 7b59eef1077..ae431d5cfed 100644 --- a/proxy/http/HttpProxyServerMain.cc +++ b/proxy/http/HttpProxyServerMain.cc @@ -222,6 +222,13 @@ MakeHttpProxyAcceptor(HttpProxyAcceptor &acceptor, HttpProxyPort &port, unsigned } } +/// Do all pre-thread initialization / setup. +void +pre_thread_HttpProxyServer() +{ + httpSessionManager.init(); +} + /** Set up all the accepts and sockets. */ void @@ -230,7 +237,6 @@ init_HttpProxyServer(int n_accept_threads) HttpProxyPort::Group &proxy_ports = HttpProxyPort::global(); init_reverse_proxy(); - httpSessionManager.init(); http_pages_init(); #ifdef USE_HTTP_DEBUG_LISTS diff --git a/proxy/http/HttpProxyServerMain.h b/proxy/http/HttpProxyServerMain.h index 2950bc97cf4..1a881aebc2f 100644 --- a/proxy/http/HttpProxyServerMain.h +++ b/proxy/http/HttpProxyServerMain.h @@ -23,6 +23,9 @@ struct HttpProxyPort; +/// Perform any pre-thread start initialization. +void pre_thread_HttpProxyServer(); + /** Initialize all HTTP proxy port data structures needed to run. 
*/ void init_HttpProxyServer(int n_accept_threads = 0); diff --git a/proxy/http/HttpSM.cc b/proxy/http/HttpSM.cc index 8e2b8282cbc..52a0e498244 100644 --- a/proxy/http/HttpSM.cc +++ b/proxy/http/HttpSM.cc @@ -267,74 +267,10 @@ HttpVCTable::cleanup_all() static int next_sm_id = 0; -HttpSM::HttpSM() - : Continuation(nullptr), - sm_id(-1), - magic(HTTP_SM_MAGIC_DEAD), - // YTS Team, yamsat Plugin - enable_redirection(false), - redirect_url(nullptr), - redirect_url_len(0), - redirection_tries(0), - transfered_bytes(0), - post_failed(false), - debug_on(false), - plugin_tunnel_type(HTTP_NO_PLUGIN_TUNNEL), - plugin_tunnel(nullptr), - reentrancy_count(0), - history_pos(0), - tunnel(), - ua_entry(nullptr), - ua_session(nullptr), - background_fill(BACKGROUND_FILL_NONE), - ua_raw_buffer_reader(nullptr), - server_entry(nullptr), - server_session(nullptr), - will_be_private_ss(false), - shared_session_retries(0), - server_buffer_reader(nullptr), - transform_info(), - post_transform_info(), - has_active_plugin_agents(false), - second_cache_sm(nullptr), - default_handler(nullptr), - pending_action(nullptr), - last_action(HttpTransact::SM_ACTION_UNDEFINED), - // TODO: Now that bodies can be empty, should the body counters be set to -1 ? 
TS-2213 - client_request_hdr_bytes(0), - client_request_body_bytes(0), - server_request_hdr_bytes(0), - server_request_body_bytes(0), - server_response_hdr_bytes(0), - server_response_body_bytes(0), - client_response_hdr_bytes(0), - client_response_body_bytes(0), - cache_response_hdr_bytes(0), - cache_response_body_bytes(0), - pushed_response_hdr_bytes(0), - pushed_response_body_bytes(0), - client_tcp_reused(false), - client_ssl_reused(false), - client_connection_is_ssl(false), - client_protocol("-"), - client_sec_protocol("-"), - client_cipher_suite("-"), - server_transact_count(0), - server_connection_is_ssl(false), - plugin_tag(nullptr), - plugin_id(0), - hooks_set(false), - cur_hook_id(TS_HTTP_LAST_HOOK), - cur_hook(nullptr), - cur_hooks(0), - callout_state(HTTP_API_NO_CALLOUT), - terminate_sm(false), - kill_this_async_done(false), - parse_range_done(false) -{ - memset(&history, 0, sizeof(history)); - memset(&vc_table, 0, sizeof(vc_table)); - memset(&http_parser, 0, sizeof(http_parser)); +HttpSM::HttpSM() : Continuation(nullptr) +{ + ink_zero(vc_table); + ink_zero(http_parser); } void @@ -3026,8 +2962,7 @@ HttpSM::tunnel_handler_server(int event, HttpTunnelProducer *p) if (close_connection) { p->vc->do_io_close(); - server_session = nullptr; // Because p->vc == server_session - p->read_vio = nullptr; + p->read_vio = nullptr; /* TS-1424: if we're outbound transparent and using the client source port for the outbound connection we must effectively propagate server closes back to the client. Part of that is @@ -3056,6 +2991,12 @@ HttpSM::tunnel_handler_server(int event, HttpTunnelProducer *p) } } + // The server session has been released. 
Clean all pointer + server_entry->in_tunnel = true; // to avid cleaning in clenup_entry + vc_table.cleanup_entry(server_entry); + server_session = nullptr; // Because p->vc == server_session + server_entry = nullptr; + return 0; } @@ -3378,7 +3319,8 @@ HttpSM::tunnel_handler_cache_write(int event, HttpTunnelConsumer *c) } else { *status_ptr = HttpTransact::CACHE_WRITE_COMPLETE; c->write_success = true; - c->write_vio = c->vc->do_io(VIO::CLOSE); + c->vc->do_io_close(); + c->write_vio = nullptr; } break; default: @@ -3791,7 +3733,7 @@ HttpSM::tunnel_handler_transform_write(int event, HttpTunnelConsumer *c) // has already completed (possible when the // transform intentionally truncates the response). // So close it - c->vc->do_io(VIO::CLOSE); + c->vc->do_io_close(); } break; default: @@ -3886,7 +3828,7 @@ HttpSM::tunnel_handler_plugin_agent(int event, HttpTunnelConsumer *c) // FALLTHROUGH case VC_EVENT_WRITE_COMPLETE: c->write_success = true; - c->vc->do_io(VIO::CLOSE); + c->vc->do_io_close(); break; default: ink_release_assert(0); diff --git a/proxy/http/HttpSM.h b/proxy/http/HttpSM.h index 3fb62d869d4..48def81cc31 100644 --- a/proxy/http/HttpSM.h +++ b/proxy/http/HttpSM.h @@ -271,79 +271,80 @@ class HttpSM : public Continuation const char *client_protocol_contains(ts::StringView tag_prefix) const; ts::StringView find_proto_string(HTTPVersion version) const; - int64_t sm_id; - unsigned int magic; + int64_t sm_id = -1; + unsigned int magic = HTTP_SM_MAGIC_DEAD; // YTS Team, yamsat Plugin - bool enable_redirection; // To check if redirection is enabled - char *redirect_url; // url for force redirect (provide users a functionality to redirect to another url when needed) - int redirect_url_len; - int redirection_tries; // To monitor number of redirections - int64_t transfered_bytes; // Added to calculate POST data - bool post_failed; // Added to identify post failure - bool debug_on; // Transaction specific debug flag + bool enable_redirection = false; // To check if 
redirection is enabled + char *redirect_url = nullptr; // url for force redirect (provide users a functionality to redirect to another url when needed) + int redirect_url_len = 0; + int redirection_tries = 0; // To monitor number of redirections + int64_t transfered_bytes = 0; // Added to calculate POST data + bool post_failed = false; // Added to identify post failure + bool debug_on = false; // Transaction specific debug flag // Tunneling request to plugin - HttpPluginTunnel_t plugin_tunnel_type; - PluginVCCore *plugin_tunnel; + HttpPluginTunnel_t plugin_tunnel_type = HTTP_NO_PLUGIN_TUNNEL; + PluginVCCore *plugin_tunnel = nullptr; HttpTransact::State t_state; protected: - int reentrancy_count; + int reentrancy_count = 0; struct History { const char *fileline; unsigned short event; short reentrancy; }; - History history[HISTORY_SIZE]; - int history_pos; + History history[HISTORY_SIZE] = {{nullptr, 0, 0}}; + ; + int history_pos = 0; HttpTunnel tunnel; HttpVCTable vc_table; - HttpVCTableEntry *ua_entry; + HttpVCTableEntry *ua_entry = nullptr; void remove_ua_entry(); public: - ProxyClientTransaction *ua_session; - BackgroundFill_t background_fill; + ProxyClientTransaction *ua_session = nullptr; + BackgroundFill_t background_fill = BACKGROUND_FILL_NONE; // AuthHttpAdapter authAdapter; void set_http_schedule(Continuation *); int get_http_schedule(int event, void *data); protected: - IOBufferReader *ua_buffer_reader; - IOBufferReader *ua_raw_buffer_reader; + IOBufferReader *ua_buffer_reader = nullptr; + IOBufferReader *ua_raw_buffer_reader = nullptr; - HttpVCTableEntry *server_entry; - HttpServerSession *server_session; + HttpVCTableEntry *server_entry = nullptr; + HttpServerSession *server_session = nullptr; /* Because we don't want to take a session from a shared pool if we know that it will be private, * but we cannot set it to private until we have an attached server session. 
* So we use this variable to indicate that * we should create a new connection and then once we attach the session we'll mark it as private. */ - bool will_be_private_ss; - int shared_session_retries; - IOBufferReader *server_buffer_reader; + bool will_be_private_ss = false; + int shared_session_retries = 0; + IOBufferReader *server_buffer_reader = nullptr; void remove_server_entry(); HttpTransformInfo transform_info; HttpTransformInfo post_transform_info; /// Set if plugin client / user agents are active. /// Need primarily for cleanup. - bool has_active_plugin_agents; + bool has_active_plugin_agents = false; HttpCacheSM cache_sm; HttpCacheSM transform_cache_sm; - HttpCacheSM *second_cache_sm; + HttpCacheSM *second_cache_sm = nullptr; - HttpSMHandler default_handler; - Action *pending_action; - Continuation *schedule_cont; + HttpSMHandler default_handler = nullptr; + Action *pending_action = nullptr; + Continuation *schedule_cont = nullptr; HTTPParser http_parser; void start_sub_sm(); @@ -472,8 +473,8 @@ class HttpSM : public Continuation HttpTunnelProducer *setup_transfer_from_transform_to_cache_only(); void setup_plugin_agents(HttpTunnelProducer *p); - HttpTransact::StateMachineAction_t last_action; - int (HttpSM::*m_last_state)(int event, void *data); + HttpTransact::StateMachineAction_t last_action = HttpTransact::SM_ACTION_UNDEFINED; + int (HttpSM::*m_last_state)(int event, void *data) = nullptr; virtual void set_next_state(); void call_transact_and_set_next_state(TransactEntryFunc_t f); @@ -484,52 +485,53 @@ class HttpSM : public Continuation int64_t server_transfer_init(MIOBuffer *buf, int hdr_size); public: + // TODO: Now that bodies can be empty, should the body counters be set to -1 ? 
TS-2213 // Stats & Logging Info - int client_request_hdr_bytes; - int64_t client_request_body_bytes; - int server_request_hdr_bytes; - int64_t server_request_body_bytes; - int server_response_hdr_bytes; - int64_t server_response_body_bytes; - int client_response_hdr_bytes; - int64_t client_response_body_bytes; - int cache_response_hdr_bytes; - int64_t cache_response_body_bytes; - int pushed_response_hdr_bytes; - int64_t pushed_response_body_bytes; - bool client_tcp_reused; + int client_request_hdr_bytes = 0; + int64_t client_request_body_bytes = 0; + int server_request_hdr_bytes = 0; + int64_t server_request_body_bytes = 0; + int server_response_hdr_bytes = 0; + int64_t server_response_body_bytes = 0; + int client_response_hdr_bytes = 0; + int64_t client_response_body_bytes = 0; + int cache_response_hdr_bytes = 0; + int64_t cache_response_body_bytes = 0; + int pushed_response_hdr_bytes = 0; + int64_t pushed_response_body_bytes = 0; + bool client_tcp_reused = false; // Info about client's SSL connection. - bool client_ssl_reused; - bool client_connection_is_ssl; - const char *client_protocol; - const char *client_sec_protocol; - const char *client_cipher_suite; - int server_transact_count; - bool server_connection_is_ssl; + bool client_ssl_reused = false; + bool client_connection_is_ssl = false; + const char *client_protocol = "-"; + const char *client_sec_protocol = "-"; + const char *client_cipher_suite = "-"; + int server_transact_count = 0; + bool server_connection_is_ssl = false; TransactionMilestones milestones; - ink_hrtime api_timer; + ink_hrtime api_timer = 0; // The next two enable plugins to tag the state machine for // the purposes of logging so the instances can be correlated // with the source plugin. - const char *plugin_tag; - int64_t plugin_id; + const char *plugin_tag = nullptr; + int64_t plugin_id = 0; // hooks_set records whether there are any hooks relevant // to this transaction. 
Used to avoid costly calls // do_api_callout_internal() - bool hooks_set; + bool hooks_set = false; protected: - TSHttpHookID cur_hook_id; - APIHook *cur_hook; + TSHttpHookID cur_hook_id = TS_HTTP_LAST_HOOK; + APIHook *cur_hook = nullptr; // // Continuation time keeper - int64_t prev_hook_start_time; + int64_t prev_hook_start_time = 0; - int cur_hooks; - HttpApiState_t callout_state; + int cur_hooks = 0; + HttpApiState_t callout_state = HTTP_API_NO_CALLOUT; // api_hooks must not be changed directly // Use txn_hook_{ap,pre}pend so hooks_set is @@ -539,9 +541,9 @@ class HttpSM : public Continuation // The terminate flag is set by handlers and checked by the // main handler who will terminate the state machine // when the flag is set - bool terminate_sm; - bool kill_this_async_done; - bool parse_range_done; + bool terminate_sm = false; + bool kill_this_async_done = false; + bool parse_range_done = false; virtual int kill_this_async_hook(int event, void *data); void kill_this(); void update_stats(); diff --git a/proxy/http/HttpSessionManager.cc b/proxy/http/HttpSessionManager.cc index 22b54801631..1e4334a2336 100644 --- a/proxy/http/HttpSessionManager.cc +++ b/proxy/http/HttpSessionManager.cc @@ -38,7 +38,7 @@ // Initialize a thread to handle HTTP session management void -initialize_thread_for_http_sessions(EThread *thread, int /* thread_index ATS_UNUSED */) +initialize_thread_for_http_sessions(EThread *thread) { thread->server_session_pool = new ServerSessionPool; } @@ -246,6 +246,7 @@ void HttpSessionManager::init() { m_g_pool = new ServerSessionPool; + eventProcessor.schedule_spawn(&initialize_thread_for_http_sessions, ET_NET); } // TODO: Should this really purge all keep-alive sessions? 
diff --git a/proxy/http/HttpTransact.cc b/proxy/http/HttpTransact.cc index a333bd29977..f3cb179d368 100644 --- a/proxy/http/HttpTransact.cc +++ b/proxy/http/HttpTransact.cc @@ -6320,7 +6320,7 @@ HttpTransact::is_request_retryable(State *s) // If safe requests are retryable, it should be safe to retry safe requests irrespective of bytes sent or connection state // according to RFC the following methods are safe (https://tools.ietf.org/html/rfc7231#section-4.2.1) // If there was no error establishing the connection (and we sent bytes)-- we cannot retry - if (!(s->txn_conf->safe_requests_retryable && HttpTransactHeaders::is_method_safe(s->method)) && + if (!HttpTransactHeaders::is_method_safe(s->method) && (s->current.state != CONNECTION_ERROR && s->state_machine->server_request_hdr_bytes > 0 && s->state_machine->get_server_session()->get_netvc()->outstanding() != s->state_machine->server_request_hdr_bytes)) { return false; @@ -6729,7 +6729,6 @@ HttpTransact::handle_request_keep_alive_headers(State *s, HTTPVersion ver, HTTPH // Note: if we are 1.1, we always need to send the close // header since persistant connnections are the default break; - case KA_UNKNOWN: default: ink_assert(0); break; @@ -6868,6 +6867,8 @@ HttpTransact::handle_response_keep_alive_headers(State *s, HTTPVersion ver, HTTP } } + ink_assert(ka_action != KA_UNKNOWN); + // Insert K-A headers as necessary switch (ka_action) { case KA_CONNECTION: @@ -6888,7 +6889,6 @@ HttpTransact::handle_response_keep_alive_headers(State *s, HTTPVersion ver, HTTP // Note: if we are 1.1, we always need to send the close // header since persistant connnections are the default break; - case KA_UNKNOWN: default: ink_assert(0); break; diff --git a/proxy/http/remap/RemapProcessor.cc b/proxy/http/remap/RemapProcessor.cc index d24fa5bc54b..1d241a8dffd 100644 --- a/proxy/http/remap/RemapProcessor.cc +++ b/proxy/http/remap/RemapProcessor.cc @@ -30,7 +30,7 @@ int RemapProcessor::start(int num_threads, size_t stacksize) { if 
(_use_separate_remap_thread) { - ET_REMAP = eventProcessor.spawn_event_threads(num_threads, "ET_REMAP", stacksize); // ET_REMAP is a class member + ET_REMAP = eventProcessor.spawn_event_threads("ET_REMAP", num_threads, stacksize); // ET_REMAP is a class member } return 0; diff --git a/proxy/logstats.cc b/proxy/logstats.cc index f2f31c71104..15c946b6626 100644 --- a/proxy/logstats.cc +++ b/proxy/logstats.cc @@ -904,22 +904,6 @@ update_results_elapsed(OriginStats *stat, int result, int elapsed, int size) update_elapsed(stat->elapsed.misses.refresh, elapsed, stat->results.misses.refresh); update_elapsed(stat->elapsed.misses.total, elapsed, stat->results.misses.total); break; - case SQUID_LOG_ERR_CLIENT_ABORT: - update_counter(stat->results.errors.client_abort, size); - update_counter(stat->results.errors.total, size); - break; - case SQUID_LOG_ERR_CONNECT_FAIL: - update_counter(stat->results.errors.connect_fail, size); - update_counter(stat->results.errors.total, size); - break; - case SQUID_LOG_ERR_INVALID_REQ: - update_counter(stat->results.errors.invalid_req, size); - update_counter(stat->results.errors.total, size); - break; - case SQUID_LOG_ERR_UNKNOWN: - update_counter(stat->results.errors.unknown, size); - update_counter(stat->results.errors.total, size); - break; case SQUID_LOG_TCP_DISK_HIT: case SQUID_LOG_TCP_REF_FAIL_HIT: case SQUID_LOG_UDP_HIT: @@ -938,12 +922,29 @@ update_results_elapsed(OriginStats *stat, int result, int elapsed, int size) update_elapsed(stat->elapsed.misses.other, elapsed, stat->results.misses.other); update_elapsed(stat->elapsed.misses.total, elapsed, stat->results.misses.total); break; + case SQUID_LOG_ERR_CLIENT_ABORT: + update_counter(stat->results.errors.client_abort, size); + update_counter(stat->results.errors.total, size); + break; + case SQUID_LOG_ERR_CONNECT_FAIL: + update_counter(stat->results.errors.connect_fail, size); + update_counter(stat->results.errors.total, size); + break; + case SQUID_LOG_ERR_INVALID_REQ: + 
update_counter(stat->results.errors.invalid_req, size); + update_counter(stat->results.errors.total, size); + break; + case SQUID_LOG_ERR_UNKNOWN: + update_counter(stat->results.errors.unknown, size); + update_counter(stat->results.errors.total, size); + break; default: - if ((result >= SQUID_LOG_ERR_READ_TIMEOUT) && (result <= SQUID_LOG_ERR_UNKNOWN)) { + // This depends on all errors being at the end of the enum ... Which is the case right now. + if (result < SQUID_LOG_ERR_READ_TIMEOUT) { + update_counter(stat->results.other, size); + } else { update_counter(stat->results.errors.other, size); update_counter(stat->results.errors.total, size); - } else { - update_counter(stat->results.other, size); } break; } @@ -2358,7 +2359,9 @@ open_main_log(ExitStatus &status) return -1; } #if HAVE_POSIX_FADVISE - posix_fadvise(main_fd, 0, 0, POSIX_FADV_DONTNEED); + if (0 != posix_fadvise(main_fd, 0, 0, POSIX_FADV_DONTNEED)) { + status.append(" posix_fadvise() failed"); + } #endif return main_fd; } @@ -2575,6 +2578,7 @@ main(int /* argc ATS_UNUSED */, const char *argv[]) exit_status.set(EXIT_WARNING, " can't read log directory"); } else { while ((dp = readdir(dirp)) != NULL) { + // coverity[fs_check_call] if (stat(dp->d_name, &stat_buf) < 0) { exit_status.set(EXIT_WARNING, " can't stat "); exit_status.append(dp->d_name); @@ -2629,7 +2633,9 @@ main(int /* argc ATS_UNUSED */, const char *argv[]) } // flock(state_fd, LOCK_UN); lck.l_type = F_UNLCK; - fcntl(state_fd, F_SETLK, &lck); + if (fcntl(state_fd, F_SETLK, &lck) < 0) { + exit_status.set(EXIT_WARNING, " can't unlock state_fd "); + } close(main_fd); close(state_fd); } else { diff --git a/proxy/shared/UglyLogStubs.cc b/proxy/shared/UglyLogStubs.cc index 5371b802d41..0dc3de1bce8 100644 --- a/proxy/shared/UglyLogStubs.cc +++ b/proxy/shared/UglyLogStubs.cc @@ -122,6 +122,12 @@ UnixNetProcessor::createNetAccept(const NetProcessor::AcceptOptions &opt) return nullptr; } +void +UnixNetProcessor::init() +{ + ink_release_assert(false); 
+} + // TODO: The following was necessary only for Solaris, should examine more. NetVCOptions const Connection::DEFAULT_OPTIONS; NetProcessor::AcceptOptions const NetProcessor::DEFAULT_ACCEPT_OPTIONS; @@ -156,13 +162,6 @@ CacheVC::handleWrite(int /* event ATS_UNUSED */, Event * /* e ATS_UNUSED */) UnixNetProcessor unix_netProcessor; NetProcessor &netProcessor = unix_netProcessor; -int -UnixNetProcessor::start(int, size_t) -{ - ink_release_assert(false); - return 0; -} - Action * NetProcessor::accept(Continuation * /* cont ATS_UNUSED */, AcceptOptions const & /* opt ATS_UNUSED */) { diff --git a/tests/gold_tests/remap/gold/remap-https-200_2.gold b/tests/gold_tests/remap/gold/remap-https-200_2.gold new file mode 100644 index 00000000000..4bb24ea4e34 --- /dev/null +++ b/tests/gold_tests/remap/gold/remap-https-200_2.gold @@ -0,0 +1,13 @@ +`` +> GET / HTTP/1.1 +> Host: www.anotherexample.com`` +> User-Agent: curl/`` +`` +< HTTP/1.1 200 OK +< Date: `` +< Age: `` +< Transfer-Encoding: chunked +< Connection: keep-alive +< Server: ATS/`` +< +`` diff --git a/tests/gold_tests/remap/remap_https.test.py b/tests/gold_tests/remap/remap_https.test.py index 7c744786732..84e6587214a 100644 --- a/tests/gold_tests/remap/remap_https.test.py +++ b/tests/gold_tests/remap/remap_https.test.py @@ -28,6 +28,7 @@ # Define default ATS ts=Test.MakeATSProcess("ts",select_ports=False) server=Test.MakeOriginServer("server") +server2=Test.MakeOriginServer("server2",ssl=True) #**testname is required** testName = "" @@ -35,6 +36,7 @@ #desired response form the origin server response_header={"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""} server.addResponse("sessionlog.json", request_header, response_header) +server2.addResponse("sessionlog.json", request_header, response_header) #add ssl materials like key, certificates for the server ts.addSSLfile("ssl/server.pem") @@ -58,6 +60,9 @@ ts.Disk.remap_config.AddLine( 'map https://www.example.com:{1} 
http://127.0.0.1:{0}'.format(server.Variables.Port,ts.Variables.ssl_port) ) +ts.Disk.remap_config.AddLine( + 'map https://www.anotherexample.com https://127.0.0.1:{0}'.format(server2.Variables.Port,ts.Variables.ssl_port) +) ts.Disk.ssl_multicert_config.AddLine( @@ -71,6 +76,7 @@ # time delay as proxy.config.http.wait_for_cache could be broken tr.Processes.Default.StartBefore(server) +tr.Processes.Default.StartBefore(server2) # Delay on readyness of our ssl ports tr.Processes.Default.StartBefore(Test.Processes.ts, ready=When.PortOpen(ts.Variables.ssl_port)) tr.Processes.Default.Streams.stderr="gold/remap-hitATS-404.gold" @@ -109,4 +115,11 @@ tr.Processes.Default.ReturnCode=0 tr.Processes.Default.Streams.stderr="gold/remap-hitATS-404.gold" +# map www.anotherexample.com to https://.com +tr=Test.AddTestRun() +tr.Processes.Default.Command='curl --http1.1 -k https://127.0.0.1:{0} -H "Host: www.anotherexample.com" --verbose'.format(ts.Variables.ssl_port) +tr.Processes.Default.ReturnCode=0 +tr.Processes.Default.Streams.stderr="gold/remap-https-200_2.gold" +tr.StillRunningAfter=server2 + diff --git a/tools/jtest/jtest.cc b/tools/jtest/jtest.cc index 61298b70609..3bde9e10237 100644 --- a/tools/jtest/jtest.cc +++ b/tools/jtest/jtest.cc @@ -749,7 +749,7 @@ send_response(int sock) char *url_start = nullptr; char *url_end = nullptr; int err = 0, towrite; - int url_len; + int url_len = 0; if (fd[sock].req_pos >= 0) { char header[1024]; @@ -2093,10 +2093,10 @@ static void extract_urls(char *buf, int buflen, char *base_url) { // if (verbose) printf("EXTRACT<<%s\n>>", buf); - char *start = nullptr; - char *end = nullptr; - char old_base[512]; - strcpy(old_base, base_url); + char *start = nullptr; + char *end = nullptr; + char old_base[512] = {0}; + strncpy(old_base, base_url, sizeof(old_base) - 1); start = strncasestr(buf, "